From 2ed8ec77d6effb6c373f56209aa52d9f6158f571 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 May 2023 14:49:06 -0700 Subject: [PATCH 001/236] chore(deps): bump reqwest from 0.11.17 to 0.11.18 (#17420) * chore(deps): bump reqwest from 0.11.17 to 0.11.18 Bumps [reqwest](https://github.com/seanmonstar/reqwest) from 0.11.17 to 0.11.18. - [Release notes](https://github.com/seanmonstar/reqwest/releases) - [Changelog](https://github.com/seanmonstar/reqwest/blob/master/CHANGELOG.md) - [Commits](https://github.com/seanmonstar/reqwest/compare/v0.11.17...v0.11.18) --- updated-dependencies: - dependency-name: reqwest dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Regenerate licenses inventory Signed-off-by: Jesse Szwedko --------- Signed-off-by: dependabot[bot] Signed-off-by: Jesse Szwedko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jesse Szwedko --- Cargo.lock | 25 +++++++++++++++++++------ LICENSE-3rdparty.csv | 1 + lib/k8s-e2e-tests/Cargo.toml | 2 +- lib/vector-api-client/Cargo.toml | 2 +- 4 files changed, 22 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 074592470a5a8..041550c53ff99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1513,7 +1513,7 @@ dependencies = [ "hex", "http", "hyper", - "hyper-rustls", + "hyper-rustls 0.23.1", "hyperlocal", "log", "pin-project-lite", @@ -4025,6 +4025,19 @@ dependencies = [ "tokio-rustls 0.23.4", ] +[[package]] +name = "hyper-rustls" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" +dependencies = [ + "http", + "hyper", + "rustls 0.21.0", + "tokio", + "tokio-rustls 0.24.0", +] + [[package]] name = "hyper-timeout" version = "0.4.1" @@ -6814,9 +6827,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.17" +version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13293b639a097af28fc8a90f22add145a9c954e49d77da06263d58cf44d5fb91" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ "base64 0.21.0", "bytes 1.4.0", @@ -6827,7 +6840,7 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls", + "hyper-rustls 0.24.0", "hyper-tls", "ipnet", "js-sys", @@ -6837,14 +6850,14 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.20.7", + "rustls 0.21.0", "rustls-pemfile 1.0.1", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-rustls 0.23.4", + "tokio-rustls 0.24.0", "tokio-util", "tower-service", "url", diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index ef5c467a86c93..d7727c200c259 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -234,6 +234,7 @@ hyper,https://github.com/hyperium/hyper,MIT,Sean McArthur hyper-openssl,https://github.com/sfackler/hyper-openssl,MIT OR Apache-2.0,Steven Fackler hyper-proxy,https://github.com/tafia/hyper-proxy,MIT,Johann Tuffe hyper-rustls,https://github.com/ctz/hyper-rustls,Apache-2.0 OR ISC OR MIT,Joseph Birr-Pixton +hyper-rustls,https://github.com/ctz/hyper-rustls,Apache-2.0 OR ISC OR MIT,The hyper-rustls Authors hyper-timeout,https://github.com/hjr3/hyper-timeout,MIT OR Apache-2.0,Herman J. 
Radtke III hyper-tls,https://github.com/hyperium/hyper-tls,MIT OR Apache-2.0,Sean McArthur hyperlocal,https://github.com/softprops/hyperlocal,MIT,softprops diff --git a/lib/k8s-e2e-tests/Cargo.toml b/lib/k8s-e2e-tests/Cargo.toml index 2489b6c769273..ad164e128355b 100644 --- a/lib/k8s-e2e-tests/Cargo.toml +++ b/lib/k8s-e2e-tests/Cargo.toml @@ -12,7 +12,7 @@ futures = "0.3" k8s-openapi = { version = "0.16.0", default-features = false, features = ["v1_19"] } k8s-test-framework = { version = "0.1", path = "../k8s-test-framework" } regex = "1" -reqwest = { version = "0.11.17", features = ["json"] } +reqwest = { version = "0.11.18", features = ["json"] } serde_json = "1" tokio = { version = "1.28.1", features = ["full"] } indoc = "2.0.1" diff --git a/lib/vector-api-client/Cargo.toml b/lib/vector-api-client/Cargo.toml index d7e15156f0997..36c40bcd7e9e7 100644 --- a/lib/vector-api-client/Cargo.toml +++ b/lib/vector-api-client/Cargo.toml @@ -25,7 +25,7 @@ tokio-stream = { version = "0.1.14", default-features = false, features = ["sync graphql_client = { version = "0.12.0", default-features = false, features = ["graphql_query_derive"] } # HTTP / WebSockets -reqwest = { version = "0.11.17", default-features = false, features = ["json"] } +reqwest = { version = "0.11.18", default-features = false, features = ["json"] } tokio-tungstenite = { version = "0.19.0", default-features = false, features = ["connect", "rustls"] } # External libs From e7fa8d373b74117c4d0d90902c3124e620c3c6c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 May 2023 23:08:05 +0000 Subject: [PATCH 002/236] chore(deps): bump rdkafka from 0.30.0 to 0.31.0 (#17428) Bumps [rdkafka](https://github.com/fede1024/rust-rdkafka) from 0.30.0 to 0.31.0. - [Changelog](https://github.com/fede1024/rust-rdkafka/blob/master/changelog.md) - [Commits](https://github.com/fede1024/rust-rdkafka/compare/0.30.0...v0.31.0) --- updated-dependencies: - dependency-name: rdkafka dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 041550c53ff99..358c636ef43fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6689,9 +6689,9 @@ dependencies = [ [[package]] name = "rdkafka" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97640b53443880ca65df40e9373a8193f9ad58b3f7419bc7206067f4a952500d" +checksum = "88383df3a85a38adfa2aa447d3ab6eb9cedcb49613adcf18e7e7ebb3b62e9b03" dependencies = [ "futures-channel", "futures-util", diff --git a/Cargo.toml b/Cargo.toml index 49fd1c3fc65f2..8367f450fc82b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -291,7 +291,7 @@ postgres-openssl = { version = "0.5.0", default-features = false, features = ["r pulsar = { version = "5.1.1", default-features = false, features = ["tokio-runtime", "auth-oauth2", "flate2", "lz4", "snap", "zstd"], optional = true } rand = { version = "0.8.5", default-features = false, features = ["small_rng"] } rand_distr = { version = "0.4.3", default-features = false } -rdkafka = { version = "0.30.0", default-features = false, features = ["tokio", "libz", "ssl", "zstd"], optional = true } +rdkafka = { version = "0.31.0", default-features = false, features = ["tokio", "libz", "ssl", "zstd"], optional = true } redis = { version = "0.23.0", default-features = false, features = ["connection-manager", "tokio-comp", "tokio-native-tls-comp"], optional = true } regex = { version = "1.8.1", default-features = false, features = ["std", "perf"] } roaring = { version = "0.10.1", default-features = false, optional = true } From ae656c7124b9c148e7a678967f58edc2a32501e5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 May 2023 15:04:53 +0000 Subject: [PATCH 003/236] chore(deps): bump proc-macro2 from 1.0.57 to 1.0.58 (#17426) Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.57 to 1.0.58. - [Release notes](https://github.com/dtolnay/proc-macro2/releases) - [Commits](https://github.com/dtolnay/proc-macro2/compare/1.0.57...1.0.58) --- updated-dependencies: - dependency-name: proc-macro2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 130 ++++++++++++++++++++++++++--------------------------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 358c636ef43fe..49ca20312d867 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -437,7 +437,7 @@ dependencies = [ "async-graphql-parser", "darling 0.14.2", "proc-macro-crate 1.2.1", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", "thiserror", @@ -557,7 +557,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -579,7 +579,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -596,7 +596,7 @@ version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -1564,7 +1564,7 @@ dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", "proc-macro-crate 0.1.5", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "syn 1.0.109", ] @@ -1574,7 +1574,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61820b4c5693eafb998b1e67485423c923db4a75f72585c247bdee32bad81e7b" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -1585,7 +1585,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c76cdbfa13def20d1f8af3ae7b3c6771f06352a74221d8851262ac384c122b8e" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -1656,7 +1656,7 @@ version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -1741,7 +1741,7 @@ checksum = "e10ca87c81aaa3a949dbbe2b5e6c2c45dbc94ba4897e45ea31ff9ec5087be3dc" dependencies = [ "cached_proc_macro_types", "darling 0.14.2", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -1978,7 +1978,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81d7dc0031c3a59a04fc2ba395c8e2dd463cba1859275f065d225f6122221b45" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -2485,7 +2485,7 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "scratch", "syn 1.0.109", @@ -2503,7 +2503,7 @@ version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -2536,7 +2536,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.57", + 
"proc-macro2 1.0.58", "quote 1.0.27", "strsim 0.10.0", "syn 1.0.109", @@ -2550,7 +2550,7 @@ checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "strsim 0.10.0", "syn 1.0.109", @@ -2695,7 +2695,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -2707,7 +2707,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "rustc_version 0.4.0", "syn 1.0.109", @@ -2964,7 +2964,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -2976,7 +2976,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -2988,7 +2988,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11f36e95862220b211a6e2aa5eca09b4fa391b13cd52ceb8035a24bf65a79de2" dependencies = [ "once_cell", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -3008,7 +3008,7 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -3214,7 +3214,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4c81935e123ab0741c4c4f0d9b8377e5fb21d3de7e062fa4b1263b1fbcba1ea" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -3395,7 +3395,7 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -3478,7 +3478,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb19fe8de3ea0920d282f7b77dd4227aea6b8b999b42cdf0ca41b2472b14443a" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -3578,7 +3578,7 @@ dependencies = [ "graphql-parser", "heck 0.4.0", "lazy_static", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "serde", "serde_json", @@ -3592,7 +3592,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52fc9cde811f44b15ec0692b31e56a3067f6f431c5ace712f286e47c1dacc98" dependencies = [ "graphql_client_codegen", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "syn 1.0.109", ] @@ -4946,7 +4946,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", 
"syn 2.0.10", ] @@ -5463,7 +5463,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.2.1", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -5475,7 +5475,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.2.1", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -5649,7 +5649,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -5901,7 +5901,7 @@ checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -5989,7 +5989,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -6204,7 +6204,7 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c142c0e46b57171fe0c528bee8c5b7569e80f0c17e377cd0e30ea57dbc11bb51" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "syn 1.0.109", ] @@ -6249,7 +6249,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", "version_check", @@ -6261,7 +6261,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "version_check", ] @@ -6289,9 +6289,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4ec6d5fe0b140acb27c9a0444118cf55bfbb4e0b259739429abb4521dd67c16" +checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" dependencies = [ "unicode-ident", ] @@ -6371,7 +6371,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -6400,7 +6400,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -6505,7 +6505,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -6525,7 +6525,7 @@ version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", ] [[package]] @@ -6920,7 +6920,7 @@ version = "0.7.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -7405,7 +7405,7 @@ version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -7416,7 +7416,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -7468,7 +7468,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -7527,7 +7527,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -7539,7 +7539,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" dependencies = [ "darling 0.14.2", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -7801,7 +7801,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -7942,7 +7942,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -7960,7 +7960,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "rustversion", "syn 1.0.109", @@ -7999,7 +7999,7 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "unicode-ident", ] @@ -8010,7 +8010,7 @@ version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "unicode-ident", ] @@ -8027,7 +8027,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", "unicode-xid 0.2.4", @@ -8186,7 +8186,7 @@ version = "1.0.40" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -8331,7 +8331,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -8551,7 +8551,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "prost-build", "quote 1.0.27", "syn 1.0.109", @@ -8656,7 +8656,7 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -8927,7 +8927,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -8957,7 +8957,7 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3e1c30cedd24fc597f7d37a721efdbdc2b1acae012c1ef1218f4c7c2c0f3e7" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", ] @@ -9516,7 +9516,7 @@ dependencies = [ "darling 0.13.4", "indexmap", "once_cell", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "serde", "serde_json", @@ -9529,7 +9529,7 @@ name = "vector-config-macros" version = "0.1.0" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "serde", "serde_derive_internals", @@ -9910,7 +9910,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", ] @@ -10011,7 +10011,7 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", "wasm-bindgen-shared", @@ -10045,7 +10045,7 @@ version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 2.0.10", "wasm-bindgen-backend", @@ -10450,7 +10450,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6505e6815af7de1746a08f69c69606bb45695a17149517680f3b2149713b19a3" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", ] @@ -10470,7 +10470,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ - "proc-macro2 1.0.57", + "proc-macro2 1.0.58", "quote 1.0.27", "syn 1.0.109", "synstructure", From c7d7cf8e36b9de6de7cd963e472d33b792c24413 Mon Sep 17 00:00:00 2001 From: everpcpc Date: Fri, 19 May 2023 00:51:58 +0800 Subject: [PATCH 004/236] fix(databend sink): use get for page request (#17373) --- 
src/sinks/databend/api.rs | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/src/sinks/databend/api.rs b/src/sinks/databend/api.rs index 73d769ff6a67a..a3b87e9a72c1a 100644 --- a/src/sinks/databend/api.rs +++ b/src/sinks/databend/api.rs @@ -111,19 +111,8 @@ impl DatabendAPIClient { async fn do_request( &self, - url: String, - req: Option, + mut request: Request, ) -> Result { - let body = match req { - Some(r) => { - let body = serde_json::to_vec(&r)?; - Body::from(body) - } - None => Body::empty(), - }; - let mut request = Request::post(url) - .header("Content-Type", "application/json") - .body(body)?; if let Some(a) = &self.auth { a.apply(&mut request); } @@ -163,7 +152,10 @@ impl DatabendAPIClient { next_uri: String, ) -> Result { let endpoint = self.get_page_endpoint(&next_uri)?; - self.do_request(endpoint, None).await + let request = Request::get(endpoint) + .header("Content-Type", "application/json") + .body(Body::empty())?; + self.do_request(request).await } pub(super) async fn query( @@ -171,7 +163,10 @@ impl DatabendAPIClient { req: DatabendHttpRequest, ) -> Result { let endpoint = self.get_query_endpoint()?; - let resp = self.do_request(endpoint, Some(req)).await?; + let request = Request::post(endpoint) + .header("Content-Type", "application/json") + .body(Body::from(serde_json::to_vec(&req)?))?; + let resp = self.do_request(request).await?; match resp.next_uri { None => Ok(resp), Some(_) => { From d1949921a81181e2eeb1780d7e081d767f758f5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20GARNIER?= Date: Thu, 18 May 2023 19:55:39 +0200 Subject: [PATCH 005/236] fix(fluent source): fix ack message format (#17407) --- src/sources/fluent/mod.rs | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/src/sources/fluent/mod.rs b/src/sources/fluent/mod.rs index 76e51ae7a066d..f25f413590324 100644 --- a/src/sources/fluent/mod.rs +++ b/src/sources/fluent/mod.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::io::{self, Read}; use std::net::SocketAddr; use std::time::Duration; @@ -9,8 +10,8 @@ use codecs::{BytesDeserializerConfig, StreamDecodingError}; use flate2::read::MultiGzDecoder; use lookup::lookup_v2::parse_value_path; use lookup::{metadata_path, owned_value_path, path, OwnedValuePath, PathPrefix}; -use rmp_serde::{decode, Deserializer}; -use serde::Deserialize; +use rmp_serde::{decode, Deserializer, Serializer}; +use serde::{Deserialize, Serialize}; use smallvec::{smallvec, SmallVec}; use tokio_util::codec::Decoder; use vector_config::configurable_component; @@ -532,15 +533,18 @@ impl TcpSourceAcker for FluentAcker { return None; } - let mut acks = String::new(); + let mut buf = Vec::new(); + let mut ser = Serializer::new(&mut buf); + let mut ack_map = HashMap::new(); + for chunk in self.chunks { - let ack = match ack { - TcpSourceAck::Ack => format!(r#"{{"ack": "{}"}}"#, chunk), - _ => String::from("{}"), + ack_map.clear(); + if let TcpSourceAck::Ack = ack { + ack_map.insert("ack", chunk); }; - acks.push_str(&ack); + ack_map.serialize(&mut ser).unwrap(); } - Some(acks.into()) + Some(buf.into()) } } @@ -861,7 +865,8 @@ mod tests { async fn ack_delivered_with_chunk() { let (result, output) = check_acknowledgements(EventStatus::Delivered, true).await; assert_eq!(result.unwrap().unwrap(), output.len()); - assert!(output.starts_with(b"{\"ack\":")); + let expected: Vec = vec![0x81, 0xa3, 0x61, 0x63]; // { "ack": ... 
+ assert_eq!(output[..expected.len()], expected); } #[tokio::test] @@ -875,7 +880,8 @@ mod tests { async fn ack_failed_with_chunk() { let (result, output) = check_acknowledgements(EventStatus::Rejected, true).await; assert_eq!(result.unwrap().unwrap(), output.len()); - assert_eq!(output, &b"{}"[..]); + let expected: Vec<u8> = vec![0x80]; // { } + assert_eq!(output, expected); } async fn check_acknowledgements( From 187f142ef5c28dec8e9b1ffbdfe0196acbe45804 Mon Sep 17 00:00:00 2001 From: neuronull Date: Thu, 18 May 2023 12:00:47 -0600 Subject: [PATCH 006/236] chore(external docs): update fluentd link (#17436) --- .../content/en/docs/reference/configuration/sources/fluent.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/en/docs/reference/configuration/sources/fluent.md b/website/content/en/docs/reference/configuration/sources/fluent.md index bd41805315928..409d320adfb89 100644 --- a/website/content/en/docs/reference/configuration/sources/fluent.md +++ b/website/content/en/docs/reference/configuration/sources/fluent.md @@ -1,6 +1,6 @@ --- title: Fluent -description: Collect logs from a [Fluentd](https://fluentd.org) or [Fluent Bit](https://fluentbit.io) agent +description: Collect logs from a [Fluentd](https://www.fluentd.org) or [Fluent Bit](https://fluentbit.io) agent kind: source layout: component tags: ["fluentd", "fluent", "component", "source", "logs"] --- From 54d9c99492ec14924994a4857961aaafe3200f9b Mon Sep 17 00:00:00 2001 From: Vladimir <31961982+zvlb@users.noreply.github.com> Date: Fri, 19 May 2023 18:46:28 +0300 Subject: [PATCH 007/236] chore(docs): Add info about Vector Operator to Kubernetes installation page (#17432) * chore(docs): Add info about Vector Operator Signed-off-by: zvlb * Cleanup Signed-off-by: zvlb --------- Signed-off-by: zvlb --- .../en/docs/setup/installation/platforms/kubernetes.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/website/content/en/docs/setup/installation/platforms/kubernetes.md b/website/content/en/docs/setup/installation/platforms/kubernetes.md index ddd8a7c1b794e..1d79cfcdf0a62 100644 --- a/website/content/en/docs/setup/installation/platforms/kubernetes.md +++ b/website/content/en/docs/setup/installation/platforms/kubernetes.md @@ -13,7 +13,7 @@ tested with Kubernetes versions **1.19** or higher. ## Install -You can install Vector on Kubernetes using either [Helm](#helm) or [kubectl](#kubectl). +You can install Vector on Kubernetes using [Helm](#helm), [kubectl](#kubectl), or the [Vector Operator](#vector-operator). ### Helm @@ -144,6 +144,12 @@ kubectl apply -k . "kubectl logs -n vector statefulset/vector" ``` +### Vector Operator + +The [Vector Operator](https://github.com/kaasops/vector-operator) is a community-supported resource. The operator deploys and configures a Vector Agent as a DaemonSet on every Node to collect container and application logs from the Node's file system. + +For additional information, see the [documentation](https://github.com/kaasops/vector-operator/tree/main/docs). + ## Deployment Vector is an end-to-end observability data pipeline designed to deploy under various roles. You mix and match these roles to create topologies. The intent is to make Vector as flexible as possible, allowing you to fluidly integrate Vector into your infrastructure over time.
The deployment section demonstrates common Vector pipelines: From 7a7bc9a3fe65d04d4e945186b1cbb31517ed8a64 Mon Sep 17 00:00:00 2001 From: Scott Balmos <399112+sbalmos@users.noreply.github.com> Date: Fri, 19 May 2023 11:57:47 -0400 Subject: [PATCH 008/236] enhancement(s3 source) Add minimal support to unwrap an S3-SQS event from an SNS event (#17352) --- .github/actions/spelling/expect.txt | 232 +--------------------------- src/sources/aws_s3/sqs.rs | 40 ++++- 2 files changed, 46 insertions(+), 226 deletions(-) diff --git a/.github/actions/spelling/expect.txt b/.github/actions/spelling/expect.txt index a21bb9d750a9a..9b8f575030f13 100644 --- a/.github/actions/spelling/expect.txt +++ b/.github/actions/spelling/expect.txt @@ -4,37 +4,26 @@ abcdefghijklmnopqrstuvwxyzand abced abortable acb -Acho ack'ing acking Acq -addrof -addtnl AEAD agentpayload aimd akx allowerased -alphanum -AMF amka -ampersat amping amqps amz amzn -anaana anchore androideabi andy -annanas ansicpg -ansix anumber anycondition -anymap anypb -ANZ apievent apipodspec apk @@ -45,16 +34,12 @@ aqf architecting archivable ARNOTAREALIDD -Arrowz arshiyasolei asdf asdfasdf -asis ASMS -aspiegel assertverify Asterix -Astring asynk atag atx @@ -77,20 +62,14 @@ autospawning autotools avro awscli -awseb awsec awslabs -aww axum Aziz azureresourceid babim -Baca -Bada badunit bak -baq -barbaz barfoo barieom baseof @@ -104,7 +83,6 @@ bcdea benchgen benchmarker benefritz -Berita bev bfb bgwriter @@ -113,7 +91,6 @@ Bincode bindgen bizbaz bla -blabla blabop blafoo blem @@ -121,19 +98,14 @@ blkio Bloblang Blockfi blockmanager -bloob blpop blt -bmobile -bononos bonzai -bools boop booper booperbot bopbla boringssl -boto bottlenecked bpower brackethighlighter @@ -203,7 +175,6 @@ cloudwatchlogs cmark CMK cnf -Coc CODEOWNERS colddb coldline @@ -211,13 +182,11 @@ commonmark comms compactarr compactmap -compiter componenterror componenteventsdropped componenteventsreceived componenteventssent composability -compvalue concating concats condrestart @@ -225,7 +194,6 @@ configkey configmap confl confy -Conkeror consigliere CONTEUDO cooldown @@ -248,28 +216,21 @@ cryptsoft csb cstats csvlog -cta Cthink -cubot customise customizability customtype cwl Dailywarehousing -damerau -darp daschl dashmap dataflows datafuselabs -Datanyze datasources datastream Datelike -dateref datid datname -davidhuie dbkind dbreader DBserver @@ -296,23 +257,19 @@ defattr defaultauthdb deff defghijklmnopqrstuvwxyzand -definit delaycompress deliverystream demoing -derp descriptorpb deser deserializations desync -DEUC developerguide devno DHAVE dhclient diffing diffs -DIGNO disintermediate distrib dld @@ -321,17 +278,13 @@ DNi dnsmsg docsearch docsrs -doesnotcrash dogcats Dogsketch dogsketches dogstatsd domhandler Doop -doot downcasted -DQUOTE -droid droptest dsl dstat @@ -348,13 +301,11 @@ eabihf eay ebfcee edenhill -Edg edns eeyun efg efgh Elhage -Eluga emca EMON Emph @@ -362,7 +313,6 @@ emptypb emt Enableable encodable -Encryptor endef endler enduml @@ -372,7 +322,6 @@ enumdecl enumflags ENVARS envsubst -EOI EOIG EOL'ed Erfxl @@ -380,13 +329,11 @@ Err'ing errorf Errorsfor esb -ESPN esque etheus etl ETNUV etsy -EUA eur eventarray evented @@ -396,12 +343,10 @@ eventstoredb eventstreamsink evictingpages evmap -evntslog -EVO evt -EWENE ewma examplegroup +EXAMPLEn exitcodes exprhere extendedstatus @@ -412,20 +357,17 @@ extrepo Failable fakedata falco -fals fanatid fanouts fastcc fastmod fbarbar fbaro -FBAV fbf fcharset fcontext feeney festeburg -FFar ffebda ffeef fffffffff @@ -436,7 
+378,6 @@ filecontents filterlist finalizable fingerprinter -firetv fizzaxbbuzz fizzbuzz fkn @@ -446,11 +387,7 @@ Flatbuffers flate flatmaps flattenings -fleep flork -florp -FME -fnclosure fng fnil Fomichev @@ -462,7 +399,6 @@ FOOBARy foobaz foobazbar foobla -foofoo foometric fooo foos @@ -470,6 +406,7 @@ footag footgunning Forcepoint formatdoc +FPa framestream FRecord freerunning @@ -481,20 +418,17 @@ fuchsnj fullhuman futs fuzzcheck -fuzzer fwfiles -FXPDEUC +GAmw GAPI gaugehistogram gaugor GC'ing gcra -gdx geh genericify genproto genrsa -Genx geoffleyland geolite getelementptr @@ -539,7 +473,6 @@ hashring hashset hashsum hba -Hbb hdrhistogram headchunk hec @@ -549,7 +482,6 @@ heka hereregexp herestring hexdump -Hfoo highlighters histo hname @@ -576,7 +508,6 @@ iamanapikey iamasplunkhectoken iana iarna -Ideos idhack IDML ified @@ -590,7 +521,6 @@ incrementalize indexmap indicatif indoc -Inettv influxdata ingesters ingestor @@ -599,19 +529,15 @@ initech Instrumentable interpolatedstring interpretervm -intex invalidauth invokefunction invtrapezium -inzone -IOA ioapiset ionicon iostat iouring iowait IPORHOST -iru isainfo isdbgrid isp @@ -639,7 +565,6 @@ jstype jsvs jszwedko jtype -JUC juchiast karppila kartar @@ -649,7 +574,6 @@ keybase keyclock keyid keypair -keystream keyxxxxx khvzak kib @@ -658,20 +582,20 @@ killproc kinesisfirehose kinit klog +Knx ktff kvlist kvs +Kxs labelmap lalrpop Lamport landingpad -lastitem lastword ldd leebenson leveldb lfd -lfoo libclang LIBGNUTLS liblogging @@ -682,7 +606,6 @@ linting listenfd litstr llen -Lme lnt lntable lntd @@ -692,7 +615,6 @@ logbar logdna logevents logfmt -logid lognamespace lognamespacing logplex @@ -702,20 +624,11 @@ logsense logsev logseverity logtypes -loguid -lookaround -lookupbufs losslessly lpop lpush -LPW -LQuery -LRule -LSQRBRACKET -lstr Luc luciofranco -luckystar lucperkins lukesteensen macports @@ -727,11 +640,9 @@ markability markdownify markdownlintrc marketo -matchall maxdepth maxed maxes -maxint maxs maxwritten maybeanothertest @@ -762,14 +673,11 @@ minioadmin miniodat minwindef mio -mirall misordering -Miui mkcert mkto mlua mmdb -mmdd Mmm moby mockwatchlogs @@ -777,13 +685,10 @@ modulesloaddir mooper moosh Mooshing -morefield moretags -morevalue mortems motivatingly MOZGIII -mpms mre msgpack mskv @@ -793,8 +698,6 @@ msv multiarch multievents multitenant -multiterm -multitermlookahead munging musleabihf muslueabihf @@ -812,12 +715,10 @@ mymachine mypod mytable myvalue -MZX nacked nacks Namazu namespacefoo -nananaman nananana nanosecs nats @@ -827,19 +728,14 @@ nbase ndarray ndjson nearline -nestedkey -NETTV neuronull newcerts newrelix nextest -nfield nfox ngx nightlies nindent -ning -nink nkey nmeta noack @@ -853,7 +749,6 @@ nomac NONALPHANUM nonbare noncycles -nonk nonsending nonstring noog @@ -864,11 +759,8 @@ nopqrstuvwxyz norc norecurse noreplace -norg -norgle norknoog norknork -no_run nosync notext notls @@ -883,40 +775,26 @@ nowin npipe NQTP nresamples -nullandnull nullishness numbackends -numericstart oap -Obar -Obigo OKD omfwd omitempty -ond -Onefootball oneline oneof onezone -onik -onk onlyfields onlyone ooba oobar ook -oopsie opcounters openstring opinsights oplog -opples -OPR optimizable -Optimus -organisations orgid -originsicname ostype otel otelcol @@ -926,10 +804,8 @@ otlp otlphttp ouicompat outputspatterns -outzone overaligned overalignment -overwritable owo oyaml pablosichert @@ -939,12 +815,9 @@ parallelizable pareto partitionable passthrough -patchelf pathbuf pathgen -peci peekable -peeker PEMS pgmajfault 
PII @@ -956,7 +829,6 @@ plork pnh podspec Ponge -ponk portpicker POSINT postinst @@ -972,9 +844,7 @@ prerot presetdir pretrunc prettydiff -prettytable primaryfont -printstd probot processname procid @@ -987,23 +857,15 @@ protoc protofbuf protosizer Prt -PRTG psv -PTST publickey purgecss pyld -QFn -QGIS -qmobile +Pzb qqq -qstr -queryroot -QUESTIONMARK quickcheck quix quuux -quuz qux quz qwe @@ -1012,7 +874,6 @@ rande RANDFILE rawconfig rawstring -rbaz rdkafka rdparty rdr @@ -1025,18 +886,15 @@ referenceable regexes regexset reinstantiate -Rekonq reloadable remapper remotehost reorganisation reparse -replacen replacepkgs replicaset replset reqwest -rer rereleased reserialize resharding @@ -1044,7 +902,6 @@ resourcemanager respawn restorecon retryable -rhv rkyv rmem rmi @@ -1053,18 +910,10 @@ rmpv rndc rngs rolledback -rootquery -roxmltree rpd rpush -rquery -RRRRRRRLLLLLLLLLLLLLLLLLLLLLLLL -RRule -rsplitn -RSQRBRACKET rstrings RTTs -rulenum runc runhcs rusoto @@ -1077,13 +926,11 @@ Rustinomicon rustls RUSTSEC rustup -rustyline rxi rxmsg rxs ryangjchandler ryu -saanich sadf samehost samenet @@ -1091,12 +938,10 @@ samerole sameuser sandboxed sandboxing -sby sccache schemaless schemars schoen -schucks scl sda sdata @@ -1104,16 +949,13 @@ SDID seahash secfrac Seedable -segmentbuf semanage sematext SEO -sequencenum serie serverlogs serviceaccount servicebus -Seznam sfixed sfrag sghall @@ -1121,8 +963,6 @@ shane sharedstatedir Shenzhen shiplift -shning -shnoog shortcode shortstat should've @@ -1140,7 +980,6 @@ sinknetworkbytessent sizecache Sizefor skinparam -SKIPDATA skywalking slashin slf @@ -1170,18 +1009,14 @@ spencergilbert splitn SPOF spog -spork springframework srcport SREs -SResult sret SRPMS ssekms ssn sspi -SSSZ -SSSZZ sstrings stabilises stackdrive @@ -1199,14 +1034,12 @@ strat strconv streamsink strng -strp structfield subchunks suberr subfolders subfooter sublimelinter -sublocation subsec substrategies subtagline @@ -1219,7 +1052,6 @@ supertrait suser sustainability svalue -Swiftfox Sya sysfs sysinit @@ -1229,7 +1061,6 @@ systemid Syu Szwedko tablesmap -tac tagfoo tagline tagset @@ -1239,18 +1070,11 @@ Takeaways targetgroup tarpit tcmalloc -tdkind -Techvision -tecno -Teleca -Telechips telecom -Telesys templatable templateable templating terabytes -termcolor terraform tes testevent @@ -1265,11 +1089,8 @@ thaweddb thicc Thimphu thinkies -Thisais thiserror thisisastring -thonk -thot threatmanager throughputs thrpt @@ -1288,7 +1109,6 @@ tobz tocbot todos tokio -Tolino Tomola tonydanza toolbars @@ -1296,41 +1116,30 @@ toolchains TOOLSDIRECTORY toolset toor -topbuzz topdir topojson toproto -torvec -Toughpad Toxiproxy Tpng Trauring Treemap Trello -Treo trialled triggerable tripwires Trivago trivy Troutwine -tru TRUSTSTORE TSDB Tsvg turbofish -Twitterbot twox txmsg txs -Tygron typechecked typetag -tzs uap -uaparser -uas -UCWEB udm UIDs uieao @@ -1340,9 +1149,7 @@ unacked undertagline underutilized underutilizing -Unescaping unevictable -ungrokkable unioning unitdir unmark @@ -1351,7 +1158,6 @@ unnests unstructuredlogentries unsync untuple -uol upgradable urql usecase @@ -1359,7 +1165,6 @@ userinfo userlands usermod userpass -ustr uucp UVY uwtable @@ -1367,7 +1172,6 @@ valfoo validpaths Varda vdev -Vdroid VECTORCFG vectordir vectordotdev @@ -1375,19 +1179,15 @@ vectorized vendored veryyyyyyy viceversa -VIERA viewkind visualising -VLC VMs VNQ volumeconfig vrl -VRR vts vvo vvv -VVVVVVVVRRRRRRRRRRRRRRRRR VYa wahhh waitsforfullbatch @@ -1398,16 +1198,11 @@ waninfo wasmtime watchexec watchlogs 
-Waterfox wayfor webgraphviz webservers websites -webviews weee -weekyear -Wellco -Weltbild wemustcontinuetodiscard wensite whoopsie @@ -1416,7 +1211,6 @@ willreturn winioctl wiredtiger wiremock -with'quote wix wixobj WIXUI @@ -1433,20 +1227,15 @@ writablelib writeback wrongpass wrongsecret -wronly wtcache wtime wtimeouts wtr wurstmeister wwang -xaablabla xact -xbar xcatsy Xcg -XENENE -Xiao xlarge xpack xscale @@ -1455,9 +1244,6 @@ XUtil xvf XVXv xxs -xxxxxxx -xxxxxxxxxxxxxxx -xxxxxxxxxxxxxxxxxxxxxxx xzy YAMLs YBv @@ -1466,20 +1252,18 @@ Yellowdog yippeee yolo YRjhx +Ystd ytt -zalgo zam -Zania ZDfz zerk zibble zieme -Zii zirp +Zkwcmo zoob zoobub zoog -zook zoop zork zorp diff --git a/src/sources/aws_s3/sqs.rs b/src/sources/aws_s3/sqs.rs index 4adae407ce9d2..3907ecaa9241c 100644 --- a/src/sources/aws_s3/sqs.rs +++ b/src/sources/aws_s3/sqs.rs @@ -405,8 +405,12 @@ impl IngestorProcess { } async fn handle_sqs_message(&mut self, message: Message) -> Result<(), ProcessingError> { - let s3_event: SqsEvent = serde_json::from_str(message.body.unwrap_or_default().as_ref()) - .context(InvalidSqsMessageSnafu { + let sqs_body = message.body.unwrap_or_default(); + let sqs_body = serde_json::from_str::(sqs_body.as_ref()) + .map(|notification| notification.message) + .unwrap_or(sqs_body); + let s3_event: SqsEvent = + serde_json::from_str(sqs_body.as_ref()).context(InvalidSqsMessageSnafu { message_id: message .message_id .clone() @@ -713,6 +717,13 @@ fn handle_single_log( }; } +// https://docs.aws.amazon.com/sns/latest/dg/sns-sqs-as-subscriber.html +#[derive(Clone, Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct SnsNotification { + pub message: String, +} + // https://docs.aws.amazon.com/AmazonS3/latest/userguide/how-to-enable-disable-notification-intro.html #[derive(Clone, Debug, Deserialize)] #[serde(untagged)] @@ -940,3 +951,28 @@ fn test_s3_testevent() { assert_eq!(value.event.kind, "s3".to_string()); assert_eq!(value.event.name, "TestEvent".to_string()); } + +#[test] +fn test_s3_sns_testevent() { + let sns_value: SnsNotification = serde_json::from_str( + r#"{ + "Type" : "Notification", + "MessageId" : "63a3f6b6-d533-4a47-aef9-fcf5cf758c76", + "TopicArn" : "arn:aws:sns:us-west-2:123456789012:MyTopic", + "Subject" : "Testing publish to subscribed queues", + "Message" : "{\"Bucket\":\"bucketname\",\"Event\":\"s3:TestEvent\",\"HostId\":\"8cLeGAmw098X5cv4Zkwcmo8vvZa3eH3eKxsPzbB9wrR+YstdA6Knx4Ip8EXAMPLE\",\"RequestId\":\"5582815E1AEA5ADF\",\"Service\":\"Amazon S3\",\"Time\":\"2014-10-13T15:57:02.089Z\"}", + "Timestamp" : "2012-03-29T05:12:16.901Z", + "SignatureVersion" : "1", + "Signature" : "EXAMPLEnTrFPa3...", + "SigningCertURL" : "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem", + "UnsubscribeURL" : "https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:123456789012:MyTopic:c7fe3a54-ab0e-4ec2-88e0-db410a0f2bee" + }"#, + ).unwrap(); + + let value: S3TestEvent = serde_json::from_str(sns_value.message.as_ref()).unwrap(); + + assert_eq!(value.service, "Amazon S3".to_string()); + assert_eq!(value.bucket, "bucketname".to_string()); + assert_eq!(value.event.kind, "s3".to_string()); + assert_eq!(value.event.name, "TestEvent".to_string()); +} From a8b7899bea771e6f2ca2e7c78c5a1c578f03d78f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 May 2023 10:00:07 -0700 Subject: [PATCH 009/236] chore(deps): bump lapin from 2.1.1 to 2.1.2 (#17439) 
Bumps [lapin](https://github.com/amqp-rs/lapin) from 2.1.1 to 2.1.2. - [Changelog](https://github.com/amqp-rs/lapin/blob/main/CHANGELOG.md) - [Commits](https://github.com/amqp-rs/lapin/compare/lapin-2.1.1...lapin-2.1.2) --- updated-dependencies: - dependency-name: lapin dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 49ca20312d867..ebd36e70b604d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4587,9 +4587,9 @@ checksum = "e5c1f7869c94d214466c5fd432dfed12c379fd87786768d36455892d46b18edd" [[package]] name = "lapin" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd03ea5831b44775e296239a64851e2fd14a80a363d202ba147009ffc994ff0f" +checksum = "21e22f78402c4f817c8d8986ff62921952d9f2218db11de6a6b10517d2112cf4" dependencies = [ "amq-protocol", "async-global-executor-trait", diff --git a/Cargo.toml b/Cargo.toml index 8367f450fc82b..e6d38929276c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -207,7 +207,7 @@ goauth = { version = "0.13.1", optional = true } smpl_jwt = { version = "0.7.1", default-features = false, optional = true } # AMQP -lapin = { version = "2.1.1", default-features = false, features = ["native-tls"], optional = true } +lapin = { version = "2.1.2", default-features = false, features = ["native-tls"], optional = true } # API async-graphql = { version = "5.0.8", default-features = false, optional = true, features = ["chrono"] } From ac0c7e82fc5877a58a60da872c40ad9b63143953 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 May 2023 10:03:07 -0700 Subject: [PATCH 010/236] chore(deps): bump security-framework from 2.9.0 to 2.9.1 (#17441) Bumps [security-framework](https://github.com/kornelski/rust-security-framework) from 2.9.0 to 2.9.1. - [Release notes](https://github.com/kornelski/rust-security-framework/releases) - [Commits](https://github.com/kornelski/rust-security-framework/compare/v2.9.0...v2.9.1) --- updated-dependencies: - dependency-name: security-framework dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- lib/vector-core/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ebd36e70b604d..dcfc8127d6ef6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7294,9 +7294,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2855b3715770894e67cbfa3df957790aa0c9edc3bf06efa1a84d77fa0839d1" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags", "core-foundation", diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index deac7b5988d6e..2ed6e414e2e3e 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -68,7 +68,7 @@ vector-config-macros = { path = "../vector-config-macros" } vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0" } [target.'cfg(target_os = "macos")'.dependencies] -security-framework = "2.9.0" +security-framework = "2.9.1" [target.'cfg(windows)'.dependencies] schannel = "0.1.21" From 91ba052ba59d920761a02f7999c4b5d8b39d1766 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 May 2023 18:27:29 +0000 Subject: [PATCH 011/236] chore(deps): bump toml from 0.7.3 to 0.7.4 (#17440) * chore(deps): bump toml from 0.7.3 to 0.7.4 Bumps [toml](https://github.com/toml-rs/toml) from 0.7.3 to 0.7.4. - [Commits](https://github.com/toml-rs/toml/compare/toml-v0.7.3...toml-v0.7.4) --- updated-dependencies: - dependency-name: toml dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Regenerate license file Signed-off-by: Jesse Szwedko --------- Signed-off-by: dependabot[bot] Signed-off-by: Jesse Szwedko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jesse Szwedko --- Cargo.lock | 30 +++++++++++++++--------------- Cargo.toml | 2 +- LICENSE-3rdparty.csv | 2 +- lib/vector-config/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 4 ++-- vdev/Cargo.toml | 2 +- 6 files changed, 21 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dcfc8127d6ef6..e056f1c1ed105 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7354,7 +7354,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a78072b550e5c20bc4a9d1384be28809cbdb7b25b2b4707ddc6d908b7e6de3bf" dependencies = [ - "toml 0.7.3", + "toml 0.7.4", ] [[package]] @@ -7475,9 +7475,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" dependencies = [ "serde", ] @@ -8479,9 +8479,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21" +checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec" dependencies = [ "serde", "serde_spanned", @@ -8491,18 +8491,18 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.6" +version = "0.19.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08de71aa0d6e348f070457f85af8bd566e2bc452156a423ddf22861b3a953fae" +checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f" dependencies = [ "indexmap", "serde", @@ -9183,7 +9183,7 @@ dependencies = [ "serde_yaml 0.9.21", "sha2 0.10.6", "tempfile", - "toml 0.7.3", + "toml 0.7.4", ] [[package]] @@ -9352,7 +9352,7 @@ dependencies = [ "tokio-test", "tokio-tungstenite 0.19.0", "tokio-util", - "toml 0.7.3", + "toml 0.7.4", "tonic", "tonic-build", "tower", @@ -9500,7 +9500,7 @@ dependencies = [ "serde_json", "serde_with 2.3.2", "snafu", - "toml 0.7.3", + "toml 0.7.4", "tracing 0.1.37", "url", "vector-config-common", @@ -9601,7 +9601,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "toml 0.7.3", + "toml 0.7.4", "tonic", "tower", "tracing 0.1.37", @@ -10356,9 +10356,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.3.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee7b2c67f962bf5042bfd8b6a916178df33a26eec343ae064cb8e069f638fa6f" +checksum = "61de7bac303dc551fe038e2b3cef0f571087a47571ea6e79a87692ac99b99699" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index e6d38929276c8..2c39b81891058 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -307,7 +307,7 @@ syslog = { version = "6.1.0", default-features = false, optional = true } tikv-jemallocator = 
{ version = "0.5.0", default-features = false, optional = true } tokio-postgres = { version = "0.7.7", default-features = false, features = ["runtime", "with-chrono-0_4"], optional = true } tokio-tungstenite = {version = "0.19.0", default-features = false, features = ["connect"], optional = true} -toml = { version = "0.7.3", default-features = false, features = ["parse", "display"] } +toml = { version = "0.7.4", default-features = false, features = ["parse", "display"] } tonic = { version = "0.9", optional = true, default-features = false, features = ["transport", "codegen", "prost", "tls", "tls-roots", "gzip"] } trust-dns-proto = { version = "0.22.0", default-features = false, features = ["dnssec"], optional = true } typetag = { version = "0.2.8", default-features = false } diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index d7727c200c259..ebace511178c0 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -523,7 +523,7 @@ tokio-postgres,https://github.com/sfackler/rust-postgres,MIT OR Apache-2.0,Steve tokio-rustls,https://github.com/tokio-rs/tls,MIT OR Apache-2.0,quininer kel tokio-tungstenite,https://github.com/snapview/tokio-tungstenite,MIT,"Daniel Abramov , Alexey Galakhov " toml,https://github.com/toml-rs/toml,MIT OR Apache-2.0,Alex Crichton -toml_edit,https://github.com/ordian/toml_edit,MIT OR Apache-2.0,"Andronik Ordian , Ed Page " +toml_edit,https://github.com/toml-rs/toml,MIT OR Apache-2.0,"Andronik Ordian , Ed Page " tonic,https://github.com/hyperium/tonic,MIT,Lucio Franco tower,https://github.com/tower-rs/tower,MIT,Tower Maintainers tower-http,https://github.com/tower-rs/tower-http,MIT,Tower Maintainers diff --git a/lib/vector-config/Cargo.toml b/lib/vector-config/Cargo.toml index 63227b44d4a13..41f1a40ff2c59 100644 --- a/lib/vector-config/Cargo.toml +++ b/lib/vector-config/Cargo.toml @@ -23,7 +23,7 @@ serde = { version = "1.0", default-features = false } serde_json = { version = "1.0", default-features = false, features = ["std"] } serde_with = { version = "2.3.2", default-features = false, features = ["std"] } snafu = { version = "0.7.4", default-features = false } -toml = { version = "0.7.3", default-features = false } +toml = { version = "0.7.4", default-features = false } tracing = { version = "0.1.34", default-features = false } url = { version = "2.3.1", default-features = false, features = ["serde"] } vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["compiler"] } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 2ed6e414e2e3e..488322f0786c1 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -50,7 +50,7 @@ tokio = { version = "1.28.1", default-features = false, features = ["net"] } tokio-openssl = { version = "0.6.3", default-features = false } tokio-stream = { version = "0.1", default-features = false, features = ["time"], optional = true } tokio-util = { version = "0.7.0", default-features = false, features = ["time"] } -toml = { version = "0.7.3", default-features = false } +toml = { version = "0.7.4", default-features = false } tonic = { version = "0.9", default-features = false, features = ["transport"] } tower = { version = "0.4", default-features = false, features = ["util"] } tracing = { version = "0.1.34", default-features = false } @@ -86,7 +86,7 @@ quickcheck_macros = "1" proptest = "1.1" similar-asserts = "1.4.2" tokio-test = "0.4.2" -toml = { version = "0.7.3", default-features = false, features = ["parse"] } +toml = { version = 
"0.7.4", default-features = false, features = ["parse"] } ndarray = "0.15.6" ndarray-stats = "0.5.1" noisy_float = "0.2.0" diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 30bec3e54f089..bb9d4714ac770 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -37,4 +37,4 @@ serde_json = "1.0.96" serde_yaml = "0.9.21" sha2 = "0.10.6" tempfile = "3.5.0" -toml = { version = "0.7.2", default-features = false, features = ["parse"] } +toml = { version = "0.7.4", default-features = false, features = ["parse"] } From b6394228d53508f22c6a65c69961baff19457c05 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 May 2023 19:22:44 +0000 Subject: [PATCH 012/236] chore(deps): bump lapin from 2.1.2 to 2.2.0 (#17443) Bumps [lapin](https://github.com/amqp-rs/lapin) from 2.1.2 to 2.2.0. - [Changelog](https://github.com/amqp-rs/lapin/blob/main/CHANGELOG.md) - [Commits](https://github.com/amqp-rs/lapin/compare/lapin-2.1.2...lapin-2.2.0) --- updated-dependencies: - dependency-name: lapin dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e056f1c1ed105..66bf503df2293 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4587,9 +4587,9 @@ checksum = "e5c1f7869c94d214466c5fd432dfed12c379fd87786768d36455892d46b18edd" [[package]] name = "lapin" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e22f78402c4f817c8d8986ff62921952d9f2218db11de6a6b10517d2112cf4" +checksum = "c8fccae926c0fd9bc7199d79119ed26b91c9da4534153d46b8ab2b65bcbc5a02" dependencies = [ "amq-protocol", "async-global-executor-trait", diff --git a/Cargo.toml b/Cargo.toml index 2c39b81891058..54121597b81e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -207,7 +207,7 @@ goauth = { version = "0.13.1", optional = true } smpl_jwt = { version = "0.7.1", default-features = false, optional = true } # AMQP -lapin = { version = "2.1.2", default-features = false, features = ["native-tls"], optional = true } +lapin = { version = "2.2.0", default-features = false, features = ["native-tls"], optional = true } # API async-graphql = { version = "5.0.8", default-features = false, optional = true, features = ["chrono"] } From 05bf262536031d199c06d980f47be317c97520ea Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 May 2023 19:43:25 +0000 Subject: [PATCH 013/236] chore(deps): bump clap_complete from 4.2.3 to 4.3.0 (#17447) Bumps [clap_complete](https://github.com/clap-rs/clap) from 4.2.3 to 4.3.0. - [Release notes](https://github.com/clap-rs/clap/releases) - [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md) - [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.2.3...clap_complete-v4.3.0) --- updated-dependencies: - dependency-name: clap_complete dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- vdev/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66bf503df2293..20bb90f8e9769 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1964,9 +1964,9 @@ dependencies = [ [[package]] name = "clap_complete" -version = "4.2.3" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1594fe2312ec4abf402076e407628f5c313e54c32ade058521df4ee34ecac8a8" +checksum = "a04ddfaacc3bc9e6ea67d024575fafc2a813027cf374b8f24f7bc233c6b6be12" dependencies = [ "clap 4.1.14", ] diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index bb9d4714ac770..c8b9b4f39ccf2 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -14,7 +14,7 @@ cached = "0.43.0" chrono = { version = "0.4.22", default-features = false, features = ["serde", "clock"] } clap = { version = "4.1.14", features = ["derive"] } clap-verbosity-flag = "2.0.1" -clap_complete = "4.2.3" +clap_complete = "4.3.0" confy = "0.5.1" directories = "5.0.1" # remove this when stabilized https://doc.rust-lang.org/stable/std/path/fn.absolute.html From 618379a27583f6233a76c5b788616816b74bee03 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 May 2023 20:36:37 +0000 Subject: [PATCH 014/236] chore(deps): bump lapin from 2.2.0 to 2.2.1 (#17448) Bumps [lapin](https://github.com/amqp-rs/lapin) from 2.2.0 to 2.2.1. - [Changelog](https://github.com/amqp-rs/lapin/blob/main/CHANGELOG.md) - [Commits](https://github.com/amqp-rs/lapin/compare/lapin-2.2.0...lapin-2.2.1) --- updated-dependencies: - dependency-name: lapin dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20bb90f8e9769..73fc72074ec44 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4587,9 +4587,9 @@ checksum = "e5c1f7869c94d214466c5fd432dfed12c379fd87786768d36455892d46b18edd" [[package]] name = "lapin" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8fccae926c0fd9bc7199d79119ed26b91c9da4534153d46b8ab2b65bcbc5a02" +checksum = "acc13beaa09eed710f406201f46b961345b4d061dd90ec3d3ccc70721e70342a" dependencies = [ "amq-protocol", "async-global-executor-trait", diff --git a/Cargo.toml b/Cargo.toml index 54121597b81e3..bd54413ed613c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -207,7 +207,7 @@ goauth = { version = "0.13.1", optional = true } smpl_jwt = { version = "0.7.1", default-features = false, optional = true } # AMQP -lapin = { version = "2.2.0", default-features = false, features = ["native-tls"], optional = true } +lapin = { version = "2.2.1", default-features = false, features = ["native-tls"], optional = true } # API async-graphql = { version = "5.0.8", default-features = false, optional = true, features = ["chrono"] } From 060399a4bbef4280d1cea7c04304ed1308504ca0 Mon Sep 17 00:00:00 2001 From: neuronull Date: Mon, 22 May 2023 09:37:55 -0600 Subject: [PATCH 015/236] chore(ci): Move most CI checks to merge queue (#17340) Introduces a number of changes to our project's GH workflows: - There is a more limited set of tests that run on each PR commit, and the rest were moved to the merge queue. 
- Most workflows can be triggered manually by a team member though a comment on a PR. - Integration tests are run on PR commits if they touch the files specific to that component. --- .github/workflows/changes.yml | 333 ++++++++ .github/workflows/cli.yml | 63 ++ .github/workflows/comment-trigger.yml | 118 +++ .../{baseline.yml => compilation-timings.yml} | 4 +- .github/workflows/component_features.yml | 46 ++ .github/workflows/cross.yml | 102 +++ .github/workflows/environment.yml | 37 +- .github/workflows/install-sh.yml | 73 +- .github/workflows/integration-comment.yml | 175 +++++ .github/workflows/integration-test.yml | 141 +--- .github/workflows/integration.yml | 140 ++++ .github/workflows/k8s_e2e.yml | 214 +++-- .github/workflows/master_merge_queue.yml | 153 ++++ .github/workflows/misc.yml | 62 ++ .github/workflows/msrv.yml | 21 + .github/workflows/regression.yml | 739 ++++++++++++++++-- .github/workflows/regression_trusted.yml | 595 -------------- .github/workflows/test.yml | 268 +------ .github/workflows/unit_mac.yml | 65 ++ .github/workflows/unit_windows.yml | 53 ++ docs/CONTRIBUTING.md | 7 + 21 files changed, 2308 insertions(+), 1101 deletions(-) create mode 100644 .github/workflows/changes.yml create mode 100644 .github/workflows/cli.yml create mode 100644 .github/workflows/comment-trigger.yml rename .github/workflows/{baseline.yml => compilation-timings.yml} (97%) create mode 100644 .github/workflows/component_features.yml create mode 100644 .github/workflows/cross.yml create mode 100644 .github/workflows/integration-comment.yml create mode 100644 .github/workflows/integration.yml create mode 100644 .github/workflows/master_merge_queue.yml create mode 100644 .github/workflows/misc.yml create mode 100644 .github/workflows/msrv.yml delete mode 100644 .github/workflows/regression_trusted.yml create mode 100644 .github/workflows/unit_mac.yml create mode 100644 .github/workflows/unit_windows.yml diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml new file mode 100644 index 0000000000000..449f4210b624a --- /dev/null +++ b/.github/workflows/changes.yml @@ -0,0 +1,333 @@ +# This workflow identifies changes between the base and the head ref, for use in +# other workflows to decide if they should be executed. + +name: Identify Changes + +on: + workflow_call: + # These inputs allow the filter action to be able to access the correct refs for + # comparison in changes detection, it is required as this is called from the + # merge_group context. 
+ inputs: + base_ref: + required: true + type: string + head_ref: + required: true + type: string + outputs: + source: + value: ${{ jobs.changes.outputs.source }} + dependencies: + value: ${{ jobs.changes.outputs.dependencies }} + internal_events: + value: ${{ jobs.changes.outputs.internal_events }} + cue: + value: ${{ jobs.changes.outputs.cue }} + component_docs: + value: ${{ jobs.changes.outputs.component_docs }} + markdown: + value: ${{ jobs.changes.outputs.markdown }} + install: + value: ${{ jobs.changes.outputs.install }} + k8s: + value: ${{ jobs.changes.outputs.k8s }} + all-int: + value: ${{ jobs.changes.outputs.all-int }} + amqp: + value: ${{ jobs.changes.outputs.amqp }} + appsignal: + value: ${{ jobs.changes.outputs.appsignal }} + aws: + value: ${{ jobs.changes.outputs.aws }} + axiom: + value: ${{ jobs.changes.outputs.axiom }} + azure: + value: ${{ jobs.changes.outputs.azure }} + clickhouse: + value: ${{ jobs.changes.outputs.clickhouse }} + databend: + value: ${{ jobs.changes.outputs.databend }} + datadog: + value: ${{ jobs.changes.outputs.datadog }} + dnstap: + value: ${{ jobs.changes.outputs.dnstap }} + docker-logs: + value: ${{ jobs.changes.outputs.docker-logs }} + elasticsearch: + value: ${{ jobs.changes.outputs.elasticsearch }} + eventstoredb: + value: ${{ jobs.changes.outputs.eventstoredb }} + fluent: + value: ${{ jobs.changes.outputs.fluent }} + gcp: + value: ${{ jobs.changes.outputs.gcp }} + humio: + value: ${{ jobs.changes.outputs.humio }} + http-client: + value: ${{ jobs.changes.outputs.http-client }} + influxdb: + value: ${{ jobs.changes.outputs.influxdb }} + kafka: + value: ${{ jobs.changes.outputs.kafka }} + logstash: + value: ${{ jobs.changes.outputs.logstash }} + loki: + value: ${{ jobs.changes.outputs.loki }} + mongodb: + value: ${{ jobs.changes.outputs.mongodb }} + nats: + value: ${{ jobs.changes.outputs.nats }} + nginx: + value: ${{ jobs.changes.outputs.nginx }} + opentelemetry: + value: ${{ jobs.changes.outputs.opentelemetry }} + postgres: + value: ${{ jobs.changes.outputs.postgres }} + prometheus: + value: ${{ jobs.changes.outputs.prometheus }} + pulsar: + value: ${{ jobs.changes.outputs.pulsar }} + redis: + value: ${{ jobs.changes.outputs.redis }} + splunk: + value: ${{ jobs.changes.outputs.splunk }} + webhdfs: + value: ${{ jobs.changes.outputs.webhdfs }} + +jobs: + changes: + runs-on: ubuntu-20.04 + # Set job outputs to values from filter step + outputs: + # General source code + source: ${{ steps.filter.outputs.source }} + dependencies: ${{ steps.filter.outputs.dependencies }} + internal_events: ${{ steps.filter.outputs.internal_events }} + cue: ${{ steps.filter.outputs.cue }} + component_docs: ${{ steps.filter.outputs.component_docs }} + markdown: ${{ steps.filter.outputs.markdown }} + install: ${{ steps.filter.outputs.install }} + # K8s + k8s: ${{ steps.filter.outputs.k8s }} + # Integrations + all-int: ${{ steps.filter.outputs.all-int }} + amqp: ${{ steps.filter.outputs.amqp }} + appsignal: ${{ steps.filter.outputs.appsignal}} + aws: ${{ steps.filter.outputs.aws }} + axiom: ${{ steps.filter.outputs.axiom }} + azure: ${{ steps.filter.outputs.azure }} + clickhouse: ${{ steps.filter.outputs.clickhouse }} + databend: ${{ steps.filter.outputs.databend }} + datadog: ${{ steps.filter.outputs.datadog }} + dnstap: ${{ steps.filter.outputs.dnstap }} + docker-logs: ${{ steps.filter.outputs.docker-logs }} + elasticsearch: ${{ steps.filter.outputs.elasticsearch }} + eventstoredb: ${{ steps.filter.outputs.eventstoredb }} + fluent: ${{ steps.filter.outputs.fluent }} + 
gcp: ${{ steps.filter.outputs.gcp }} + humio: ${{ steps.filter.outputs.humio }} + http-client: ${{ steps.filter.outputs.http-client }} + influxdb: ${{ steps.filter.outputs.influxdb }} + kafka: ${{ steps.filter.outputs.kafka }} + logstash: ${{ steps.filter.outputs.logstash }} + loki: ${{ steps.filter.outputs.loki }} + mongodb: ${{ steps.filter.outputs.mongodb }} + nats: ${{ steps.filter.outputs.nats }} + nginx: ${{ steps.filter.outputs.nginx }} + opentelemetry: ${{ steps.filter.outputs.opentelemetry }} + postgres: ${{ steps.filter.outputs.postgres }} + prometheus: ${{ steps.filter.outputs.prometheus }} + pulsar: ${{ steps.filter.outputs.pulsar }} + redis: ${{ steps.filter.outputs.redis }} + splunk: ${{ steps.filter.outputs.splunk }} + webhdfs: ${{ steps.filter.outputs.webhdfs }} + steps: + - uses: actions/checkout@v3 + + - uses: dorny/paths-filter@v2 + id: filter + with: + base: ${{ inputs.base_ref }} + ref: ${{ inputs.head_ref }} + filters: | + source: + - ".github/workflows/test.yml" + - ".cargo/**" + - "benches/**" + - "lib/**" + - "proto/**" + - "scripts/**" + - "src/**" + - "tests/**" + - "build.rs" + - "Cargo.lock" + - "Cargo.toml" + - "Makefile" + - "rust-toolchain.toml" + - "vdev/**" + deny: + - 'deny.toml' + - "vdev/**" + dependencies: + - ".cargo/**" + - 'Cargo.toml' + - 'Cargo.lock' + - 'rust-toolchain.toml' + - '.github/workflows/pr.yml' + - 'Makefile' + - 'scripts/cross/**' + - "vdev/**" + cue: + - 'website/cue/**' + - "vdev" + component_docs: + - 'scripts/generate-component-docs.rb' + - "vdev/**" + markdown: + - '**/**.md' + - "vdev/**" + internal_events: + - 'src/internal_events/**' + - "vdev/**" + docker: + - 'distribution/docker/**' + - "vdev/**" + install: + - ".github/workflows/install-sh.yml" + - "distribution/install.sh" + k8s: + - "src/sources/kubernetes_logs/**" + all-int: + - "lib/vector-core/**" + amqp: + - "src/amqp.rs" + - "src/internal_events/amqp.rs" + - "src/sinks/amqp/**" + - "src/sources/amqp.rs" + - "src/sources/util/**" + - "src/sinks/util/**" + appsignal: + - "src/sinks/appsignal/**" + - "src/sinks/util/**" + aws: + - "src/aws_**" + - "src/internal_events/aws_**" + - "src/sources/aws_**" + - "src/sources/util/**" + - "src/sinks/aws_**" + - "src/sinks/util/**" + - "src/transforms/aws_**" + axiom: + - "src/sinks/axiom.rs" + - "src/sinks/util/**" + azure: + - "src/sinks/azure_**" + - "src/sinks/util/**" + clickhouse: + - "src/sinks/clickhouse/**" + - "src/sinks/util/**" + databend: + - "src/sinks/databend/**" + - "src/sinks/util/**" + datadog: + - "src/common/datadog.rs" + - "src/internal_events/datadog_*" + - "src/sources/datadog_agent/**" + - "src/sinks/datadog/**" + - "src/sinks/datadog_archives.rs" + - "src/sinks/util/**" + docker-logs: + - "src/docker.rs" + - "src/internal_events/docker_logs.rs" + - "src/sources/docker_logs/**" + - "src/sources/util/**" + elasticsearch: + - "src/sinks/elasticsearch/**" + - "src/sinks/util/**" + eventstoredb: + - "src/internal_events/eventstoredb_metrics.rs" + - "src/sources/eventstoredb_metrics/**" + - "src/sources/util/**" + fluent: + - "src/internal_events/fluent.rs" + - "src/sources/fluent/**" + - "src/sources/util/**" + gcp: + - "src/internal_events/gcp_pubsub.rs" + - "src/sources/gcp_pubsub.rs" + - "src/sources/util/**" + - "src/sinks/gcp/**" + - "src/sinks/util/**" + - "src/gcp.rs" + humio: + - "src/sinks/humio/**" + - "src/sinks/util/**" + http-client: + - "src/sinks/http-client/**" + influxdb: + - "src/internal_events/influxdb.rs" + - "src/sinks/influxdb/**" + - "src/sinks/util/**" + kafka: + - 
"src/internal_events/kafka.rs" + - "src/sinks/kafka/**" + - "src/sinks/util/**" + - "src/sources/kafka.rs" + - "src/sources/util/**" + - "src/kafka.rs" + logstash: + - "src/sources/logstash.rs" + - "src/sources/util/**" + loki: + - "src/internal_events/loki.rs" + - "src/sinks/loki/**" + - "src/sinks/util/**" + mongodb: + - "src/internal_events/mongodb_metrics.rs" + - "src/sources/mongodb_metrics/**" + - "src/sources/util/**" + nats: + - "src/internal_events/nats.rs" + - "src/sources/nats.rs" + - "src/sources/util/**" + - "src/sinks/nats.rs" + - "src/sinks/util/**" + - "src/nats.rs" + nginx: + - "src/internal_events/nginx_metrics.rs" + - "src/sources/nginx_metrics/**" + - "src/sources/util/**" + opentelemetry: + - "src/sources/opentelemetry/**" + - "src/sources/util/**" + postgres: + - "src/internal_events/postgresql_metrics.rs" + - "src/sources/postgresql_metrics.rs" + - "src/sources/util/**" + prometheus: + - "src/internal_events/prometheus.rs" + - "src/sources/prometheus/**" + - "src/sources/util/**" + - "src/sinks/prometheus/**" + - "src/sinks/util/**" + pulsar: + - "src/internal_events/pulsar.rs" + - "src/sinks/pulsar/**" + - "src/sinks/util/**" + redis: + - "src/internal_events/redis.rs" + - "src/sources/redis/**" + - "src/sources/util/**" + - "src/sinks/redis.rs" + - "src/sinks/util/**" + splunk: + - "src/internal_events/splunk_hec.rs" + - "src/sources/splunk_hec/**" + - "src/sources/util/**" + - "src/sinks/splunk_hec/**" + - "src/sinks/util/**" + webhdfs: + - "src/sinks/webhdfs/**" + - "src/sinks/util/**" + diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml new file mode 100644 index 0000000000000..8ec1cf0ba8cc5 --- /dev/null +++ b/.github/workflows/cli.yml @@ -0,0 +1,63 @@ +name: CLI - Linux + +on: + workflow_call: + +jobs: + test-cli: + runs-on: [linux, ubuntu-20.04-8core] + env: + CARGO_INCREMENTAL: 0 + steps: + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + if: ${{ github.event_name == 'issue_comment' }} + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: CLI - Linux + status: pending + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: ${{ github.event_name != 'issue_comment' }} + uses: actions/checkout@v3 + + - name: Cache Cargo registry + index + uses: actions/cache@v3 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh + - run: bash scripts/environment/prepare.sh + - run: echo "::add-matcher::.github/matchers/rust.json" + - run: make test-cli + - name: Upload test results + run: scripts/upload-test-results.sh + if: always() + + - name: (PR comment) Set latest commit status as ${{ job.status }} + uses: myrotvorets/set-commit-status-action@1.1.6 + if: always() && github.event_name == 'issue_comment' + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: CLI - Linux + status: ${{ job.status }} diff --git a/.github/workflows/comment-trigger.yml 
b/.github/workflows/comment-trigger.yml new file mode 100644 index 0000000000000..e9c2c234233bb --- /dev/null +++ b/.github/workflows/comment-trigger.yml @@ -0,0 +1,118 @@ +# Comment Trigger +# +# This workflow is a central point for triggering workflow runs that normally run only as part of the merge queue, +# on demand by a comment. The exception being the integration tests, which have their own workflow file for +# comment triggers as the logic is a bit more complex. +# +# The available triggers are: +# +# /ci-run-all : runs all of the below +# /ci-run-cli : runs CLI - Linux +# /ci-run-misc : runs Miscellaneous - Linux +# /ci-run-component-features : runs Component Features - Linux +# /ci-run-cross : runs Cross +# /ci-run-unit-mac : runs Unit - Mac +# /ci-run-unit-windows : runs Unit - Windows +# /ci-run-environment : runs Environment Suite +# /ci-run-install : runs Update install.sh Suite +# /ci-run-regression : runs Regression Detection Suite + +name: Comment Trigger + +on: + issue_comment: + types: [created] + +env: + DD_ENV: "ci" + RUST_BACKTRACE: full + TEST_LOG: vector=debug + VERBOSE: true + CI: true + PROFILE: debug + # observing issues fetching boringssl via HTTPS in the OSX build, seeing if this helps + # can be removed when we switch back to the upstream openssl-sys crate + CARGO_NET_GIT_FETCH_WITH_CLI: true + +concurrency: + group: ${{ github.workflow }}-${{ github.event.issue_comment.issue.id }}-${{ github.event.comment.body }} + cancel-in-progress: true + +jobs: + validate: + name: Validate comment + runs-on: ubuntu-latest + if: | + github.event.issue.pull_request && ( contains(github.event.comment.body, '/ci-run-all') + || contains(github.event.comment.body, '/ci-run-cli') + || contains(github.event.comment.body, '/ci-run-misc') + || contains(github.event.comment.body, '/ci-run-component-features') + || contains(github.event.comment.body, '/ci-run-cross') + || contains(github.event.comment.body, '/ci-run-unit-mac') + || contains(github.event.comment.body, '/ci-run-unit-windows') + || contains(github.event.comment.body, '/ci-run-environment') + || contains(github.event.comment.body, '/ci-run-install') + || contains(github.event.comment.body, '/ci-run-regression') + ) + steps: + - name: Validate issue comment + id: comment + uses: tspascoal/get-user-teams-membership@v2 + with: + username: ${{ github.actor }} + team: 'Vector' + GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + + cli: + needs: validate + if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-cli') + uses: ./.github/workflows/cli.yml + secrets: inherit + + misc: + needs: validate + if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-misc') + uses: ./.github/workflows/misc.yml + secrets: inherit + + component-features: + needs: validate + if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-component-features') + uses: ./.github/workflows/component_features.yml + secrets: inherit + + cross: + needs: validate + if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-cross') + uses: ./.github/workflows/cross.yml + secrets: inherit + + unit-mac: + needs: validate + if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-unit-mac') + uses: ./.github/workflows/unit_mac.yml + secrets: inherit + + unit-windows: + needs: validate + if: contains(github.event.comment.body, '/ci-run-all') || 
contains(github.event.comment.body, '/ci-run-unit-windows') + uses: ./.github/workflows/unit_windows.yml + secrets: inherit + + environment: + needs: validate + if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-environment') + uses: ./.github/workflows/environment.yml + secrets: inherit + + install: + needs: validate + if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-install') + uses: ./.github/workflows/install-sh.yml + secrets: inherit + + regression: + needs: validate + if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-regression') + uses: ./.github/workflows/regression.yml + secrets: inherit diff --git a/.github/workflows/baseline.yml b/.github/workflows/compilation-timings.yml similarity index 97% rename from .github/workflows/baseline.yml rename to .github/workflows/compilation-timings.yml index 2bc12be8c05c3..e96bea65ea946 100644 --- a/.github/workflows/baseline.yml +++ b/.github/workflows/compilation-timings.yml @@ -1,8 +1,8 @@ -# Executes various builds of vector to time the results in order to track build times. +# Executes various builds of vector to time the results in order to track compilation times. # # This workflow is unrelated to the Regression workflow. -name: Baseline Timings +name: Compilation Timings on: workflow_dispatch: diff --git a/.github/workflows/component_features.yml b/.github/workflows/component_features.yml new file mode 100644 index 0000000000000..747483c1316cd --- /dev/null +++ b/.github/workflows/component_features.yml @@ -0,0 +1,46 @@ +name: Component Features - Linux + +on: + workflow_call: + +jobs: + check-component-features: + runs-on: [linux, ubuntu-20.04-8core] + steps: + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + uses: myrotvorets/set-commit-status-action@1.1.6 + if: ${{ github.event_name == 'issue_comment' }} + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Component Features - Linux + status: pending + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: ${{ github.event_name != 'issue_comment' }} + uses: actions/checkout@v3 + + - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh + - run: bash scripts/environment/prepare.sh + - run: echo "::add-matcher::.github/matchers/rust.json" + - run: make check-component-features + + - name: (PR comment) Set latest commit status as ${{ job.status }} + uses: myrotvorets/set-commit-status-action@1.1.6 + if: always() && github.event_name == 'issue_comment' + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Component Features - Linux + status: ${{ job.status }} diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml new file mode 100644 index 0000000000000..e5fcdceb84ffb --- /dev/null +++ b/.github/workflows/cross.yml @@ -0,0 +1,102 @@ +name: Cross + +on: + workflow_call: + +jobs: + cross-linux: + name: Cross - ${{ matrix.target }} + runs-on: [linux, ubuntu-20.04-8core] + env: + CARGO_INCREMENTAL: 0 + strategy: + matrix: + target: + - x86_64-unknown-linux-gnu + - 
x86_64-unknown-linux-musl + - aarch64-unknown-linux-gnu + - aarch64-unknown-linux-musl + - armv7-unknown-linux-gnueabihf + - armv7-unknown-linux-musleabihf + steps: + + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + if: ${{ github.event_name == 'issue_comment' }} + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Cross + status: pending + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: ${{ github.event_name != 'issue_comment' }} + uses: actions/checkout@v3 + + - uses: actions/cache@v3 + name: Cache Cargo registry + index + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - run: echo "::add-matcher::.github/matchers/rust.json" + - run: 'cargo install cross --version 0.2.4 --force --locked' + # Why is this build, not check? Because we need to make sure the linking phase works. + # aarch64 and musl in particular are notoriously hard to link. + # While it may be tempting to slot a `check` in here for quickness, please don't. + - run: make cross-build-${{ matrix.target }} + - uses: actions/upload-artifact@v3 + with: + name: "vector-debug-${{ matrix.target }}" + path: "./target/${{ matrix.target }}/debug/vector" + + - name: (PR comment) Set latest commit status as failed + uses: myrotvorets/set-commit-status-action@1.1.6 + if: failure() && github.event_name == 'issue_comment' + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Cross + status: 'failure' + + update-pr-status: + name: (PR comment) Signal result to PR + runs-on: ubuntu-20.04 + needs: cross-linux + if: needs.cross-linux.result == 'success' && github.event_name == 'issue_comment' + steps: + - name: Validate issue comment + uses: tspascoal/get-user-teams-membership@v2 + with: + username: ${{ github.actor }} + team: 'Vector' + GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + + - name: (PR comment) Get PR branch + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Submit PR result as success + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Cross + status: 'success' diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 2e08442f4eeef..e8c64e4fca5ab 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -1,11 +1,11 @@ name: Environment Suite on: - pull_request: {} + workflow_call: + workflow_dispatch: push: branches: - master - workflow_dispatch: env: VERBOSE: true @@ -15,8 +15,30 @@ jobs: publish-new-environment: runs-on: ubuntu-20.04 steps: - - name: Checkout + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + if: ${{ github.event_name == 'issue_comment' }} + uses: myrotvorets/set-commit-status-action@1.1.6 + 
with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Environment Suite + status: pending + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: ${{ github.event_name != 'issue_comment' }} + uses: actions/checkout@v3 + - name: Set up QEMU uses: docker/setup-qemu-action@v2.1.0 - name: Set up Docker Buildx @@ -48,3 +70,12 @@ jobs: push: ${{ github.ref == 'refs/heads/master' }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} + + - name: (PR comment) Set latest commit status as ${{ job.status }} + uses: myrotvorets/set-commit-status-action@1.1.6 + if: always() && github.event_name == 'issue_comment' + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Environment Suite + status: ${{ job.status }} diff --git a/.github/workflows/install-sh.yml b/.github/workflows/install-sh.yml index f9619dba9e542..c19ea3e9310a8 100644 --- a/.github/workflows/install-sh.yml +++ b/.github/workflows/install-sh.yml @@ -1,46 +1,71 @@ name: Update install.sh Suite on: - push: - branches: - - master - paths: - - '.github/workflows/install-sh.yml' - - 'distribution/install.sh' + workflow_call: workflow_dispatch: - jobs: + sync-install: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v3 + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + if: ${{ github.event_name == 'issue_comment' }} + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Update install.sh Suite + status: pending + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: ${{ github.event_name != 'issue_comment' }} + uses: actions/checkout@v3 + - run: pip3 install awscli --upgrade --user - env: AWS_ACCESS_KEY_ID: "${{ secrets.CI_AWS_ACCESS_KEY_ID }}" AWS_SECRET_ACCESS_KEY: "${{ secrets.CI_AWS_SECRET_ACCESS_KEY }}" run: make sync-install + - name: (PR comment) Set latest commit status as failed + uses: myrotvorets/set-commit-status-action@1.1.6 + if: failure() && github.event_name == 'issue_comment' + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Update install.sh Suite + status: 'failure' + test-install: - needs: - - sync-install + needs: sync-install runs-on: ubuntu-20.04 steps: - run: sudo apt-get install --yes curl bc - run: curl --proto '=https' --tlsv1.2 -sSf https://sh.vector.dev | bash -s -- -y - run: ~/.vector/bin/vector --version - install-shell-failure: - name: install-shell-failure - if: failure() - needs: - - sync-install - - test-install - runs-on: ubuntu-20.04 - steps: - - name: Discord notification - env: - DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }} - uses: Ilshidur/action-discord@0.3.2 - with: - args: "Update of sh.vector.dev failed: " + - name: (PR comment) Get PR branch + if: github.event_name == 'issue_comment' + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as ${{ job.status }} + if: 
github.event_name == 'issue_comment' + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Update install.sh Suite + status: ${{ job.status }} diff --git a/.github/workflows/integration-comment.yml b/.github/workflows/integration-comment.yml new file mode 100644 index 0000000000000..6481699d569f8 --- /dev/null +++ b/.github/workflows/integration-comment.yml @@ -0,0 +1,175 @@ +# Integration Test Comment +# +# This workflow runs one or more integration tests triggered by a comment in a PR. +# The comment include '/ci-run-integration'. +# Then, any if the integration names will trigger that specific integration. +# 'all' will trigger every integration to run. +# The order does not matter and can be anywhere inside the comment body. +# +# Examples: +# +# 1. Run a single integration test: +# +# /ci-run-integration amqp +# +# 2. Run three specific integration tests: +# +# /ci-run-integration dnstap redis amqp +# +# 3. Run all integration tests: +# +# /ci-run-integration all + +name: Integration Test Comment + +on: + issue_comment: + types: [created] + +env: + AWS_ACCESS_KEY_ID: "dummy" + AWS_SECRET_ACCESS_KEY: "dummy" + AXIOM_TOKEN: ${{ secrets.AXIOM_TOKEN }} + TEST_APPSIGNAL_PUSH_API_KEY: ${{ secrets.TEST_APPSIGNAL_PUSH_API_KEY }} + CONTAINER_TOOL: "docker" + DD_ENV: "ci" + DD_API_KEY: ${{ secrets.DD_API_KEY }} + RUST_BACKTRACE: full + TEST_LOG: vector=debug + VERBOSE: true + CI: true + PROFILE: debug + +concurrency: + group: ${{ github.workflow }}-${{ github.event.issue.id }} + cancel-in-progress: true + +jobs: + prep-pr: + name: (PR comment) Signal pending to PR + runs-on: ubuntu-latest + if: contains(github.event.comment.body, '/ci-run-integration') || contains(github.event.comment.body, '/ci-run-all') + steps: + - name: Validate issue comment + if: github.event_name == 'issue_comment' + uses: tspascoal/get-user-teams-membership@v2 + with: + username: ${{ github.actor }} + team: 'Vector' + GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + + - name: (PR comment) Get PR branch + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + status: pending + + test-integration: + uses: ./.github/workflows/integration-test.yml + with: + if: ${{ matrix.run.if }} + test_name: ${{ matrix.run.test_name }} + needs: prep-pr + secrets: inherit + strategy: + fail-fast: false + matrix: + run: + - test_name: 'amqp' + if: ${{ contains(github.event.comment.body, 'amqp') || contains(github.event.comment.body, 'all') }} + - test_name: 'appsignal' + if: ${{ contains(github.event.comment.body, 'appsignal') || contains(github.event.comment.body, 'all') }} + - test_name: 'aws' + if: ${{ contains(github.event.comment.body, 'aws') || contains(github.event.comment.body, 'all') }} + - test_name: 'axiom' + if: ${{ contains(github.event.comment.body, 'axiom') || contains(github.event.comment.body, 'all') }} + - test_name: 'azure' + if: ${{ contains(github.event.comment.body, 'azure') || contains(github.event.comment.body, 'all') }} + - test_name: 'clickhouse' + if: ${{ contains(github.event.comment.body, 'clickhouse') || contains(github.event.comment.body, 'all') }} + - test_name: 'databend' + if: ${{ contains(github.event.comment.body, 'databend') || contains(github.event.comment.body, 'all') }} + - test_name: 
'datadog-agent' + if: ${{ contains(github.event.comment.body, 'datadog') || contains(github.event.comment.body, 'all') }} + - test_name: 'datadog-logs' + if: ${{ contains(github.event.comment.body, 'datadog') || contains(github.event.comment.body, 'all') }} + - test_name: 'datadog-metrics' + if: ${{ contains(github.event.comment.body, 'datadog') || contains(github.event.comment.body, 'all') }} + - test_name: 'datadog-traces' + if: ${{ contains(github.event.comment.body, 'datadog') || contains(github.event.comment.body, 'all') }} + - test_name: 'dnstap' + if: ${{ contains(github.event.comment.body, 'dnstap') || contains(github.event.comment.body, 'all') }} + - test_name: 'docker-logs' + if: ${{ contains(github.event.comment.body, 'docker-logs') || contains(github.event.comment.body, 'all') }} + - test_name: 'elasticsearch' + if: ${{ contains(github.event.comment.body, 'elasticsearch') || contains(github.event.comment.body, 'all') }} + - test_name: 'eventstoredb' + if: ${{ contains(github.event.comment.body, 'eventstoredb') || contains(github.event.comment.body, 'all') }} + - test_name: 'fluent' + if: ${{ contains(github.event.comment.body, 'fluent') || contains(github.event.comment.body, 'all') }} + - test_name: 'gcp' + if: ${{ contains(github.event.comment.body, 'gcp') || contains(github.event.comment.body, 'all') }} + - test_name: 'humio' + if: ${{ contains(github.event.comment.body, 'humio') || contains(github.event.comment.body, 'all') }} + - test_name: 'http-client' + if: ${{ contains(github.event.comment.body, 'http-client') || contains(github.event.comment.body, 'all') }} + - test_name: 'influxdb' + if: ${{ contains(github.event.comment.body, 'influxdb') || contains(github.event.comment.body, 'all') }} + - test_name: 'kafka' + if: ${{ contains(github.event.comment.body, 'kafka') || contains(github.event.comment.body, 'all') }} + - test_name: 'logstash' + if: ${{ contains(github.event.comment.body, 'logstash') || contains(github.event.comment.body, 'all') }} + - test_name: 'loki' + if: ${{ contains(github.event.comment.body, 'loki') || contains(github.event.comment.body, 'all') }} + - test_name: 'mongodb' + if: ${{ contains(github.event.comment.body, 'mongodb') || contains(github.event.comment.body, 'all') }} + - test_name: 'nats' + if: ${{ contains(github.event.comment.body, 'nats') || contains(github.event.comment.body, 'all') }} + - test_name: 'nginx' + if: ${{ contains(github.event.comment.body, 'nginx') || contains(github.event.comment.body, 'all') }} + - test_name: 'opentelemetry' + if: ${{ contains(github.event.comment.body, 'opentelemetry') || contains(github.event.comment.body, 'all') }} + - test_name: 'postgres' + if: ${{ contains(github.event.comment.body, 'postgres') || contains(github.event.comment.body, 'all') }} + - test_name: 'prometheus' + if: ${{ contains(github.event.comment.body, 'prometheus') || contains(github.event.comment.body, 'all') }} + - test_name: 'pulsar' + if: ${{ contains(github.event.comment.body, 'pulsar') || contains(github.event.comment.body, 'all') }} + - test_name: 'redis' + if: ${{ contains(github.event.comment.body, 'redis') || contains(github.event.comment.body, 'all') }} + - test_name: 'shutdown' + if: ${{ contains(github.event.comment.body, 'shutdown') || contains(github.event.comment.body, 'all') }} + - test_name: 'splunk' + if: ${{ contains(github.event.comment.body, 'splunk') || contains(github.event.comment.body, 'all') }} + - test_name: 'webhdfs' + if: ${{ contains(github.event.comment.body, 'webhdfs') || 
contains(github.event.comment.body, 'all') }} + + update-pr-status: + name: Signal result to PR + runs-on: ubuntu-latest + needs: test-integration + if: always() && (contains(github.event.comment.body, '/ci-run-integration') || contains(github.event.comment.body, '/ci-run-all')) + steps: + - name: Validate issue comment + if: github.event_name == 'issue_comment' + uses: tspascoal/get-user-teams-membership@v2 + with: + username: ${{ github.actor }} + team: 'Vector' + GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + + - name: (PR comment) Get PR branch + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Submit PR result as ${{ needs.test-integration.result }} + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + status: ${{ needs.test-integration.result }} diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 4fe09304eba32..f8e26564855e1 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -1,34 +1,28 @@ -name: Integration Test Suite +# This workflow is used to run an integration test. +# The most common use case is that it is triggered by another workflow, +# such as the Master Merge Queue Suite, or the Integration Comment. +# +# It can also be triggered on manual dispatch in CI however. +# In that use case, an input for the test name needs to be provided. +# TODO: check if the input is "all" , and run all, without a timeout? + +name: Integration Test on: + workflow_call: + inputs: + if: + required: false + type: boolean + test_name: + required: true + type: string workflow_dispatch: - push: - branches: - - master - paths: - - ".github/workflows/integration-test.yml" - - ".cargo/**" - - "benches/**" - - "lib/**" - - "proto/**" - - "scripts/**" - - "src/**" - - "tests/**" - - "build.rs" - - "Cargo.lock" - - "Cargo.toml" - - "Makefile" - - "rust-toolchain" - pull_request: - types: [opened, synchronize, reopened, labeled] - -concurrency: - # For pull requests, cancel running workflows, for master, run all - # - # `github.event.number` exists for pull requests, otherwise fall back to SHA - # for master - group: ${{ github.workflow }}-${{ github.event.number || github.sha }} - cancel-in-progress: true + inputs: + test_name: + description: "Which integration to test." 
+ required: true + type: string env: AWS_ACCESS_KEY_ID: "dummy" @@ -46,85 +40,36 @@ env: jobs: test-integration: - name: Integration - Linux, ${{ matrix.test }} runs-on: [linux, ubuntu-20.04-8core] - if: | - !github.event.pull_request - || contains(github.event.pull_request.labels.*.name, 'ci-condition: integration tests enable') - strategy: - fail-fast: false - matrix: - include: - - test: 'amqp' - - test: 'appsignal' - - test: 'aws' - - test: 'axiom' - - test: 'azure' - - test: 'clickhouse' - - test: 'databend' - - test: 'datadog-agent' - - test: 'datadog-logs' - - test: 'datadog-metrics' - - test: 'datadog-traces' - - test: 'dnstap' - - test: 'docker-logs' - - test: 'elasticsearch' - - test: 'eventstoredb' - - test: 'fluent' - - test: 'gcp' - - test: 'humio' - - test: 'http-client' - - test: 'influxdb' - - test: 'kafka' - - test: 'logstash' - - test: 'loki' - - test: 'mongodb' - - test: 'nats' - - test: 'nginx' - - test: 'opentelemetry' - - test: 'postgres' - - test: 'prometheus' - - test: 'pulsar' - - test: 'redis' - - test: 'shutdown' - - test: 'splunk' - - test: 'webhdfs' timeout-minutes: 30 + if: inputs.if || github.event_name == 'workflow_dispatch' steps: - - uses: actions/checkout@v3 + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: ${{ github.event_name != 'issue_comment' }} + uses: actions/checkout@v3 + - run: sudo npm -g install @datadog/datadog-ci - - run: make test-integration-${{ matrix.test }} + + - run: make test-integration-${{ inputs.test_name }} env: TEST_DATADOG_API_KEY: ${{ secrets.CI_TEST_DATADOG_API_KEY }} - SPLUNK_VERSION: ${{ matrix.env.SPLUNK_VERSION }} + - name: Upload test results run: scripts/upload-test-results.sh if: always() - - run: make test-integration-${{ matrix.test }}-cleanup + + - run: make test-integration-${{ inputs.test_name }}-cleanup if: ${{ always() }} env: TEST_DATADOG_API_KEY: ${{ secrets.CI_TEST_DATADOG_API_KEY }} - SPLUNK_VERSION: ${{ matrix.env.SPLUNK_VERSION }} - - test-integration-check: - name: test-integration-check - runs-on: ubuntu-20.04 - needs: - - test-integration - steps: - - name: validate - run: echo "OK" - - master-failure: - name: master-failure - if: failure() && github.ref == 'refs/heads/master' - needs: - - test-integration-check - runs-on: ubuntu-20.04 - steps: - - name: Discord notification - env: - DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }} - uses: Ilshidur/action-discord@0.3.2 - with: - args: "Master integration tests failed: " diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml new file mode 100644 index 0000000000000..1dc2ef8140160 --- /dev/null +++ b/.github/workflows/integration.yml @@ -0,0 +1,140 @@ +# Integration Test Suite +# +# This workflow runs the integration tests. If the workflow is triggered in the merge queue, all integration tests +# are run. If the workflow is triggered in a PR commit, then the files changed in the PR are evaluated to determine +# if any integration tests will run. 
+ +name: Integration Test Suite + +on: + pull_request: + merge_group: + types: [checks_requested] + +concurrency: + # `github.event.number` exists for pull requests, otherwise fall back to SHA for merge queue + group: ${{ github.workflow }}-${{ github.event.number || github.event.merge_group.base_sha }} + cancel-in-progress: true + +env: + AWS_ACCESS_KEY_ID: "dummy" + AWS_SECRET_ACCESS_KEY: "dummy" + CONTAINER_TOOL: "docker" + DD_ENV: "ci" + DD_API_KEY: ${{ secrets.DD_API_KEY }} + RUST_BACKTRACE: full + TEST_LOG: vector=debug + VERBOSE: true + CI: true + PROFILE: debug + # observing issues fetching boringssl via HTTPS in the OSX build, seeing if this helps + # can be removed when we switch back to the upstream openssl-sys crate + CARGO_NET_GIT_FETCH_WITH_CLI: true + +jobs: + + changes: + if: github.event_name == 'pull_request' + uses: ./.github/workflows/changes.yml + with: + base_ref: ${{ github.event.pull_request.base.ref }} + head_ref: ${{ github.event.pull_request.head.ref }} + secrets: inherit + + # Calls the Integration Test workflow for each integration that was detected to have files changed that impact it. + integration-matrix: + if: always() + uses: ./.github/workflows/integration-test.yml + with: + if: ${{ matrix.run.if }} + test_name: ${{ matrix.run.test_name }} + secrets: inherit + needs: changes + strategy: + fail-fast: false + matrix: + run: + - test_name: 'amqp' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.amqp == 'true' }} + - test_name: 'appsignal' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.appsignal == 'true' }} + - test_name: 'aws' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.aws == 'true' }} + - test_name: 'axiom' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.axiom == 'true' }} + - test_name: 'azure' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.azure == 'true' }} + - test_name: 'clickhouse' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.clickhouse == 'true' }} + - test_name: 'databend' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.databend == 'true' }} + - test_name: 'datadog-agent' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.datadog == 'true' }} + - test_name: 'datadog-logs' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.datadog == 'true' }} + - test_name: 'datadog-metrics' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.datadog == 'true' }} + - test_name: 'datadog-traces' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.datadog == 'true' }} + - test_name: 'dnstap' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.dnstap == 'true' }} + - test_name: 'docker-logs' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.docker-logs == 'true' }} + - test_name: 'elasticsearch' + if: ${{ github.event_name == 'merge_group' || 
needs.changes.outputs.int-all == 'true' || needs.changes.outputs.elasticsearch == 'true' }} + - test_name: 'eventstoredb' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.eventstoredb == 'true' }} + - test_name: 'fluent' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.fluent == 'true' }} + - test_name: 'gcp' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.gcp == 'true' }} + - test_name: 'humio' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.humio == 'true' }} + - test_name: 'http-client' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.http-client == 'true' }} + - test_name: 'influxdb' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.influxdb == 'true' }} + - test_name: 'kafka' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.kafka == 'true' }} + - test_name: 'logstash' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.logstash == 'true' }} + - test_name: 'loki' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.loki == 'true' }} + - test_name: 'mongodb' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.mongodb == 'true' }} + - test_name: 'nats' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.nats == 'true' }} + - test_name: 'nginx' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.nginx == 'true' }} + - test_name: 'opentelemetry' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.opentelemetry == 'true' }} + - test_name: 'postgres' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.postgres == 'true' }} + - test_name: 'prometheus' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.prometheus == 'true' }} + - test_name: 'pulsar' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.pulsar == 'true' }} + - test_name: 'redis' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.redis == 'true' }} + - test_name: 'shutdown' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' }} + - test_name: 'splunk' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.splunk == 'true' }} + - test_name: 'webhdfs' + if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.webhdfs == 'true' }} + + integration: + name: Integration Test Suite + runs-on: ubuntu-latest + needs: + - integration-matrix + env: + FAILED: ${{ contains(needs.*.result, 'failure') }} + steps: + - run: | + echo "failed=${{ env.FAILED }}" + if [[ "$FAILED" == "true" ]] ; then + exit 1 + else + exit 0 + fi diff --git 
a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index dd1a0a13ac490..b37e03ba12088 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -1,33 +1,33 @@ +# K8s E2E Suite +# +# This workflow runs under any of the following conditions: +# - manual dispatch in GH UI +# - on a PR commit if the kubernetes_logs source was changed +# - in the merge queue +# - on a schedule at midnight UTC Tue-Sat +# - on demand by either of the following comments in a PR: +# - '/ci-run-k8s' +# - '/ci-run-all' +# +# If the workflow trigger is the nightly schedule, all the k8s versions +# are run in the matrix, otherwise, only the latest is run. + name: K8S E2E Suite on: workflow_dispatch: - push: - branches: - - master - paths: - - ".github/workflows/k8s_e2e.yml" - - ".cargo/**" - - "benches/**" - - "lib/**" - - "proto/**" - - "scripts/**" - - "src/**" - - "tests/**" - - "build.rs" - - "Cargo.lock" - - "Cargo.toml" - - "Makefile" - - "rust-toolchain" - - "distribution/**" pull_request: + issue_comment: + types: [created] + merge_group: + types: [checks_requested] + schedule: + # At midnight UTC Tue-Sat + - cron: '0 0 * * 2-6' concurrency: - # For pull requests, cancel running workflows, for master, run all - # - # `github.event.number` exists for pull requests, otherwise fall back to SHA - # for master - group: ${{ github.workflow }}-${{ github.event.number || github.sha }} + group: ${{ github.workflow }}-${{ github.event.number || github.event.issue.id || github.event.merge_group.base_sha || github.event.schedule || github.sha }} + cancel-in-progress: true env: @@ -42,35 +42,82 @@ env: PROFILE: debug jobs: + changes: + if: github.event_name != 'issue_comment' || (github.event.issue.pull_request && + (contains(github.event.comment.body, '/ci-run-k8s') || contains(github.event.comment.body, '/ci-run-all'))) + uses: ./.github/workflows/changes.yml + with: + base_ref: ${{ github.event.pull_request.base.ref }} + head_ref: ${{ github.event.pull_request.head.ref }} + secrets: inherit + build-x86_64-unknown-linux-gnu: name: Build - x86_64-unknown-linux-gnu runs-on: [linux, ubuntu-20.04-8core] - if: | - !github.event.pull_request - || contains(github.event.pull_request.labels.*.name, 'ci-condition: k8s e2e tests enable') - || contains(github.event.pull_request.labels.*.name, 'ci-condition: k8s e2e all targets') + needs: changes + if: github.event_name != 'pull_request' || needs.changes.outputs.k8s == 'true' # cargo-deb requires a release build, but we don't need optimizations for tests env: CARGO_PROFILE_RELEASE_OPT_LEVEL: 0 CARGO_PROFILE_RELEASE_CODEGEN_UNITS: 256 CARGO_INCREMENTAL: 0 steps: - - uses: actions/checkout@v3 + - name: Validate issue comment + if: github.event_name == 'issue_comment' + uses: tspascoal/get-user-teams-membership@v2 + with: + username: ${{ github.actor }} + team: 'Vector' + GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + if: ${{ github.event_name == 'issue_comment' }} + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + status: pending + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: 
${{ github.event_name != 'issue_comment' }} + uses: actions/checkout@v3 + - uses: actions/cache@v3 with: path: | ~/.cargo/registry ~/.cargo/git key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: echo "::add-matcher::.github/matchers/rust.json" - run: VECTOR_VERSION="$(cargo vdev version)" make package-deb-x86_64-unknown-linux-gnu + - uses: actions/upload-artifact@v3 with: name: e2e-test-deb-package path: target/artifacts/* + - name: (PR comment) Set latest commit status as 'failure' + uses: myrotvorets/set-commit-status-action@1.1.6 + if: failure() && github.event_name == 'issue_comment' + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + status: 'failure' + # GitHub Actions don't support `matrix` at the job-level `if:` condition. # We apply this workaround - compute `matrix` in a preceding job, and assign # it's value dynamically at the actual test job. @@ -80,13 +127,19 @@ jobs: compute-k8s-test-plan: name: Compute K8s test plan runs-on: [linux, ubuntu-20.04-8core] + needs: changes + if: github.event_name != 'pull_request' || needs.changes.outputs.k8s == 'true' outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} - if: | - !github.event.pull_request - || contains(github.event.pull_request.labels.*.name, 'ci-condition: k8s e2e tests enable') - || contains(github.event.pull_request.labels.*.name, 'ci-condition: k8s e2e all targets') steps: + - name: Validate issue comment + if: github.event_name == 'issue_comment' + uses: tspascoal/get-user-teams-membership@v2 + with: + username: ${{ github.actor }} + team: 'Vector' + GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + - uses: actions/github-script@v6.4.1 id: set-matrix with: @@ -101,11 +154,11 @@ jobs: // https://cloud.google.com/kubernetes-engine/docs/release-notes // https://docs.microsoft.com/en-us/azure/aks/supported-kubernetes-versions?tabs=azure-cli#aks-kubernetes-release-calendar const kubernetes_version = [ - { version: "v1.23.3", is_essential: true }, - { version: "v1.22.5", is_essential: true }, - { version: "v1.21.8", is_essential: true }, - { version: "v1.20.14", is_essential: true }, - { version: "v1.19.8" }, + { version: "v1.23.3", is_essential: true }, + { version: "v1.22.5", is_essential: false }, + { version: "v1.21.8", is_essential: false }, + { version: "v1.20.14", is_essential: false }, + { version: "v1.19.8", is_essential: false }, ] const container_runtime = [ "docker", @@ -113,15 +166,10 @@ jobs: // https://github.com/kubernetes/minikube/issues/12928 // "crio", ] - const ci_condition_label = 'ci-condition: k8s e2e all targets' - // Planing. - const is_in_pull_request = !!context.payload.pull_request; - const should_test_all_targets = ( - !is_in_pull_request || - context.payload.pull_request.labels.some(label => label.name === ci_condition_label) - ) - const filter_targets = array => array.filter(val => should_test_all_targets || val.is_essential) + // Run all versions if triggered by nightly schedule. Otherwise only run latest. 
+ const run_all = context.eventName == "schedule"; + const filter_targets = array => array.filter(val => run_all || val.is_essential) const matrix = { minikube_version, @@ -147,36 +195,82 @@ jobs: matrix: ${{ fromJson(needs.compute-k8s-test-plan.outputs.matrix) }} fail-fast: false steps: - - name: Checkout + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + - uses: actions/download-artifact@v3 with: name: e2e-test-deb-package path: target/artifacts + - name: Setup Minikube run: scripts/ci-setup-minikube.sh env: KUBERNETES_VERSION: ${{ matrix.kubernetes_version.version }} MINIKUBE_VERSION: ${{ matrix.minikube_version }} CONTAINER_RUNTIME: ${{ matrix.container_runtime }} + - run: make test-e2e-kubernetes env: USE_MINIKUBE_CACHE: "true" SKIP_PACKAGE_DEB: "true" CARGO_INCREMENTAL: 0 - master-failure: - name: master-failure - if: failure() && github.ref == 'refs/heads/master' - needs: - - build-x86_64-unknown-linux-gnu - - compute-k8s-test-plan - - test-e2e-kubernetes - runs-on: ubuntu-20.04 + - name: (PR comment) Set latest commit status as failure + uses: myrotvorets/set-commit-status-action@1.1.6 + if: failure() && github.event_name == 'issue_comment' + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + status: 'failure' + + final-result: + name: K8s E2E Suite + runs-on: ubuntu-latest + needs: test-e2e-kubernetes + if: | + always() && (github.event_name != 'issue_comment' || (github.event.issue.pull_request + && (contains(github.event.comment.body, '/ci-run-k8s') || contains(github.event.comment.body, '/ci-run-all')))) + env: + FAILED: ${{ contains(needs.*.result, 'failure') }} steps: - - name: Discord notification - env: - DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }} - uses: Ilshidur/action-discord@0.3.2 - with: - args: "Master k8s e2e tests failed: " + - name: Validate issue comment + if: github.event_name == 'issue_comment' + uses: tspascoal/get-user-teams-membership@v2 + with: + username: ${{ github.actor }} + team: 'Vector' + GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + + - name: (PR comment) Get PR branch + if: success() && github.event_name == 'issue_comment' + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Submit PR result as success + if: success() && github.event_name == 'issue_comment' + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + status: 'success' + + - run: | + echo "failed=${{ env.FAILED }}" + if [[ "$FAILED" == "true" ]] ; then + exit 1 + else + exit 0 + fi diff --git a/.github/workflows/master_merge_queue.yml b/.github/workflows/master_merge_queue.yml new file mode 100644 index 0000000000000..6e38bd7f29a24 --- /dev/null +++ b/.github/workflows/master_merge_queue.yml @@ -0,0 +1,153 @@ +# Master Merge Queue Test Suite +# +# This workflow orchestrates a collection of workflows that are required for the merge queue check. +# +# Most of the workflows that are jobs within this one, are able to be run on demand +# by issuing a PR comment with the respective command to trigger said workflow. 
+# +# The design of this workflow relies on the first real job "changes" to detect file +# changes against the base, and each downstream workflow after that will only be +# called if the files for that area have changed. +# + +name: Master Merge Queue Test Suite + +on: + # Only want to run this on merge queue, but because GH doesn't allow specifying different required checks + # for pull request and merge queue, we need to "run" it in pull request, but in the jobs we will just auto pass. + pull_request: + merge_group: + types: [checks_requested] + +concurrency: + # `github.event.number` exists for pull requests, otherwise fall back to SHA for merge queue + group: ${{ github.workflow }}-${{ github.event.number || github.event.merge_group.base_sha }} + cancel-in-progress: true + +env: + AWS_ACCESS_KEY_ID: "dummy" + AWS_SECRET_ACCESS_KEY: "dummy" + CONTAINER_TOOL: "docker" + DD_ENV: "ci" + DD_API_KEY: ${{ secrets.DD_API_KEY }} + RUST_BACKTRACE: full + TEST_LOG: vector=debug + VERBOSE: true + CI: true + PROFILE: debug + # observing issues fetching boringssl via HTTPS in the OSX build, seeing if this helps + # can be removed when we switch back to the upstream openssl-sys crate + CARGO_NET_GIT_FETCH_WITH_CLI: true + +jobs: + # This is the entry job which is required for all the actual tests in this workflow. + # If we don't run this job (such as in a pull request), then by consequence all downstream + # test jobs are not run. This allows us to not have to check for merge group in each job. + changes: + if: ${{ github.event_name == 'merge_group' }} + uses: ./.github/workflows/changes.yml + with: + base_ref: ${{ github.event.merge_group.base_ref }} + head_ref: ${{ github.event.merge_group.head_ref }} + secrets: inherit + + test-cli: + if: needs.changes.outputs.source == 'true' + uses: ./.github/workflows/cli.yml + needs: changes + secrets: inherit + + test-misc: + if: needs.changes.outputs.source == 'true' + uses: ./.github/workflows/misc.yml + needs: changes + secrets: inherit + + test-environment: + uses: ./.github/workflows/environment.yml + needs: changes + secrets: inherit + + check-msrv: + if: needs.changes.outputs.source == 'true' + uses: ./.github/workflows/msrv.yml + needs: changes + secrets: inherit + + check-component-features: + if: needs.changes.outputs.source == 'true' + uses: ./.github/workflows/component_features.yml + needs: changes + secrets: inherit + + cross-linux: + # We run cross checks when dependencies change to ensure they still build. + # This helps us avoid adopting dependencies that aren't compatible with other architectures. + if: needs.changes.outputs.dependencies == 'true' + uses: ./.github/workflows/cross.yml + needs: changes + secrets: inherit + + unit-mac: + if: needs.changes.outputs.source == 'true' + uses: ./.github/workflows/unit_mac.yml + needs: changes + secrets: inherit + + unit-windows: + if: needs.changes.outputs.source == 'true' + uses: ./.github/workflows/unit_windows.yml + needs: changes + secrets: inherit + + install-sh: + if: needs.changes.outputs.install == 'true' + uses: ./.github/workflows/install-sh.yml + needs: changes + secrets: inherit + + # TODO: in a followup PR, run the regression workflow here, as a single reusable workflow. + # + # NOTE: This design of passing in the pr-number to the Regression workflow requires that the merge queue + # be configured contain a maximum of one PR per execution. This is so that the regression report generated + # by the workflow can be posted as a comment to the PR. 
+ # At a later time, we may want to revisit this in order to allow multiple PRs to be included in a merge + # queue execution. At such time, the logic of uploading of the report will need to change to account for + # multiple PRs. + # regression: + # if: needs.changes.outputs.source == 'true' + # uses: ./.github/workflows/regression.yml + # with: + # pr_number: ${{ needs.changes.outputs.pr-number }} + # base_sha: ${{ github.event.merge_group.base_sha }} + # head_sha: ${{ github.event.merge_group.head_sha }} + # needs: changes + # secrets: inherit + + master-merge-queue-check: + name: Master Merge Queue Suite + # Always run this so that pull_request triggers are marked as success. + if: always() + runs-on: ubuntu-20.04 + needs: + - changes + - test-cli + - test-misc + - test-environment + - check-msrv + - check-component-features + - cross-linux + - unit-mac + - unit-windows + - install-sh + env: + FAILED: ${{ contains(needs.*.result, 'failure') }} + steps: + - name: exit + run: | + echo "failed=${{ env.FAILED }}" + if [[ "$FAILED" == "true" ]] ; then + exit 1 + else + exit 0 + fi diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml new file mode 100644 index 0000000000000..f66c577ec7c54 --- /dev/null +++ b/.github/workflows/misc.yml @@ -0,0 +1,62 @@ +name: Miscellaneous - Linux + +on: + workflow_call: + +jobs: + test-misc: + runs-on: [linux, ubuntu-20.04-8core] + env: + CARGO_INCREMENTAL: 0 + steps: + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + if: ${{ github.event_name == 'issue_comment' }} + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Miscellaneous - Linux + status: pending + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: ${{ github.event_name != 'issue_comment' }} + uses: actions/checkout@v3 + + - uses: actions/cache@v3 + name: Cache Cargo registry + index + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh + - run: bash scripts/environment/prepare.sh + - run: echo "::add-matcher::.github/matchers/rust.json" + - run: make test-behavior + - run: make check-examples + - run: make test-docs + + - name: (PR comment) Set latest commit status as ${{ job.status }} + uses: myrotvorets/set-commit-status-action@1.1.6 + if: always() && github.event_name == 'issue_comment' + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Miscellaneous - Linux + status: ${{ job.status }} diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml new file mode 100644 index 0000000000000..361aa9cd4a3b8 --- /dev/null +++ b/.github/workflows/msrv.yml @@ -0,0 +1,21 @@ +name: Check minimum supported Rust version + +on: + workflow_call: + +env: + RUST_BACKTRACE: full + CI: true + PROFILE: debug + # observing issues fetching boringssl via HTTPS in the OSX build, seeing if this helps + # can be removed when we switch back to the upstream openssl-sys crate + 
CARGO_NET_GIT_FETCH_WITH_CLI: true + +jobs: + check-msrv: + runs-on: [ubuntu-20.04] + steps: + - uses: actions/checkout@v3 + - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh + - run: cargo install cargo-msrv --version 0.15.1 + - run: cargo msrv verify diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index ec11abd37f65c..40764c5586577 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -1,86 +1,246 @@ -# Regression Detection +# Regression Detection Suite +# +# This workflow runs under the following conditions: +# - in the merge queue if any source files were modified, added or deleted. +# - on demand by a PR comment matching either of: +# - '/ci-run-regression' +# - '/ci-run-all' +# (the comment issuer must be a member of the Vector GH team) # # This workflow runs our regression detection experiments, which are relative -# evaluations of the base SHA for the PR to whatever SHA was just pushed into -# the project (unless that SHA happens to be master branch HEAD). The goal is to -# give quick-ish feedback on all-up Vector for a variety of configs as to -# whether throughput performance has gone down, gotten more variable in the +# evaluations of the base SHA and head SHA, whose determination depends on how +# the workflow is invoked. +# +# The goal is to give quick-ish feedback on all-up Vector for a variety of configs +# as to whether throughput performance has gone down, gotten more variable in the # pushed SHA. # # Regression detection is always done relative to the pushed SHA, meaning any # changes you introduce to the experiment will be picked up both for the base -# SHA variant and your current SHA. Tags are SHA-SHA. The first SHA is the one -# that triggered this workflow, the second is the one of the Vector being -# tested. For comparison images the two SHAs are identical. +# SHA variant and your current SHA. +# +# Docker image tags are SHA-SHA. The first SHA is the one that triggered this +# workflow, the second is the one of the Vector being tested. +# For comparison images the two SHAs are identical. -name: Regression Detector +name: Regression Detection Suite on: - pull_request: - paths-ignore: - - "docs/**" - - "rfcs/**" - - "website/**" merge_group: types: [checks_requested] + workflow_call: + # Don't want to run this on each PR commit, but because GH doesn't allow specifying different required checks + # for pull request and merge queue, we need to "run" it in pull request, but in the jobs we will just auto pass. 
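The auto-pass works because the real jobs are skipped on pull_request runs, and the terminal status job only goes red when a needed job actually reports 'failure'; a 'skipped' result never trips it. A minimal shell sketch of that terminal check, mirroring the `contains(needs.*.result, 'failure')` expression used by the suite-level jobs throughout this patch:

    # Aggregate result for the required check; 'skipped' and 'success' both fall through to exit 0.
    FAILED="false"    # would be ${{ contains(needs.*.result, 'failure') }} in the workflow
    echo "failed=${FAILED}"
    if [[ "$FAILED" == "true" ]]; then
      exit 1    # at least one needed job failed: mark the required check red
    else
      exit 0    # nothing failed (including the all-skipped pull_request case): auto-pass
    fi
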
+ pull_request: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.issue.id || github.event.merge_group.base_sha || github.sha }} + cancel-in-progress: true jobs: - cancel-previous: - runs-on: ubuntu-22.04 - timeout-minutes: 3 + + # Only run this workflow if files changed in areas that could possibly introduce a regression + should-run: + runs-on: ubuntu-latest + if: github.event_name != 'pull_request' + outputs: + source_changed: ${{ steps.filter.outputs.SOURCE_CHANGED }} + comment_valid: ${{ steps.comment.outputs.isTeamMember }} steps: - - uses: styfle/cancel-workflow-action@0.11.0 - with: - access_token: ${{ secrets.GITHUB_TOKEN }} - all_but_latest: true # can cancel workflows scheduled later + - uses: actions/checkout@v3 + + - name: Collect file changes + id: changes + if: github.event_name == 'merge_group' + uses: dorny/paths-filter@v2 + with: + base: ${{ github.event.merge_group.base_ref }} + ref: ${{ github.event.merge_group.head_ref }} + list-files: shell + filters: | + all_changed: + - added|deleted|modified: "**" + ignore: + - "distribution/**" + - "rust-doc/**" + - "docs/**" + - "rfcs/**" + - "testing/**" + - "tilt/**" + - "website/**" + - "*.md" + - "Tiltfile" + - "netlify.toml" + - "NOTICE" + - "LICENSE-3rdparty.csv" + - "LICENSE" + + # This step allows us to conservatively run the tests if we added a new + # file or directory for source code, but forgot to add it to this workflow. + # Instead, we may unnecessarily run the test on new file or dir additions that + # wouldn't likely introduce regressions. + - name: Determine if should not run due to irrelevant file changes + id: filter + if: github.event_name == 'merge_group' + env: + ALL: ${{ steps.changes.outputs.all_changed_files }} + IGNORE: ${{ steps.changes.outputs.ignore_files }} + run: | + echo "ALL='${{ env.ALL }}'" + echo "IGNORE='${{ env.IGNORE }}'" + export SOURCE_CHANGED=$(comm -2 -3 <(printf "%s\n" "${{ env.ALL }}") <(printf "%s\n" "${{ env.IGNORE }}")) + echo "SOURCE_CHANGED='${SOURCE_CHANGED}'" + + if [ "${SOURCE_CHANGED}" == "" ]; then + export SOURCE_CHANGED="false" + else + export SOURCE_CHANGED="true" + fi + + echo "SOURCE_CHANGED='${SOURCE_CHANGED}'" + echo "SOURCE_CHANGED=${SOURCE_CHANGED}" >> $GITHUB_OUTPUT compute-metadata: - name: Compute metadata for regression experiments + name: Compute metadata runs-on: ubuntu-22.04 + needs: should-run + if: github.event_name != 'merge_group' || needs.should-run.outputs.source_changed == 'true' outputs: - pr-number: ${{ steps.pr-metadata.outputs.PR_NUMBER }} + pr-number: ${{ steps.pr-metadata-merge-queue.outputs.PR_NUMBER || steps.pr-metadata-comment.outputs.PR_NUMBER }} + baseline-sha: ${{ steps.pr-metadata-merge-queue.outputs.BASELINE_SHA || steps.pr-metadata-comment.outputs.BASELINE_SHA }} + baseline-tag: ${{ steps.pr-metadata-merge-queue.outputs.BASELINE_TAG || steps.pr-metadata-comment.outputs.BASELINE_TAG }} + comparison-sha: ${{ steps.pr-metadata-merge-queue.outputs.COMPARISON_SHA || steps.pr-metadata-comment.outputs.COMPARISON_SHA }} + comparison-tag: ${{ steps.pr-metadata-merge-queue.outputs.COMPARISON_TAG || steps.pr-metadata-comment.outputs.COMPARISON_TAG }} + + # below are used in the experiment/analyze jobs + cpus: ${{ steps.system.outputs.CPUS }} + memory: ${{ steps.system.outputs.MEMORY }} + vector-cpus: ${{ steps.system.outputs.VECTOR_CPUS }} - comparison-sha: ${{ steps.comparison.outputs.COMPARISON }} - comparison-tag: ${{ steps.comparison.outputs.COMPARISON_TAG }} - baseline-sha: ${{ steps.baseline.outputs.BASELINE }} - baseline-tag: 
${{ steps.baseline.outputs.BASELINE_TAG }} + replicas: ${{ steps.experimental-meta.outputs.REPLICAS }} + warmup-seconds: ${{ steps.experimental-meta.outputs.WARMUP_SECONDS }} + total-samples: ${{ steps.experimental-meta.outputs.TOTAL_SAMPLES }} + p-value: ${{ steps.experimental-meta.outputs.P_VALUE }} + smp-version: ${{ steps.experimental-meta.outputs.SMP_CRATE_VERSION }} + lading-version: ${{ steps.experimental-meta.outputs.LADING_VERSION }} steps: + - uses: actions/checkout@v3 - with: - ref: ${{ github.base_ref }} - path: baseline-vector - - name: Setup PR metadata - id: pr-metadata + # If triggered by issue comment, the event payload doesn't directly contain the head and base sha from the PR. + # But, we can retrieve this info from some commands. + - name: Get PR metadata (issue_comment) + id: pr-metadata-comment + if: github.event_name == 'issue_comment' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - echo "PR_NUMBER=${{ github.event.number }}" >> $GITHUB_OUTPUT + export PR_NUMBER=${{ github.event.issue.number }} + echo "PR_NUMBER=${PR_NUMBER}" >> $GITHUB_OUTPUT - - name: Setup baseline variables - id: baseline - run: | - pushd baseline-vector - export BASELINE_SHA=$(git rev-parse HEAD) - popd + gh pr checkout ${PR_NUMBER} + + export BASELINE_SHA=$(git merge-base --fork-point master) + echo "BASELINE_SHA=${BASELINE_SHA}" >> $GITHUB_OUTPUT + + export COMPARISON_SHA=$(git rev-parse HEAD) + echo "COMPARISON_SHA=${COMPARISON_SHA}" >> $GITHUB_OUTPUT + + export BASELINE_TAG="${PR_NUMBER}-${COMPARISON_SHA}-${BASELINE_SHA}" + echo "BASELINE_TAG=${BASELINE_TAG}" >> $GITHUB_OUTPUT + + export COMPARISON_TAG="${PR_NUMBER}-${COMPARISON_SHA}-${COMPARISON_SHA}" + echo "COMPARISON_TAG=${COMPARISON_TAG}" >> $GITHUB_OUTPUT + + echo "pr number is: ${PR_NUMBER}" - export BASELINE_TAG="${{ github.event.pull_request.head.sha }}-${BASELINE_SHA}" echo "baseline sha is: ${BASELINE_SHA}" echo "baseline tag is: ${BASELINE_TAG}" - echo "BASELINE=${BASELINE_SHA}" >> $GITHUB_OUTPUT - echo "BASELINE_TAG=${BASELINE_TAG}" >> $GITHUB_OUTPUT + echo "comparison sha is: ${COMPARISON_SHA}" + echo "comparison tag is: ${COMPARISON_TAG}" - - name: Setup comparison variables - id: comparison + # If triggered by merge queue, the PR number is not available in the payload. While we restrict the number of PRs in the + # queue to 1, we can get the PR number by parsing the merge queue temp branch's ref. 
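For reference, the derivation the next step performs, as a minimal standalone shell sketch; the ref, PR number, and SHAs below are placeholders, not values from this change:

    # Extract the PR number from the merge-queue temp branch ref (hypothetical ref shown).
    GITHUB_REF="refs/heads/gh-readonly-queue/master/pr-1234-0123456789abcdef"
    PR_NUMBER=$(echo "${GITHUB_REF}" | sed -n 's|^refs/heads/gh-readonly-queue/master/pr-\([0-9]*\)-.*$|\1|p')   # -> 1234
    # Baseline/comparison SHAs come straight from the merge_group event payload (placeholders here).
    BASELINE_SHA="1111111"      # github.event.merge_group.base_sha
    COMPARISON_SHA="2222222"    # github.event.merge_group.head_sha
    # Image tags combine the PR number, the comparison SHA, and the SHA of the image being built.
    BASELINE_TAG="${PR_NUMBER}-${COMPARISON_SHA}-${BASELINE_SHA}"       # 1234-2222222-1111111
    COMPARISON_TAG="${PR_NUMBER}-${COMPARISON_SHA}-${COMPARISON_SHA}"   # 1234-2222222-2222222
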
+ - name: Get PR metadata (merge queue) + id: pr-metadata-merge-queue + if: github.event_name != 'issue_comment' run: | - export COMPARISON_SHA=${{ github.event.pull_request.head.sha }} - export COMPARISON_TAG="${{ github.event.pull_request.head.sha }}-${{ github.event.pull_request.head.sha }}" + export PR_NUMBER=$(echo "${{ github.ref }}" | sed -n 's|^refs/heads/gh-readonly-queue/master/pr-\([0-9]*\)-.*$|\1|p') + echo "PR_NUMBER=${PR_NUMBER}" >> $GITHUB_OUTPUT + + export BASELINE_SHA=${{ github.event.merge_group.base_sha }} + echo "BASELINE_SHA=${BASELINE_SHA}" >> $GITHUB_OUTPUT + + export COMPARISON_SHA=${{ github.event.merge_group.head_sha }} + echo "COMPARISON_SHA=${COMPARISON_SHA}" >> $GITHUB_OUTPUT + + export BASELINE_TAG="${PR_NUMBER}-${COMPARISON_SHA}-${BASELINE_SHA}" + echo "BASELINE_TAG=${BASELINE_TAG}" >> $GITHUB_OUTPUT + + export COMPARISON_TAG="${PR_NUMBER}-${COMPARISON_SHA}-${COMPARISON_SHA}" + echo "COMPARISON_TAG=${COMPARISON_TAG}" >> $GITHUB_OUTPUT + + echo "pr number is: ${PR_NUMBER}" + + echo "baseline sha is: ${BASELINE_SHA}" + echo "baseline tag is: ${BASELINE_TAG}" echo "comparison sha is: ${COMPARISON_SHA}" echo "comparison tag is: ${COMPARISON_TAG}" - echo "COMPARISON=${COMPARISON_SHA}" >> $GITHUB_OUTPUT - echo "COMPARISON_TAG=${COMPARISON_TAG}" >> $GITHUB_OUTPUT + - name: Setup experimental metadata + id: experimental-meta + run: | + export WARMUP_SECONDS="45" + export REPLICAS="10" + export TOTAL_SAMPLES="600" + export P_VALUE="0.1" + export SMP_CRATE_VERSION="0.7.3" + export LADING_VERSION="0.12.0" + + echo "warmup seconds: ${WARMUP_SECONDS}" + echo "replicas: ${REPLICAS}" + echo "total samples: ${TOTAL_SAMPLES}" + echo "regression p-value: ${P_VALUE}" + echo "smp crate version: ${SMP_CRATE_VERSION}" + echo "lading version: ${LADING_VERSION}" + + echo "WARMUP_SECONDS=${WARMUP_SECONDS}" >> $GITHUB_OUTPUT + echo "REPLICAS=${REPLICAS}" >> $GITHUB_OUTPUT + echo "TOTAL_SAMPLES=${TOTAL_SAMPLES}" >> $GITHUB_OUTPUT + echo "P_VALUE=${P_VALUE}" >> $GITHUB_OUTPUT + echo "SMP_CRATE_VERSION=${SMP_CRATE_VERSION}" >> $GITHUB_OUTPUT + echo "LADING_VERSION=${LADING_VERSION}" >> $GITHUB_OUTPUT + + - name: Setup system details + id: system + run: | + export CPUS="7" + export MEMORY="30g" + export VECTOR_CPUS="4" + + echo "cpus total: ${CPUS}" + echo "memory total: ${MEMORY}" + echo "vector cpus: ${VECTOR_CPUS}" + echo "CPUS=${CPUS}" >> $GITHUB_OUTPUT + echo "MEMORY=${MEMORY}" >> $GITHUB_OUTPUT + echo "VECTOR_CPUS=${VECTOR_CPUS}" >> $GITHUB_OUTPUT + + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + if: ${{ github.event_name == 'issue_comment' }} + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.pr-metadata-comment.outputs.COMPARISON_SHA }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Regression Detection Suite + status: pending ## ## BUILD ## @@ -114,7 +274,7 @@ jobs: builder: ${{ steps.buildx.outputs.name }} outputs: type=docker,dest=${{ runner.temp }}/baseline-image.tar tags: | - vector:${{ needs.compute-metadata.outputs.pr-number }}-${{ needs.compute-metadata.outputs.baseline-tag }} + vector:${{ needs.compute-metadata.outputs.baseline-tag }} - name: Upload image as artifact uses: actions/upload-artifact@v3 @@ -151,7 +311,7 @@ jobs: builder: ${{ steps.buildx.outputs.name }} outputs: type=docker,dest=${{ runner.temp }}/comparison-image.tar tags: | - vector:${{ 
needs.compute-metadata.outputs.pr-number }}-${{ needs.compute-metadata.outputs.comparison-tag }} + vector:${{ needs.compute-metadata.outputs.comparison-tag }} - name: Upload image as artifact uses: actions/upload-artifact@v3 @@ -159,25 +319,472 @@ jobs: name: comparison-image path: "${{ runner.temp }}/comparison-image.tar" - transmit-metadata: - name: Transmit metadata to trusted workflow + confirm-valid-credentials: + name: Confirm AWS credentials are minimally valid + runs-on: ubuntu-22.04 + needs: + - compute-metadata + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2.0.0 + with: + aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} + aws-region: us-west-2 + + - name: Download SMP binary + run: | + aws s3 cp s3://smp-cli-releases/v${{ needs.compute-metadata.outputs.smp-version }}/x86_64-unknown-linux-gnu/smp ${{ runner.temp }}/bin/smp + + ## + ## SUBMIT + ## + + upload-baseline-image-to-ecr: + name: Upload baseline images to ECR + runs-on: ubuntu-22.04 + needs: + - compute-metadata + - confirm-valid-credentials + - build-baseline + steps: + - name: 'Download baseline image' + uses: actions/download-artifact@v3 + with: + name: baseline-image + + - name: Load baseline image + run: | + docker load --input baseline-image.tar + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2.0.0 + with: + aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} + aws-region: us-west-2 + + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + + - name: Docker Login to ECR + uses: docker/login-action@v2 + with: + registry: ${{ steps.login-ecr.outputs.registry }} + + - name: Tag & push baseline image + run: | + docker tag vector:${{ needs.compute-metadata.outputs.baseline-tag }} ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.baseline-tag }} + docker push ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.baseline-tag }} + + upload-comparison-image-to-ecr: + name: Upload comparison images to ECR + runs-on: ubuntu-22.04 + needs: + - compute-metadata + - confirm-valid-credentials + - build-comparison + steps: + - name: 'Download comparison image' + uses: actions/download-artifact@v3 + with: + name: comparison-image + + - name: Load comparison image + run: | + docker load --input comparison-image.tar + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2.0.0 + with: + aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} + aws-region: us-west-2 + + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + + - name: Docker Login to ECR + uses: docker/login-action@v2 + with: + registry: ${{ steps.login-ecr.outputs.registry }} + + - name: Tag & push comparison image + run: | + docker tag vector:${{ needs.compute-metadata.outputs.comparison-tag }} ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.comparison-tag }} + docker push ${{ steps.login-ecr.outputs.registry }}/${{ 
secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.comparison-tag }} + + submit-job: + name: Submit regression job + runs-on: ubuntu-22.04 + needs: + - compute-metadata + - upload-baseline-image-to-ecr + - upload-comparison-image-to-ecr + steps: + - name: Check status, in-progress + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='pending' \ + -f description='Experiments submitted to the Regression Detection cluster.' \ + -f context='Regression Detection Suite / submission' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + - uses: actions/checkout@v3 + with: + ref: ${{ needs.compute-metadata.outputs.comparison-sha }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2.0.0 + with: + aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} + aws-region: us-west-2 + + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + + - name: Download SMP binary + run: | + aws s3 cp s3://smp-cli-releases/v${{ needs.compute-metadata.outputs.smp-version }}/x86_64-unknown-linux-gnu/smp ${{ runner.temp }}/bin/smp + + - name: Submit job + env: + RUST_LOG: info + run: | + chmod +x ${{ runner.temp }}/bin/smp + + ${{ runner.temp }}/bin/smp --team-id ${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }} job submit \ + --lading-version ${{ needs.compute-metadata.outputs.lading-version }} \ + --total-samples ${{ needs.compute-metadata.outputs.total-samples }} \ + --warmup-seconds ${{ needs.compute-metadata.outputs.warmup-seconds }} \ + --replicas ${{ needs.compute-metadata.outputs.replicas }} \ + --baseline-image ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.baseline-tag }} \ + --comparison-image ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.comparison-tag }} \ + --baseline-sha ${{ needs.compute-metadata.outputs.baseline-sha }} \ + --comparison-sha ${{ needs.compute-metadata.outputs.comparison-sha }} \ + --target-command "/usr/local/bin/vector" \ + --target-config-dir ${{ github.workspace }}/regression/ \ + --target-cpu-allotment "${{ needs.compute-metadata.outputs.cpus }}" \ + --target-memory-allotment "${{ needs.compute-metadata.outputs.memory }}" \ + --target-environment-variables "VECTOR_THREADS=${{ needs.compute-metadata.outputs.vector-cpus }},VECTOR_REQUIRE_HEALTHY=true" \ + --target-name vector \ + --submission-metadata ${{ runner.temp }}/submission-metadata + + - uses: actions/upload-artifact@v3 + with: + name: vector-submission-metadata + path: ${{ runner.temp }}/submission-metadata + + - name: Await job + timeout-minutes: 120 + env: + RUST_LOG: info + run: | + chmod +x ${{ runner.temp }}/bin/smp + + ${{ runner.temp }}/bin/smp --team-id ${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }} job status \ + --wait \ + --wait-delay-seconds 60 \ + --wait-timeout-minutes 90 \ + --submission-metadata ${{ runner.temp }}/submission-metadata + + - name: Handle cancellation if necessary + if: ${{ cancelled() }} + env: + RUST_LOG: info + run: | + chmod +x ${{ runner.temp }}/bin/smp + ${{ runner.temp 
}}/bin/smp --team-id ${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }} job cancel \ + --submission-metadata ${{ runner.temp }}/submission-metadata + + - name: Check status, cancelled + if: ${{ cancelled() }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='failure' \ + -f description='Experiments submitted to the Regression Detection cluster cancelled.' \ + -f context='Regression Detection Suite / submission' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + - name: Check status, success + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='success' \ + -f description='Experiments submitted to the Regression Detection cluster successfully.' \ + -f context='Regression Detection Suite / submission' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + - name: Check status, failure + if: ${{ failure() }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='success' \ + -f description='Experiments submitted to the Regression Detection Suite failed.' \ + -f context='Regression Detection Suite / submission' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + ## + ## ANALYZE + ## + + detect-regression: + name: Determine regression status runs-on: ubuntu-22.04 needs: + - submit-job - compute-metadata steps: - - name: Write out metadata - run: | - echo "COMPARISON_TAG=${{ needs.compute-metadata.outputs.pr-number }}-${{ needs.compute-metadata.outputs.comparison-tag }}" > ${{ runner.temp }}/meta - echo "COMPARISON_SHA=${{ needs.compute-metadata.outputs.comparison-sha }}" >> ${{ runner.temp }}/meta - echo "BASELINE_TAG=${{ needs.compute-metadata.outputs.pr-number }}-${{ needs.compute-metadata.outputs.baseline-tag }}" >> ${{ runner.temp }}/meta - echo "BASELINE_SHA=${{ needs.compute-metadata.outputs.baseline-sha }}" >> ${{ runner.temp }}/meta - echo "CHECKOUT_SHA=${{ github.sha }}" >> ${{ runner.temp }}/meta - echo "HEAD_SHA=${{ github.event.pull_request.head.sha }}" >> ${{ runner.temp }}/meta - echo "BASE_SHA=${{ github.event.pull_request.base.sha }}" >> ${{ runner.temp }}/meta - echo "GITHUB_EVENT_NUMBER=${{ github.event.number }}" >> ${{ runner.temp }}/meta - - - name: Upload metadata + - uses: actions/checkout@v3 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2.0.0 + with: + aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} + aws-region: us-west-2 + + - name: Download SMP binary + run: | + aws s3 cp s3://smp-cli-releases/v${{ needs.compute-metadata.outputs.smp-version }}/x86_64-unknown-linux-gnu/smp ${{ runner.temp }}/bin/smp + + - name: Download submission metadata + uses: actions/download-artifact@v3 + with: + name: vector-submission-metadata + path: ${{ runner.temp }}/ + + - name: Determine if PR introduced a regression + env: + 
RUST_LOG: info + run: | + chmod +x ${{ runner.temp }}/bin/smp + + ${{ runner.temp }}/bin/smp --team-id ${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }} job result \ + --submission-metadata ${{ runner.temp }}/submission-metadata + + - name: Check status, cancelled + if: ${{ cancelled() }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='failure' \ + -f description='Analyze experimental results from Regression Detection Suite cancelled.' \ + -f context='Regression Detection Suite / detect-regression' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + - name: Check status, success + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='success' \ + -f description='Analyze experimental results from Regression Detection Suite succeeded.' \ + -f context='Regression Detection Suite / detect-regression' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + - name: Check status, failure + if: ${{ failure() }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='failure' \ + -f description='Analyze experimental results from Regression Detection Suite failed.' \ + -f context='Regression Detection Suite / detect-regression' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + analyze-experiment: + name: Download regression analysis & upload report + runs-on: ubuntu-22.04 + needs: + - submit-job + - compute-metadata + steps: + - name: Check status, in-progress + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='pending' \ + -f description='Analyze experimental results from Regression Detection Suite.' 
\ + -f context='Regression Detection Suite / analyze-experiment' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + - uses: actions/checkout@v3 + with: + ref: ${{ needs.compute-metadata.outputs.comparison-sha }} + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2.0.0 + with: + aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} + aws-region: us-west-2 + + - name: Download SMP binary + run: | + aws s3 cp s3://smp-cli-releases/v${{ needs.compute-metadata.outputs.smp-version }}/x86_64-unknown-linux-gnu/smp ${{ runner.temp }}/bin/smp + + - name: Download submission metadata + uses: actions/download-artifact@v3 + with: + name: vector-submission-metadata + path: ${{ runner.temp }}/ + + - name: Sync regression report to local system + env: + RUST_LOG: info + run: | + chmod +x ${{ runner.temp }}/bin/smp + + ${{ runner.temp }}/bin/smp --team-id ${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }} job sync \ + --submission-metadata ${{ runner.temp }}/submission-metadata \ + --output-path "${{ runner.temp }}/outputs" + + - name: Read regression report + id: read-analysis + uses: juliangruber/read-file-action@v1 + with: + path: ${{ runner.temp }}/outputs/report.html + + - name: Post report to PR + uses: peter-evans/create-or-update-comment@v3 + with: + issue-number: ${{ needs.compute-metadata.outputs.pr-number }} + edit-mode: append + body: ${{ steps.read-analysis.outputs.content }} + + - name: Upload regression report to artifacts uses: actions/upload-artifact@v3 with: - name: meta - path: "${{ runner.temp }}/meta" + name: capture-artifacts + path: ${{ runner.temp }}/outputs/* + + - name: Check status, cancelled + if: ${{ cancelled() }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='failure' \ + -f description='Analyze experimental results from Regression Detection Suite cancelled.' \ + -f context='Regression Detection Suite / analyze-experiment' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + - name: Check status, success + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='success' \ + -f description='Analyze experimental results from Regression Detection Suite succeeded.' \ + -f context='Regression Detection Suite / analyze-experiment' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + - name: Check status, failure + if: ${{ failure() }} + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.comparison-sha }} \ + -f state='failure' \ + -f description='Analyze experimental results from Regression Detection Suite failed.' 
\ + -f context='Regression Detection Suite / analyze-experiment' \ + -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + + # This job always runs- if an issue_comment triggered it, we need to update the check status in the PR, + # and if a pull_request triggered it, we need to flag the check status as a success. + regression-detection-suite: + name: Regression Detection Suite + runs-on: ubuntu-latest + if: always() + needs: + - compute-metadata + - build-baseline + - build-comparison + - confirm-valid-credentials + - upload-baseline-image-to-ecr + - upload-comparison-image-to-ecr + - submit-job + - detect-regression + - analyze-experiment + env: + FAILED: ${{ contains(needs.*.result, 'failure') }} + steps: + - name: (PR comment) Get PR branch + if: github.event_name == 'issue_comment' + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Submit PR result as failed + if: github.event_name == 'issue_comment' && env.FAILED == 'true' + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.compute-metadata.outputs.comparison-sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Regression Detection Suite + status: 'failure' + + - name: (PR comment) Submit PR result as success + if: github.event_name == 'issue_comment' && env.FAILED != 'true' + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Regression Detection Suite + status: 'success' + + - name: exit + run: | + echo "failed=${{ env.FAILED }}" + if [[ "$FAILED" == "true" ]] ; then + exit 1 + else + exit 0 + fi diff --git a/.github/workflows/regression_trusted.yml b/.github/workflows/regression_trusted.yml deleted file mode 100644 index 52de4d14d3236..0000000000000 --- a/.github/workflows/regression_trusted.yml +++ /dev/null @@ -1,595 +0,0 @@ -name: Regression Detector (trusted) - -on: - workflow_run: - workflows: ["Regression Detector"] - types: - - completed - -jobs: - compute-metadata: - name: Compute metadata for regression experiments - runs-on: ubuntu-22.04 - if: > - github.event.workflow_run.event == 'pull_request' && - github.event.workflow_run.conclusion == 'success' - outputs: - cpus: ${{ steps.system.outputs.CPUS }} - memory: ${{ steps.system.outputs.MEMORY }} - vector-cpus: ${{ steps.system.outputs.VECTOR_CPUS }} - - comparison-sha: ${{ steps.metadata.outputs.COMPARISON_SHA }} - comparison-tag: ${{ steps.metadata.outputs.COMPARISON_TAG }} - baseline-sha: ${{ steps.metadata.outputs.BASELINE_SHA }} - baseline-tag: ${{ steps.metadata.outputs.BASELINE_TAG }} - head-sha: ${{ steps.metadata.outputs.HEAD_SHA }} - checkout-sha: ${{ steps.metadata.outputs.CHECKOUT_SHA }} - github-event-number: ${{ steps.metadata.outputs.GITHUB_EVENT_NUMBER }} - - replicas: ${{ steps.experimental-meta.outputs.REPLICAS }} - warmup-seconds: ${{ steps.experimental-meta.outputs.WARMUP_SECONDS }} - total-samples: ${{ steps.experimental-meta.outputs.TOTAL_SAMPLES }} - p-value: ${{ steps.experimental-meta.outputs.P_VALUE }} - smp-version: ${{ steps.experimental-meta.outputs.SMP_CRATE_VERSION }} - lading-version: ${{ steps.experimental-meta.outputs.LADING_VERSION }} - - steps: - - name: Setup experimental metadata - id: experimental-meta - run: | - export WARMUP_SECONDS="45" - export REPLICAS="10" - export TOTAL_SAMPLES="600" - export P_VALUE="0.1" - export SMP_CRATE_VERSION="0.7.3" - export LADING_VERSION="0.12.0" - - echo "warmup seconds: 
${WARMUP_SECONDS}" - echo "replicas: ${REPLICAS}" - echo "total samples: ${TOTAL_SAMPLES}" - echo "regression p-value: ${P_VALUE}" - echo "smp crate version: ${SMP_CRATE_VERSION}" - echo "lading version: ${LADING_VERSION}" - - echo "WARMUP_SECONDS=${WARMUP_SECONDS}" >> $GITHUB_OUTPUT - echo "REPLICAS=${REPLICAS}" >> $GITHUB_OUTPUT - echo "TOTAL_SAMPLES=${TOTAL_SAMPLES}" >> $GITHUB_OUTPUT - echo "P_VALUE=${P_VALUE}" >> $GITHUB_OUTPUT - echo "SMP_CRATE_VERSION=${SMP_CRATE_VERSION}" >> $GITHUB_OUTPUT - echo "LADING_VERSION=${LADING_VERSION}" >> $GITHUB_OUTPUT - - - name: Setup system details - id: system - run: | - export CPUS="7" - export MEMORY="30g" - export VECTOR_CPUS="4" - - echo "cpus total: ${CPUS}" - echo "memory total: ${MEMORY}" - echo "vector cpus: ${VECTOR_CPUS}" - - echo "CPUS=${CPUS}" >> $GITHUB_OUTPUT - echo "MEMORY=${MEMORY}" >> $GITHUB_OUTPUT - echo "VECTOR_CPUS=${VECTOR_CPUS}" >> $GITHUB_OUTPUT - - # github.rest.actions.listWorkflowRunArtifacts only returns first 30 - # artifacts, and returns a { data, headers, status, url } object. The - # "data" part of this object contains the artifact data we care about. - # The fields of this data object correspond to the fields in the - # "Example Response" JSON object in - # https://docs.github.com/en/rest/actions/artifacts#list-workflow-run-artifacts. - # To return more than 30 responses, use the github.paginate API in - # https://octokit.github.io/rest.js/v19#custom-requests - # `github-script` aliases `octokit` to the `github` namespace. - - name: 'Download metadata' - uses: actions/github-script@v6.4.1 - with: - script: | - var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ - owner: context.repo.owner, - repo: context.repo.repo, - run_id: ${{github.event.workflow_run.id }}, - }); - - var matchArtifact = artifacts.data.artifacts.filter((artifact) => { - return artifact.name == "meta" - })[0]; - - console.log("Downloading artifact %s", matchArtifact.id); - - var download = await github.rest.actions.downloadArtifact({ - owner: context.repo.owner, - repo: context.repo.repo, - artifact_id: matchArtifact.id, - archive_format: 'zip', - }); - var fs = require('fs'); - fs.writeFileSync('${{github.workspace}}/meta.zip', Buffer.from(download.data)); - - - run: unzip meta.zip - - - name: Setup metadata - id: metadata - run: | - cat meta - cat meta >> $GITHUB_OUTPUT - - confirm-valid-credentials: - name: Confirm AWS credentials are minimally valid - runs-on: ubuntu-22.04 - needs: - - compute-metadata - steps: - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 - with: - aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} - aws-region: us-west-2 - - - name: Download SMP binary - run: | - aws s3 cp s3://smp-cli-releases/v${{ needs.compute-metadata.outputs.smp-version }}/x86_64-unknown-linux-gnu/smp ${{ runner.temp }}/bin/smp - - ## - ## SUBMIT - ## - - upload-baseline-image-to-ecr: - name: Upload images to ECR - runs-on: ubuntu-22.04 - needs: - - compute-metadata - - confirm-valid-credentials - steps: - # github.rest.actions.listWorkflowRunArtifacts only returns first 30 - # artifacts, and returns a { data, headers, status, url } object. The - # "data" part of this object contains the artifact data we care about. 
- # The fields of this data object correspond to the fields in the - # "Example Response" JSON object in - # https://docs.github.com/en/rest/actions/artifacts#list-workflow-run-artifacts. - # To return more than 30 responses, use the github.paginate API in - # https://octokit.github.io/rest.js/v19#custom-requests - # `github-script` aliases `octokit` to the `github` namespace. - - name: 'Download baseline image' - uses: actions/github-script@v6.4.1 - with: - script: | - var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ - owner: context.repo.owner, - repo: context.repo.repo, - run_id: ${{github.event.workflow_run.id }}, - }); - - var matchArtifact = artifacts.data.artifacts.filter((artifact) => { - return artifact.name == "baseline-image" - })[0]; - - console.log("Downloading artifact %s", matchArtifact.id); - - var download = await github.rest.actions.downloadArtifact({ - owner: context.repo.owner, - repo: context.repo.repo, - artifact_id: matchArtifact.id, - archive_format: 'zip', - }); - var fs = require('fs'); - fs.writeFileSync('${{github.workspace}}/baseline-image.zip', Buffer.from(download.data)); - - - run: unzip baseline-image.zip - - - name: Load baseline image - run: | - docker load --input baseline-image.tar - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 - with: - aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} - aws-region: us-west-2 - - - name: Login to Amazon ECR - id: login-ecr - uses: aws-actions/amazon-ecr-login@v1 - - - name: Docker Login to ECR - uses: docker/login-action@v2 - with: - registry: ${{ steps.login-ecr.outputs.registry }} - - - name: Tag & push baseline image - run: | - docker tag vector:${{ needs.compute-metadata.outputs.baseline-tag }} ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.baseline-tag }} - docker push ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.baseline-tag }} - - upload-comparison-image-to-ecr: - name: Upload images to ECR - runs-on: ubuntu-22.04 - needs: - - compute-metadata - - confirm-valid-credentials - steps: - # github.rest.actions.listWorkflowRunArtifacts only returns first 30 - # artifacts, and returns a { data, headers, status, url } object. The - # "data" part of this object contains the artifact data we care about. - # The fields of this data object correspond to the fields in the - # "Example Response" JSON object in - # https://docs.github.com/en/rest/actions/artifacts#list-workflow-run-artifacts. - # To return more than 30 responses, use the github.paginate API in - # https://octokit.github.io/rest.js/v19#custom-requests - # `github-script` aliases `octokit` to the `github` namespace. 
- - name: 'Download comparison image' - uses: actions/github-script@v6.4.1 - with: - script: | - var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ - owner: context.repo.owner, - repo: context.repo.repo, - run_id: ${{github.event.workflow_run.id }}, - }); - - var matchArtifact = artifacts.data.artifacts.filter((artifact) => { - return artifact.name == "comparison-image" - })[0]; - - console.log("Downloading artifact %s", matchArtifact.id); - - var download = await github.rest.actions.downloadArtifact({ - owner: context.repo.owner, - repo: context.repo.repo, - artifact_id: matchArtifact.id, - archive_format: 'zip', - }); - var fs = require('fs'); - fs.writeFileSync('${{github.workspace}}/comparison-image.zip', Buffer.from(download.data)); - - - run: unzip comparison-image.zip - - - name: Load comparison image - run: | - docker load --input comparison-image.tar - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 - with: - aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} - aws-region: us-west-2 - - - name: Login to Amazon ECR - id: login-ecr - uses: aws-actions/amazon-ecr-login@v1 - - - name: Docker Login to ECR - uses: docker/login-action@v2 - with: - registry: ${{ steps.login-ecr.outputs.registry }} - - - name: Tag & push comparison image - run: | - docker tag vector:${{ needs.compute-metadata.outputs.comparison-tag }} ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.comparison-tag }} - docker push ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.comparison-tag }} - - submit-job: - name: Submit regression job - runs-on: ubuntu-22.04 - needs: - - compute-metadata - - upload-baseline-image-to-ecr - - upload-comparison-image-to-ecr - steps: - - name: Check status, in-progress - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='pending' \ - -f description='Experiments submitted to the Regression Detector cluster.' 
\ - -f context='Regression Detector / submission' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - - uses: actions/checkout@v3 - with: - ref: ${{ needs.compute-metadata.outputs.checkout-sha }} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 - with: - aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} - aws-region: us-west-2 - - - name: Login to Amazon ECR - id: login-ecr - uses: aws-actions/amazon-ecr-login@v1 - - - name: Download SMP binary - run: | - aws s3 cp s3://smp-cli-releases/v${{ needs.compute-metadata.outputs.smp-version }}/x86_64-unknown-linux-gnu/smp ${{ runner.temp }}/bin/smp - - - name: Submit job - env: - RUST_LOG: info - run: | - chmod +x ${{ runner.temp }}/bin/smp - - ${{ runner.temp }}/bin/smp --team-id ${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }} job submit \ - --lading-version ${{ needs.compute-metadata.outputs.lading-version }} \ - --total-samples ${{ needs.compute-metadata.outputs.total-samples }} \ - --warmup-seconds ${{ needs.compute-metadata.outputs.warmup-seconds }} \ - --replicas ${{ needs.compute-metadata.outputs.replicas }} \ - --baseline-image ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.baseline-tag }} \ - --comparison-image ${{ steps.login-ecr.outputs.registry }}/${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }}-vector:${{ needs.compute-metadata.outputs.comparison-tag }} \ - --baseline-sha ${{ needs.compute-metadata.outputs.baseline-sha }} \ - --comparison-sha ${{ needs.compute-metadata.outputs.comparison-sha }} \ - --target-command "/usr/local/bin/vector" \ - --target-config-dir ${{ github.workspace }}/regression/ \ - --target-cpu-allotment "${{ needs.compute-metadata.outputs.cpus }}" \ - --target-memory-allotment "${{ needs.compute-metadata.outputs.memory }}" \ - --target-environment-variables "VECTOR_THREADS=${{ needs.compute-metadata.outputs.vector-cpus }},VECTOR_REQUIRE_HEALTHY=true" \ - --target-name vector \ - --submission-metadata ${{ runner.temp }}/submission-metadata - - - uses: actions/upload-artifact@v3 - with: - name: vector-submission-metadata - path: ${{ runner.temp }}/submission-metadata - - - name: Await job - timeout-minutes: 120 - env: - RUST_LOG: info - run: | - chmod +x ${{ runner.temp }}/bin/smp - - ${{ runner.temp }}/bin/smp --team-id ${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }} job status \ - --wait \ - --wait-delay-seconds 60 \ - --wait-timeout-minutes 90 \ - --submission-metadata ${{ runner.temp }}/submission-metadata - - - name: Handle cancellation if necessary - if: ${{ cancelled() }} - env: - RUST_LOG: info - run: | - chmod +x ${{ runner.temp }}/bin/smp - ${{ runner.temp }}/bin/smp --team-id ${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }} job cancel \ - --submission-metadata ${{ runner.temp }}/submission-metadata - - - name: Check status, cancelled - if: ${{ cancelled() }} - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='failure' \ - -f description='Experiments submitted to the Regression Detector cluster cancelled.' 
\ - -f context='Regression Detector / submission' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - - name: Check status, success - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='success' \ - -f description='Experiments submitted to the Regression Detector cluster successfully.' \ - -f context='Regression Detector / submission' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - - name: Check status, failure - if: ${{ failure() }} - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='success' \ - -f description='Experiments submitted to the Regression Detector cluster failed.' \ - -f context='Regression Detector / submission' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - ## - ## ANALYZE - ## - - detect-regression: - name: Determine regression status - runs-on: ubuntu-22.04 - needs: - - submit-job - - compute-metadata - steps: - - uses: actions/checkout@v3 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 - with: - aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} - aws-region: us-west-2 - - - name: Download SMP binary - run: | - aws s3 cp s3://smp-cli-releases/v${{ needs.compute-metadata.outputs.smp-version }}/x86_64-unknown-linux-gnu/smp ${{ runner.temp }}/bin/smp - - - name: Download submission metadata - uses: actions/download-artifact@v3 - with: - name: vector-submission-metadata - path: ${{ runner.temp }}/ - - - name: Determine if PR introduced a regression - env: - RUST_LOG: info - run: | - chmod +x ${{ runner.temp }}/bin/smp - - ${{ runner.temp }}/bin/smp --team-id ${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }} job result \ - --submission-metadata ${{ runner.temp }}/submission-metadata - - - name: Check status, cancelled - if: ${{ cancelled() }} - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='failure' \ - -f description='Analyze experimental results from Regression Detector cancelled.' \ - -f context='Regression Detector / detect-regression' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - - name: Check status, success - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='success' \ - -f description='Analyze experimental results from Regression Detector succeeded.' 
\ - -f context='Regression Detector / detect-regression' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - - name: Check status, failure - if: ${{ failure() }} - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='success' \ - -f description='Analyze experimental results from Regression Detector failed.' \ - -f context='Regression Detector / detect-regression' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - analyze-experiment: - name: Download regression analysis & upload report - runs-on: ubuntu-22.04 - needs: - - submit-job - - compute-metadata - steps: - - name: Check status, in-progress - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='pending' \ - -f description='Analyze experimental results from Regression Detector.' \ - -f context='Regression Detector / analyze-experiment' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - - uses: actions/checkout@v3 - with: - ref: ${{ needs.compute-metadata.outputs.checkout-sha }} - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 - with: - aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} - aws-region: us-west-2 - - - name: Download SMP binary - run: | - aws s3 cp s3://smp-cli-releases/v${{ needs.compute-metadata.outputs.smp-version }}/x86_64-unknown-linux-gnu/smp ${{ runner.temp }}/bin/smp - - - name: Download submission metadata - uses: actions/download-artifact@v3 - with: - name: vector-submission-metadata - path: ${{ runner.temp }}/ - - - name: Sync regression report to local system - env: - RUST_LOG: info - run: | - chmod +x ${{ runner.temp }}/bin/smp - - ${{ runner.temp }}/bin/smp --team-id ${{ secrets.SINGLE_MACHINE_PERFORMANCE_TEAM_ID }} job sync \ - --submission-metadata ${{ runner.temp }}/submission-metadata \ - --output-path "${{ runner.temp }}/outputs" - - - name: Read regression report - id: read-analysis - uses: juliangruber/read-file-action@v1 - with: - path: ${{ runner.temp }}/outputs/report.html - - - name: Post report to PR - uses: peter-evans/create-or-update-comment@v3 - with: - issue-number: ${{ needs.compute-metadata.outputs.github-event-number }} - edit-mode: append - body: ${{ steps.read-analysis.outputs.content }} - - - name: Upload regression report to artifacts - uses: actions/upload-artifact@v3 - with: - name: capture-artifacts - path: ${{ runner.temp }}/outputs/* - - - name: Check status, cancelled - if: ${{ cancelled() }} - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='failure' \ - -f description='Analyze experimental results from Regression Detector cancelled.' 
\ - -f context='Regression Detector / analyze-experiment' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - - name: Check status, success - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='success' \ - -f description='Analyze experimental results from Regression Detector succeeded.' \ - -f context='Regression Detector / analyze-experiment' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - - name: Check status, failure - if: ${{ failure() }} - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - /repos/${{ github.repository }}/statuses/${{ needs.compute-metadata.outputs.head-sha }} \ - -f state='success' \ - -f description='Analyze experimental results from Regression Detector failed.' \ - -f context='Regression Detector / analyze-experiment' \ - -f target_url=${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 24d78bfae85e0..1219da7a73d61 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,16 +4,10 @@ on: pull_request: merge_group: types: [checks_requested] - push: - branches: - - master concurrency: - # For pull requests, cancel running workflows, for master, run all - # - # `github.event.number` exists for pull requests, otherwise fall back to SHA - # for master - group: ${{ github.workflow }}-${{ github.event.number || github.sha }} + # `github.event.number` exists for pull requests, otherwise fall back to SHA for merge queue + group: ${{ github.workflow }}-${{ github.event.number || github.event.merge_group.base_sha }} cancel-in-progress: true env: @@ -33,63 +27,11 @@ env: jobs: changes: - runs-on: ubuntu-20.04 - # Set job outputs to values from filter step - outputs: - source: ${{ steps.filter.outputs.source }} - dependencies: ${{ steps.filter.outputs.dependencies }} - internal_events: ${{ steps.filter.outputs.internal_events }} - cue: ${{ steps.filter.outputs.cue }} - component_docs: ${{ steps.filter.outputs.component_docs }} - markdown: ${{ steps.filter.outputs.markdown }} - steps: - - uses: actions/checkout@v3 - - uses: dorny/paths-filter@v2 - id: filter - with: - filters: | - source: - - ".github/workflows/test.yml" - - ".cargo/**" - - "benches/**" - - "lib/**" - - "proto/**" - - "scripts/**" - - "src/**" - - "tests/**" - - "build.rs" - - "Cargo.lock" - - "Cargo.toml" - - "Makefile" - - "rust-toolchain.toml" - - "vdev/**" - deny: - - 'deny.toml' - - "vdev/**" - dependencies: - - ".cargo/**" - - 'Cargo.toml' - - 'Cargo.lock' - - 'rust-toolchain.toml' - - '.github/workflows/pr.yml' - - 'Makefile' - - 'scripts/cross/**' - - "vdev/**" - cue: - - 'website/cue/**' - - "vdev" - component_docs: - - 'scripts/generate-component-docs.rb' - - "vdev/**" - markdown: - - '**/**.md' - - "vdev/**" - internal_events: - - 'src/internal_events/**' - - "vdev/**" - docker: - - 'distribution/docker/**' - - "vdev/**" + uses: ./.github/workflows/changes.yml + secrets: inherit + with: + base_ref: ${{ github.event.merge_group.base_ref || github.event.pull_request.base.ref }} + head_ref: ${{ github.event.merge_group.head_ref || github.event.pull_request.head.ref }} # Remove this once 
https://github.com/vectordotdev/vector/issues/3771 is closed. # Then, modify the `cross-linux` job to run `test` instead of `build`. @@ -123,153 +65,6 @@ jobs: run: scripts/upload-test-results.sh if: always() - test-cli: - name: CLI - Linux - runs-on: [linux, ubuntu-20.04-8core] - needs: changes - env: - CARGO_INCREMENTAL: 0 - if: ${{ needs.changes.outputs.source == 'true' }} - steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 - name: Cache Cargo registry + index - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - restore-keys: | - ${{ runner.os }}-cargo- - - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - - run: bash scripts/environment/prepare.sh - - run: echo "::add-matcher::.github/matchers/rust.json" - - run: make test-cli - - name: Upload test results - run: scripts/upload-test-results.sh - if: always() - - test-misc: - name: Miscellaneous - Linux - runs-on: [linux, ubuntu-20.04-8core] - needs: changes - env: - CARGO_INCREMENTAL: 0 - if: ${{ needs.changes.outputs.source == 'true' }} - steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 - name: Cache Cargo registry + index - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - restore-keys: | - ${{ runner.os }}-cargo- - - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - - run: bash scripts/environment/prepare.sh - - run: echo "::add-matcher::.github/matchers/rust.json" - - run: make test-behavior - - run: make check-examples - - run: make test-docs - - cross-linux: - name: Cross - ${{ matrix.target }} - runs-on: [linux, ubuntu-20.04-8core] - needs: changes - env: - CARGO_INCREMENTAL: 0 - strategy: - matrix: - target: - - x86_64-unknown-linux-gnu - - x86_64-unknown-linux-musl - - aarch64-unknown-linux-gnu - - aarch64-unknown-linux-musl - - armv7-unknown-linux-gnueabihf - - armv7-unknown-linux-musleabihf - - # We run cross checks when dependencies change to ensure they still build. - # This helps us avoid adopting dependencies that aren't compatible with other architectures. - if: ${{ needs.changes.outputs.dependencies == 'true' }} - steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 - name: Cache Cargo registry + index - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - restore-keys: | - ${{ runner.os }}-cargo- - - run: echo "::add-matcher::.github/matchers/rust.json" - - run: 'cargo install cross --version 0.2.4 --force --locked' - # Why is this build, not check? Because we need to make sure the linking phase works. - # aarch64 and musl in particular are notoriously hard to link. - # While it may be tempting to slot a `check` in here for quickness, please don't. - - run: make cross-build-${{ matrix.target }} - - uses: actions/upload-artifact@v3 - with: - name: "vector-debug-${{ matrix.target }}" - path: "./target/${{ matrix.target }}/debug/vector" - - cross-linux-check: - if: ${{ needs.changes.outputs.dependencies == 'true' }} - runs-on: ubuntu-20.04 - name: Cross - Linux - needs: cross-linux - steps: - - name: Check cross matrix status - if: ${{ needs.cross-linux.result != 'success' }} - run: exit 1 - - test-mac: - name: Unit - Mac - # Full CI suites for this platform were only recently introduced. 
- # Some failures are permitted until we can properly correct them. - continue-on-error: true - runs-on: macos-11 - needs: changes - env: - CARGO_INCREMENTAL: 0 - if: ${{ needs.changes.outputs.source == 'true' }} - steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 - name: Cache Cargo registry + index - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - restore-keys: | - ${{ runner.os }}-cargo- - - run: bash scripts/environment/bootstrap-macos-10.sh - - run: bash scripts/environment/prepare.sh - - run: echo "::add-matcher::.github/matchers/rust.json" - - run: make test - - run: make test-behavior - - test-windows: - name: Unit - Windows - runs-on: [windows, windows-2019-8core] - needs: changes - if: ${{ needs.changes.outputs.source == 'true' }} - steps: - - uses: actions/checkout@v3 - - run: .\scripts\environment\bootstrap-windows-2019.ps1 - - run: make test - test-vrl: name: VRL - Linux continue-on-error: true @@ -282,29 +77,6 @@ jobs: - run: bash scripts/environment/prepare.sh - run: cargo vdev test-vrl - check-component-features: - name: Component Features - Linux - runs-on: [linux, ubuntu-20.04-8core] - needs: changes - if: ${{ needs.changes.outputs.source == 'true' }} - steps: - - uses: actions/checkout@v3 - - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - - run: bash scripts/environment/prepare.sh - - run: echo "::add-matcher::.github/matchers/rust.json" - - run: make check-component-features - - check-msrv: - name: Check minimum supported Rust version - runs-on: [linux, ubuntu-20.04-8core] - needs: changes - if: ${{ needs.changes.outputs.source == 'true' }} - steps: - - uses: actions/checkout@v3 - - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - - run: cargo install cargo-msrv --version 0.15.1 - - run: cargo msrv verify - checks: name: Checks runs-on: [linux, ubuntu-20.04-8core] @@ -367,24 +139,14 @@ jobs: path: "/tmp/vector-config-schema.json" if: success() || failure() - master-failure: - name: master-failure - if: failure() && github.ref == 'refs/heads/master' + all-checks: + name: Test Suite + runs-on: ubuntu-20.04 needs: - - changes - - cross-linux - - test-misc - - test-linux - - test-mac - - test-windows - - test-vrl - - check-component-features - checks - runs-on: ubuntu-20.04 + - test-vrl + - test-linux steps: - - name: Discord notification - env: - DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }} - uses: Ilshidur/action-discord@0.3.2 - with: - args: "Master tests failed: " + - name: validate + run: echo "OK" + diff --git a/.github/workflows/unit_mac.yml b/.github/workflows/unit_mac.yml new file mode 100644 index 0000000000000..d922f05da370b --- /dev/null +++ b/.github/workflows/unit_mac.yml @@ -0,0 +1,65 @@ +name: Unit - Mac + +on: + workflow_call: + +jobs: + unit-mac: + # Full CI suites for this platform were only recently introduced. + # Some failures are permitted until we can properly correct them. 
+ continue-on-error: true + runs-on: macos-11 + env: + CARGO_INCREMENTAL: 0 + steps: + + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + if: ${{ github.event_name == 'issue_comment' }} + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Unit - Mac + status: pending + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: ${{ github.event_name != 'issue_comment' }} + uses: actions/checkout@v3 + + - uses: actions/cache@v3 + name: Cache Cargo registry + index + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - run: bash scripts/environment/bootstrap-macos-10.sh + - run: bash scripts/environment/prepare.sh + - run: echo "::add-matcher::.github/matchers/rust.json" + - run: make test + - run: make test-behavior + + - name: (PR comment) Set latest commit status as ${{ job.status }} + uses: myrotvorets/set-commit-status-action@1.1.6 + if: always() && github.event_name == 'issue_comment' + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Unit - Mac + status: ${{ job.status }} diff --git a/.github/workflows/unit_windows.yml b/.github/workflows/unit_windows.yml new file mode 100644 index 0000000000000..1971670c5dbe6 --- /dev/null +++ b/.github/workflows/unit_windows.yml @@ -0,0 +1,53 @@ +name: Unit - Windows + +on: + workflow_call: + +jobs: + + test-windows: + runs-on: [windows, windows-2019-8core] + steps: + - name: Validate issue comment + if: github.event_name == 'issue_comment' + uses: tspascoal/get-user-teams-membership@v2 + with: + username: ${{ github.actor }} + team: 'Vector' + GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + + - name: (PR comment) Get PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: xt0rted/pull-request-comment-branch@v1 + id: comment-branch + + - name: (PR comment) Set latest commit status as pending + if: ${{ github.event_name == 'issue_comment' }} + uses: myrotvorets/set-commit-status-action@1.1.6 + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Unit - Windows + status: pending + + - name: (PR comment) Checkout PR branch + if: ${{ github.event_name == 'issue_comment' }} + uses: actions/checkout@v3 + with: + ref: ${{ steps.comment-branch.outputs.head_ref }} + + - name: Checkout branch + if: ${{ github.event_name != 'issue_comment' }} + uses: actions/checkout@v3 + + - run: .\scripts\environment\bootstrap-windows-2019.ps1 + - run: make test + + - name: (PR comment) Set latest commit status as ${{ job.status }} + uses: myrotvorets/set-commit-status-action@1.1.6 + if: always() && github.event_name == 'issue_comment' + with: + sha: ${{ steps.comment-branch.outputs.head_sha }} + token: ${{ secrets.GITHUB_TOKEN }} + context: Unit - Windows + status: ${{ job.status }} diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index cebda9652551b..bc87acb59fec1 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -67,6 +67,13 @@ To merge a new source, 
sink, or transform, you need to: see some [example of instrumentation in existing integrations](https://github.com/vectordotdev/vector/tree/master/src/internal_events). - [ ] Add documentation. You can see [examples in the `docs` directory](https://github.com/vectordotdev/vector/blob/master/docs). +When adding new integration tests, the following changes are needed in the github workflows: + +- in `.github/workflows/integration.yml`, add another entry in the matrix definition for the new integration. +- in `.github/workflows/integration-comment.yml`, add another entry in the matrix definition for the new integration. +- in `.github/workflows/changes.yml`, add a new filter definition for files changed, and update the `changes` job +outputs to reference the filter, and finally update the outputs of `workflow_call` to include the new filter. + ## Workflow ### Git Branches From 8e40b6850a57f874476f071d4ec98d699a99a65e Mon Sep 17 00:00:00 2001 From: neuronull Date: Mon, 22 May 2023 10:37:49 -0600 Subject: [PATCH 016/236] chore(ci): temporarily disable flakey `aws_s3` integration test case `handles_errored_status` (#17455) This test case is flakey in CI so temporarily disabling it until we can sort it out. --- src/sources/aws_s3/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/sources/aws_s3/mod.rs b/src/sources/aws_s3/mod.rs index 49a1378c25991..17ec4cb45cd82 100644 --- a/src/sources/aws_s3/mod.rs +++ b/src/sources/aws_s3/mod.rs @@ -667,6 +667,9 @@ mod integration_tests { .await; } + // TODO: re-enable this after figuring out why it is so flakey in CI + // https://github.com/vectordotdev/vector/issues/17456 + #[ignore] #[tokio::test] async fn handles_errored_status() { trace_init(); From 7554d9c8cc7b9b7134c7879dc941f8f55bc837e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 16:56:53 +0000 Subject: [PATCH 017/236] chore(deps): bump bstr from 1.4.0 to 1.5.0 (#17453) Bumps [bstr](https://github.com/BurntSushi/bstr) from 1.4.0 to 1.5.0.
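For background, a minimal sketch of the `bstr` byte-string API that `file-source` builds against; this example is illustrative only and is not taken from this repository:

```rust
use bstr::ByteSlice; // extension methods for [u8] that tolerate non-UTF-8 data

fn main() {
    let line: &[u8] = b"level=info msg=\"checkpoint written\"";
    // Substring search over raw bytes, with no UTF-8 validation required.
    if let Some(idx) = line.find("msg=") {
        // Convert lossily only at the point of display.
        println!("message starts at byte {}: {}", idx, line[idx..].to_str_lossy());
    }
}
```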
Commits
  • b3cab19 1.5.0
  • f639abd api: add more Borrow and BorrowMut trait impls
  • 7d96589 imp: gate UnescapeBytes struct on alloc feature
  • 7c369ae imp: this removes the only target pointer width specific code
  • b38820e ci: update OSes
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=bstr&package-manager=cargo&previous-version=1.4.0&new-version=1.5.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- lib/file-source/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 73fc72074ec44..92f900d37abc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -284,7 +284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86d6b683edf8d1119fe420a94f8a7e389239666aa72e65495d91c00462510151" dependencies = [ "anstyle 1.0.0", - "bstr 1.4.0", + "bstr 1.5.0", "doc-comment", "predicates", "predicates-core", @@ -1624,9 +1624,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" +checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" dependencies = [ "memchr", "once_cell", @@ -3162,7 +3162,7 @@ dependencies = [ name = "file-source" version = "0.1.0" dependencies = [ - "bstr 1.4.0", + "bstr 1.5.0", "bytes 1.4.0", "chrono", "crc", diff --git a/lib/file-source/Cargo.toml b/lib/file-source/Cargo.toml index 21ff50cea5b50..9a2329b8a9e18 100644 --- a/lib/file-source/Cargo.toml +++ b/lib/file-source/Cargo.toml @@ -19,7 +19,7 @@ vector-config-common = { path = "../vector-config-common", default-features = fa vector-config-macros = { path = "../vector-config-macros", default-features = false } [dependencies.bstr] -version = "1.4" +version = "1.5" default-features = false features = [] From 95cbba9116f12e1aa3665f89050132a28f9a0327 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 17:26:37 +0000 Subject: [PATCH 018/236] chore(deps): bump base64 from 0.21.0 to 0.21.1 (#17451) Bumps [base64](https://github.com/marshallpierce/rust-base64) from 0.21.0 to 0.21.1.
Changelog

Sourced from base64's changelog.

0.21.1

  • Remove the possibility of panicking during decoded length calculations
  • DecoderReader no longer sometimes erroneously ignores padding #226

Breaking changes

  • Engine.internal_decode return type changed
  • Update MSRV to 1.60.0
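For context, a minimal sketch of the 0.21-series `Engine` API that the crates bumped here compile against; this example is illustrative only and is not taken from this repository:

```rust
use base64::{engine::general_purpose::STANDARD, Engine as _};

fn main() -> Result<(), base64::DecodeError> {
    // In the 0.21.x API, encoding and decoding go through an explicit Engine
    // value instead of the old crate-level free functions.
    let encoded = STANDARD.encode(b"vector");
    let decoded = STANDARD.decode(&encoded)?;
    assert_eq!(decoded, b"vector");
    Ok(())
}
```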
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=base64&package-manager=cargo&previous-version=0.21.0&new-version=0.21.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 4 ++-- lib/vector-core/Cargo.toml | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 92f900d37abc0..51a6e03626ef3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1369,9 +1369,9 @@ checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "3f1e31e207a6b8fb791a38ea3105e6cb541f55e4d029902d3039a4ad07cc4105" [[package]] name = "base64-simd" @@ -1503,7 +1503,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af254ed2da4936ef73309e9597180558821cb16ae9bba4cb24ce6b612d8d80ed" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", "bollard-stubs", "bytes 1.4.0", "chrono", @@ -4408,7 +4408,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd990069640f9db34b3b0f7a1afc62a05ffaa3be9b66aa3c313f58346df7f788" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", "bytes 1.4.0", "chrono", "http", @@ -5583,7 +5583,7 @@ dependencies = [ "async-compat", "async-trait", "backon", - "base64 0.21.0", + "base64 0.21.1", "bytes 1.4.0", "chrono", "flagset", @@ -6831,7 +6831,7 @@ version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", "bytes 1.4.0", "encoding_rs", "futures-core", @@ -8520,7 +8520,7 @@ dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.0", + "base64 0.21.1", "bytes 1.4.0", "flate2", "futures-core", @@ -9227,7 +9227,7 @@ dependencies = [ "azure_identity", "azure_storage", "azure_storage_blobs", - "base64 0.21.0", + "base64 0.21.1", "bloom", "bollard", "bytes 1.4.0", @@ -9544,7 +9544,7 @@ version = "0.1.0" dependencies = [ "async-graphql", "async-trait", - "base64 0.21.0", + "base64 0.21.1", "bitmask-enum", "bytes 1.4.0", "chrono", @@ -9809,7 +9809,7 @@ source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0 dependencies = [ "aes", "base16", - "base64 0.21.0", + "base64 0.21.1", "bytes 1.4.0", "cbc", "cfb-mode", @@ -10380,7 +10380,7 @@ checksum = "bd7b0b5b253ebc0240d6aac6dd671c495c467420577bf634d3064ae7e6fa2b4c" dependencies = [ "assert-json-diff", "async-trait", - "base64 0.21.0", + "base64 0.21.1", "deadpool", "futures 0.3.28", "futures-timer", diff --git a/Cargo.toml b/Cargo.toml index bd54413ed613c..6816650a3458f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -233,7 +233,7 @@ arc-swap = { version = "1.6", default-features = false, optional = true } async-compression = { version = "0.4.0", default-features = false, features = ["tokio", "gzip", "zstd"], optional = true } apache-avro = { version = "0.14.0", default-features = false, optional = true } axum = { version = "0.6.18", default-features = false } -base64 = { version = "0.21.0", default-features = false, optional = true } +base64 = { version = "0.21.1", default-features = false, optional = true } bloom = { version = "0.3.2", default-features = false, optional = true } bollard = { version = "0.14.0", default-features = false, features = ["ssl", "chrono"], optional = true } bytes = { version = 
"1.4.0", default-features = false, features = ["serde"] } @@ -343,7 +343,7 @@ azure_core = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b azure_identity = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["enable_reqwest"] } azure_storage_blobs = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["azurite_workaround"] } azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["azurite_workaround"] } -base64 = "0.21.0" +base64 = "0.21.1" criterion = { version = "0.4.0", features = ["html_reports", "async_tokio"] } itertools = { version = "0.10.5", default-features = false } libc = "0.2.144" diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 488322f0786c1..097b5b49daf4d 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -77,7 +77,7 @@ schannel = "0.1.21" prost-build = "0.11" [dev-dependencies] -base64 = "0.21.0" +base64 = "0.21.1" chrono-tz = { version = "0.8.2", default-features = false } criterion = { version = "0.4.0", features = ["html_reports"] } env-test-util = "1.0.1" From af2b2afdd95c8ed092beabc443fbd5c5d263a53e Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 19 May 2023 11:29:47 -0700 Subject: [PATCH 019/236] chore(releasing): Prepare v0.30.0 release Signed-off-by: Jesse Szwedko --- distribution/install.sh | 2 +- website/content/en/releases/0.30.0.md | 4 + .../administration/interfaces/kubectl.cue | 2 +- website/cue/reference/releases/0.30.0.cue | 364 ++++++++++++++++++ website/cue/reference/versions.cue | 1 + 5 files changed, 371 insertions(+), 2 deletions(-) create mode 100644 website/content/en/releases/0.30.0.md create mode 100644 website/cue/reference/releases/0.30.0.cue diff --git a/distribution/install.sh b/distribution/install.sh index e1bb5466690c2..b4f72e03bcccd 100755 --- a/distribution/install.sh +++ b/distribution/install.sh @@ -12,7 +12,7 @@ set -u # If PACKAGE_ROOT is unset or empty, default it. 
PACKAGE_ROOT="${PACKAGE_ROOT:-"https://packages.timber.io/vector"}" -VECTOR_VERSION="0.29.1" +VECTOR_VERSION="0.30.0" _divider="--------------------------------------------------------------------------------" _prompt=">>>" _indent=" " diff --git a/website/content/en/releases/0.30.0.md b/website/content/en/releases/0.30.0.md new file mode 100644 index 0000000000000..104bd3322e7b3 --- /dev/null +++ b/website/content/en/releases/0.30.0.md @@ -0,0 +1,4 @@ +--- +title: Vector v0.30.0 release notes +weight: 21 +--- diff --git a/website/cue/reference/administration/interfaces/kubectl.cue b/website/cue/reference/administration/interfaces/kubectl.cue index ca69d0807bdd4..8eeddaf8b6b06 100644 --- a/website/cue/reference/administration/interfaces/kubectl.cue +++ b/website/cue/reference/administration/interfaces/kubectl.cue @@ -19,7 +19,7 @@ administration: interfaces: kubectl: { role_implementations: [Name=string]: { commands: { _deployment_variant: string - _vector_version: "0.29" + _vector_version: "0.30" _namespace: string | *"vector" _controller_resource_type: string _controller_resource_name: string | *_deployment_variant diff --git a/website/cue/reference/releases/0.30.0.cue b/website/cue/reference/releases/0.30.0.cue new file mode 100644 index 0000000000000..0c887b1b751c3 --- /dev/null +++ b/website/cue/reference/releases/0.30.0.cue @@ -0,0 +1,364 @@ +package metadata + +releases: "0.30.0": { + date: "2023-05-22" + codename: "" + + description: """ + The Vector team is pleased to announce version 0.30.0! + + In addition to the usual smaller enhancements and bug fixes, this release also includes + a refresh of the component statuses in the docs following the [stability + guarantees](/docs/about/under-the-hood/guarantees/#stability-guarantees). + + Be sure to check out the [upgrade guide](/highlights/2023-05-23-0-30-0-upgrade-guide) for + breaking changes in this release. + """ + + known_issues: [] + + changelog: [ + { + type: "fix" + scopes: ["disk buffers"] + description: """ + Disk buffers now recover from partial writes that can occur during unclean shutdowns. + """ + pr_numbers: [17099] + }, + { + type: "enhancement" + scopes: ["pulsar sink"] + description: """ + The `pulsar` sink supports a few new features: + + - Dynamic topics using a topic template + - Can receive both logs and metrics + - Dynamic message `properties` can be set via `properties_key` + + This brings functionality in-line with that which is supported by the `kafka` sink. + """ + contributors: ["addisonj"] + pr_numbers: [14345] + }, + { + type: "enhancement" + scopes: ["kubernetes_logs source"] + description: """ + The `kubernetes_logs` source supports a new `use_apiserver_cache` option to have + requests from Vector hit the Kubernetes API server cache rather than always hitting + etcd. It can significantly reduce Kubernetes control plane memory pressure in + exchange for a chance of receiving stale data. + """ + contributors: ["nabokihms"] + pr_numbers: [17095] + }, + { + type: "enhancement" + scopes: ["appsignal sink"] + description: """ + The `appsignal` sink now allows configuration of TLS options via the `tls` config + field. This brings it in-line with other sinks that support TLS. + """ + contributors: ["tombruijn"] + pr_numbers: [17122] + }, + { + type: "fix" + scopes: ["influxdb_logs sink"] + description: """ + The `influxdb_logs` sink now correctly encodes logs when `tags` are present. 
+ """ + contributors: ["juvenn"] + pr_numbers: [17029] + }, + { + type: "fix" + scopes: ["loki sink"] + description: """ + The `loki` sink now warns when added `labels` collide via wildcard + expansion. + """ + contributors: ["hargut"] + pr_numbers: [17052] + }, + { + type: "chore" + scopes: ["socket source"] + description: """ + The deprecated `max_length` option of the `socket` source was removed. Please see + the [upgrade + guide](/highlights/2023-05-23-0-30-0-upgrade-guide#socket-source-max-length) for + more details. + """ + breaking: true + pr_numbers: [17162] + }, + { + type: "enhancement" + scopes: ["amqp sink"] + description: """ + The `amqp` sink now allows configuration of the `content_encoding` and + `content_type` message properties via the new `properties` configuration option. + """ + contributors: ["arouene"] + pr_numbers: [17174] + }, + { + type: "enhancement" + scopes: ["docker_logs source"] + description: """ + The `docker_logs` source now supports usage of the `tcp://` scheme for the `host` + option. The connection is the same as-if the `http://` scheme was used. + """ + contributors: ["OrangeFlag"] + pr_numbers: [17124] + }, + { + type: "enhancement" + scopes: ["releasing"] + description: """ + Vector's distroless libc docker images (tags ending in `-distroless-libc`) are now + based on Debian 11 rather than Debian 10. This matches Vector's published Debian + images (tags ending in `-debian`). + """ + contributors: ["SIPR-octo"] + pr_numbers: [17160] + }, + { + type: "enhancement" + scopes: ["aws_s3 source", "aws_s3 sink"] + description: """ + The `aws_s3` source and `aws_s3` sink now have full support for codecs and can + receive/send any event type allowing `aws_s3` to be used as a transport layer + between Vector instances. + """ + pr_numbers: [17098] + }, + { + type: "fix" + scopes: ["elasticsearch sink"] + description: """ + The `elasticsearch` sink now uses the correct API to automatically determine the + version of the downstream Elasticsearch instance (when `api_version = "auto"`). + """ + contributors: ["syedriko"] + pr_numbers: [17227] + }, + { + type: "enhancement" + scopes: ["tag_cardinality_limit transform", "observability"] + description: """ + The `tag_cardinality_limit` now includes the `metric_name` field on logs it produces + to more easily identify the metric that was limited. + """ + contributors: ["nomonamo"] + pr_numbers: [17295] + }, + { + type: "fix" + scopes: ["gcp_stackdriver_metrics sink"] + description: """ + The `gcp_stackdriver_metrics` sink now correctly refreshes the authentication token before it expires. + """ + pr_numbers: [17297] + }, + { + type: "enhancement" + scopes: ["http sink"] + description: """ + HTTP-based sinks now log the underlying error if an unexpected error condition is + hit. This makes debugging easier. + """ + pr_numbers: [17327] + }, + { + type: "fix" + scopes: ["observability"] + description: """ + Vector's internal logs were updated to use "suppress" rather than "rate-limit" in + the hopes that it makes it clearer that it is only Vector's log output that is being + suppressed, rather than data processing being throttled. + """ + pr_numbers: [17394] + }, + { + type: "fix" + scopes: ["kafka source"] + description: """ + The `kafka` source now attempts to send any pending acknowledgements to the Kafka + server before reading additional messages to process. 
+ """ + pr_numbers: [17380] + }, + { + type: "enhancement" + scopes: ["aws provider"] + description: """ + AWS components now allow configuring `auth.region` without any of the other + authentication options so that a different region can be given to the default + authentication provider chain than the region that the component is otherwise + connecting to. + """ + pr_numbers: [17414] + }, + ] + + commits: [ + {sha: "cbc17be42af382dc200d8f1516be29f231485026", date: "2023-04-07 21:45:24 UTC", description: "bump enumflags2 from 0.7.5 to 0.7.6", pr_number: 17079, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 6, deletions_count: 6}, + {sha: "c29c8171bdcea02f991ef9bdc3cbd3ea0b8adedb", date: "2023-04-07 21:46:16 UTC", description: "bump async-stream from 0.3.4 to 0.3.5", pr_number: 17076, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 8, deletions_count: 8}, + {sha: "eafba69a355c8b7ae099134392c6ebd7cab6dcce", date: "2023-04-08 04:15:40 UTC", description: "bump tonic from 0.8.3 to 0.9.1", pr_number: 17077, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 82, deletions_count: 23}, + {sha: "5d886550784e1fe49ba5d670f81161c5b8614abc", date: "2023-04-08 05:43:35 UTC", description: "Load compose files and inject network block", pr_number: 17025, scopes: ["vdev"], type: "enhancement", breaking_change: false, author: "Jonathan Padilla", files_count: 32, insertions_count: 80, deletions_count: 132}, + {sha: "9a56ed8226a764fa09dcfe9f4e8d968646555bf9", date: "2023-04-10 22:54:06 UTC", description: "bump openssl from 0.10.48 to 0.10.50", pr_number: 17087, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 6, deletions_count: 7}, + {sha: "623b838b2193e019173ad5d223fb217bbf5d94bd", date: "2023-04-10 22:54:44 UTC", description: "bump chrono-tz from 0.8.1 to 0.8.2", pr_number: 17088, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 5, deletions_count: 5}, + {sha: "e0906105bc0c6ed297ed96ab8c545535c4fa83b2", date: "2023-04-10 22:55:29 UTC", description: "bump prettydiff from 0.6.2 to 0.6.4", pr_number: 17089, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 12, deletions_count: 2}, + {sha: "adbf4d54e5b11a562b1323d3dcbc2587c855b093", date: "2023-04-10 22:56:14 UTC", description: "bump serde_with from 2.3.1 to 2.3.2", pr_number: 17090, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 13, deletions_count: 13}, + {sha: "9cc2f1de1cce6c43e335ec1815363f510e111fbd", date: "2023-04-10 22:56:51 UTC", description: "bump uuid from 1.3.0 to 1.3.1", pr_number: 17091, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "64d560d7737e553190d473dbbb07ae001cf169b3", date: "2023-04-10 23:43:39 UTC", description: "correctly mark some sinks as stateful", pr_number: 17085, scopes: ["external docs"], type: "chore", breaking_change: false, author: "neuronull", files_count: 6, insertions_count: 10, deletions_count: 6}, + {sha: "51312aaa919cbe4e0d25dcfc202a6e9f618389a3", date: "2023-04-11 00:23:07 UTC", description: "bump wiremock from 0.5.17 to 0.5.18", pr_number: 
17092, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 4, deletions_count: 4}, + {sha: "887d6d7971c86e17448054484e7956b8fd393be7", date: "2023-04-11 07:14:19 UTC", description: "Reset dependencies bumped by a61dea1", pr_number: 17100, scopes: ["deps"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 256, deletions_count: 352}, + {sha: "1bdb24d04329aabb7212942b08f503e910ed60ff", date: "2023-04-12 00:59:35 UTC", description: "Transform outputs hash table of OutputId -> Definition", pr_number: 17059, scopes: ["topology"], type: "chore", breaking_change: false, author: "Stephen Wakely", files_count: 24, insertions_count: 283, deletions_count: 212}, + {sha: "42f298b3721098aca7754b1759cf6abd84a1e6fc", date: "2023-04-11 22:47:37 UTC", description: "bump num_enum from 0.5.11 to 0.6.0", pr_number: 17106, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 25, deletions_count: 4}, + {sha: "6f745234ed3c7d22cd446769fcac86549c105416", date: "2023-04-11 22:49:55 UTC", description: "bump proc-macro2 from 1.0.55 to 1.0.56", pr_number: 17103, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 65, deletions_count: 65}, + {sha: "d53240b53a789edec8bd6700953dccbe660c7a65", date: "2023-04-11 22:51:31 UTC", description: "bump getrandom from 0.2.8 to 0.2.9", pr_number: 17101, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 13, deletions_count: 13}, + {sha: "a791595b0cfcae36d0c46708a91d5e2813ed38eb", date: "2023-04-12 01:27:05 UTC", description: "correctly handle partial writes in reader seek during initialization", pr_number: 17099, scopes: ["buffers"], type: "fix", breaking_change: false, author: "Toby Lawrence", files_count: 3, insertions_count: 163, deletions_count: 12}, + {sha: "edaa6124bd7a47cbb551127168b764d496bf73c2", date: "2023-04-12 02:26:21 UTC", description: "tidy up some of the module level docs for `disk_v2`", pr_number: 17093, scopes: ["buffer"], type: "chore", breaking_change: false, author: "Toby Lawrence", files_count: 2, insertions_count: 96, deletions_count: 61}, + {sha: "fd13d64c7b911f7fa4cb901640dbe6b1042018cc", date: "2023-04-12 01:53:39 UTC", description: "Regenerate Kubernetes manifests for 0.21.2", pr_number: 17108, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 18, insertions_count: 22, deletions_count: 22}, + {sha: "dd9608a40da7758ab06f1a298093130abfc72418", date: "2023-04-12 07:55:24 UTC", description: "bump libc from 0.2.140 to 0.2.141", pr_number: 17104, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 4, deletions_count: 4}, + {sha: "f3b5d42cd5d01acf86235e6edc17f5b0d3b837c4", date: "2023-04-12 02:07:09 UTC", description: "Disable `appsignal` integration test until CA issues are resolved", pr_number: 17109, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 2, deletions_count: 1}, + {sha: "48fc574e7177bfcc5acf2f9aac85474cb38faef8", date: "2023-04-12 04:17:23 UTC", description: "re-enable `appsignal` integration test", pr_number: 17111, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 2}, + {sha: 
"2d72f82b22054a3a7c392061010f94eec15c66be", date: "2023-04-12 07:24:54 UTC", description: "improve config schema output with more precise `unevaluatedProperties` + schema ref flattening", pr_number: 17026, scopes: ["config"], type: "chore", breaking_change: false, author: "Toby Lawrence", files_count: 17, insertions_count: 1523, deletions_count: 103}, + {sha: "1e97a2fc5c5cbdee8b27aa34ca14dde67eac2166", date: "2023-04-12 05:27:09 UTC", description: "Refactor to use StreamSink", pr_number: 14345, scopes: ["pulsar sink"], type: "enhancement", breaking_change: false, author: "Addison Higham", files_count: 16, insertions_count: 1000, deletions_count: 601}, + {sha: "e7c481558373625e04d763ea34451f219f7656d9", date: "2023-04-12 06:27:49 UTC", description: "update unsupported ubuntu version runners", pr_number: 17113, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "7a40c817151819ba72ed2e31d5860956f693fa8d", date: "2023-04-12 08:06:47 UTC", description: "use python v3.8 in ubuntu 20.04 runner", pr_number: 17116, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "f56d1ef50d57a5057807b1d122032980bbc70d8d", date: "2023-04-13 03:12:21 UTC", description: "remove unnecessary dep install", pr_number: 17128, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 0, deletions_count: 1}, + {sha: "f90b3b305f23bcb9e4c03d7199a6ad3d4a27045b", date: "2023-04-13 03:51:48 UTC", description: "bump cached from 0.42.0 to 0.43.0", pr_number: 17118, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "7b15d191b9b019dfdfea8dd743ff5fa07a19b82f", date: "2023-04-13 04:01:08 UTC", description: "add `appsignal` to codeowners", pr_number: 17127, scopes: ["administration"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 0}, + {sha: "3834612cb052edcae99f22aecbf07fdad32f816c", date: "2023-04-13 06:35:34 UTC", description: "Bump Vector version to 0.30.0", pr_number: 17134, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "e7ea0a82132d7572aad66c6d0b1297777d1196c6", date: "2023-04-13 07:52:27 UTC", description: "Regenerate manifests for 0.22.0 chart", pr_number: 17135, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 18, insertions_count: 22, deletions_count: 22}, + {sha: "8762563a3b19d5b65df3172a5f7bdcd670102eee", date: "2023-04-13 07:06:24 UTC", description: "bump opendal from 0.30.5 to 0.31.0", pr_number: 17119, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 20, deletions_count: 250}, + {sha: "dbb3f251ce952bcbe47e996d72a00972b12e1095", date: "2023-04-13 22:50:58 UTC", description: "bump aws-sigv4 from 0.55.0 to 0.55.1", pr_number: 17138, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 10, deletions_count: 10}, + {sha: "db39d837e5083fe2788ea729dd20abf20234cc72", date: "2023-04-13 22:57:49 UTC", description: "bump socket2 from 0.4.7 to 0.5.2", pr_number: 17121, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", 
files_count: 3, insertions_count: 102, deletions_count: 26}, + {sha: "ba63e2148afeb3824fc413d63ed165c3c5068eee", date: "2023-04-14 01:57:04 UTC", description: "Add a quickfix to handle special capitalization cases", pr_number: 17141, scopes: [], type: "docs", breaking_change: false, author: "Spencer Gilbert", files_count: 1, insertions_count: 19, deletions_count: 4}, + {sha: "d245927f570bca42082f9495844ca4eddef715f2", date: "2023-04-14 03:05:49 UTC", description: "Remove skaffold from project", pr_number: 17145, scopes: [], type: "chore", breaking_change: false, author: "Spencer Gilbert", files_count: 8, insertions_count: 0, deletions_count: 54}, + {sha: "e46fae798120f7d3ce762382dcf9cfd3b79e4a55", date: "2023-04-14 12:56:23 UTC", description: "use kube-apiserver cache for list requests", pr_number: 17095, scopes: ["kubernetes_logs"], type: "feat", breaking_change: false, author: "Maksim Nabokikh", files_count: 4, insertions_count: 80, deletions_count: 65}, + {sha: "198068cf55732a3bfe034697d9dc5c9abadb1b63", date: "2023-04-14 11:37:02 UTC", description: "Add TLS config option", pr_number: 17122, scopes: ["appsignal sink"], type: "fix", breaking_change: false, author: "Tom de Bruijn", files_count: 2, insertions_count: 90, deletions_count: 2}, + {sha: "5247972ed8ae85dc384571c2bcc473aa9cb8e661", date: "2023-04-14 05:50:54 UTC", description: "add unit test (ignored) for support for encoding special chars in `ProxyConfig`", pr_number: 17148, scopes: ["core"], type: "enhancement", breaking_change: false, author: "neuronull", files_count: 2, insertions_count: 24, deletions_count: 0}, + {sha: "aad811540ff2a544c8d1fd7410d2c029099845f0", date: "2023-04-15 00:04:58 UTC", description: "begin laying out primitives for programmatically querying schema", pr_number: 17130, scopes: ["config"], type: "chore", breaking_change: false, author: "Toby Lawrence", files_count: 26, insertions_count: 1303, deletions_count: 162}, + {sha: "c3aa14fd4d2b72a3863b8a8f6590c8ba65cc6c56", date: "2023-04-15 13:08:25 UTC", description: "encode influx line when no tags present", pr_number: 17029, scopes: ["influxdb_logs"], type: "fix", breaking_change: false, author: "Juvenn Woo", files_count: 2, insertions_count: 24, deletions_count: 14}, + {sha: "71d1bf6bae80b4d4518e9ea3f87d0d6ecd000984", date: "2023-04-15 01:10:01 UTC", description: "recurse through schema refs when determining eligibility for unevaluated properties", pr_number: 17150, scopes: ["config"], type: "fix", breaking_change: false, author: "Toby Lawrence", files_count: 4, insertions_count: 64, deletions_count: 24}, + {sha: "10fce656f624344facf662c7a54282dc46d63303", date: "2023-04-14 23:18:52 UTC", description: "true up cargo.lock", pr_number: 17149, scopes: ["deps"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "730c9386f66b6348c64a268ef37e752343d8fb9a", date: "2023-04-15 05:56:20 UTC", description: "Adjust doc comment locations", pr_number: 17154, scopes: [], type: "docs", breaking_change: false, author: "Spencer Gilbert", files_count: 2, insertions_count: 31, deletions_count: 33}, + {sha: "f06692b27ac480eb258faab14adce1f7b500f030", date: "2023-04-15 12:41:32 UTC", description: "warn on label expansions and collisions", pr_number: 17052, scopes: ["loki sink"], type: "chore", breaking_change: false, author: "Harald Gutmann", files_count: 1, insertions_count: 151, deletions_count: 23}, + {sha: "65a8856ab08296bf6da22f7dbf3b9a6da64aff6a", date: "2023-04-15 07:04:27 UTC", description: 
"make doc style edits", pr_number: 17155, scopes: ["docs"], type: "chore", breaking_change: false, author: "May Lee", files_count: 21, insertions_count: 44, deletions_count: 44}, + {sha: "c1691313e34fc77af5c37abdefa1322ee20e3398", date: "2023-04-15 06:50:22 UTC", description: "update the `v0.28.0` upgrade guide with note about `datadog_logs` sink `hostname` key", pr_number: 17156, scopes: ["docs"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 8, deletions_count: 1}, + {sha: "854d71e48883b703b1eb67b538e7ac3b21037fae", date: "2023-04-19 03:19:16 UTC", description: "bump docker/metadata-action from 4.3.0 to 4.4.0", pr_number: 17170, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "9ecfc8c8159d4093a28de270885e880628a90127", date: "2023-04-19 23:22:53 UTC", description: "Remove deprecated `max_length` setting from `tcp` and `unix` modes.", pr_number: 17162, scopes: ["socket source"], type: "chore", breaking_change: false, author: "neuronull", files_count: 8, insertions_count: 86, deletions_count: 97}, + {sha: "3c9255658c994a002b024db89c9cc32dd718de9c", date: "2023-04-20 01:25:50 UTC", description: "Remove trailing, unmatched quote", pr_number: 17163, scopes: ["docs"], type: "chore", breaking_change: false, author: "Mark Lodato", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "3b38ba82c3727eac93c0d0a992f248b72435dac6", date: "2023-04-20 02:52:30 UTC", description: "emit human-friendly version of enum variant/property names in schema", pr_number: 17171, scopes: ["config"], type: "chore", breaking_change: false, author: "Toby Lawrence", files_count: 14, insertions_count: 454, deletions_count: 135}, + {sha: "68b54a9bc0ae07d916ec48e997a03f7681e54ccc", date: "2023-04-20 13:34:16 UTC", description: "pulsar-rs bump to v5.1.1", pr_number: 17159, scopes: ["pulsar"], type: "chore", breaking_change: false, author: "kannar", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "1f0de6b5b90734b99b2c44ea500767f2c013e25c", date: "2023-04-21 05:11:50 UTC", description: "Regenerate manifests for 0.21.1 chart", pr_number: 17187, scopes: ["releasing"], type: "chore", breaking_change: false, author: "neuronull", files_count: 18, insertions_count: 22, deletions_count: 22}, + {sha: "a2882f384e24c13efc2dcf55885f609470e7e9e4", date: "2023-04-21 07:31:41 UTC", description: "Update h2", pr_number: 17189, scopes: ["deps"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "5dff0ed37a89e8cfc9db3ca499454dfe8198eadf", date: "2023-04-22 00:15:30 UTC", description: "remove the remove of source_ip", pr_number: 17184, scopes: ["syslog source"], type: "chore", breaking_change: false, author: "Stephen Wakely", files_count: 1, insertions_count: 0, deletions_count: 1}, + {sha: "c10d30bd35494ea336d90d0abf9977349c38d154", date: "2023-04-25 01:27:59 UTC", description: "Support AMQ Properties (content-type) in AMQP sink", pr_number: 17174, scopes: ["amqp sink"], type: "enhancement", breaking_change: false, author: "Aurélien Rouëné", files_count: 5, insertions_count: 75, deletions_count: 4}, + {sha: "c304a8c9b554a18dc39eadcd4d06f81d0d3baed1", date: "2023-04-25 09:40:24 UTC", description: "Upgrade Debian to bullseye for distroless image", pr_number: 17160, scopes: ["deps"], type: "chore", breaking_change: false, author: "SIPR", files_count: 1, insertions_count: 1, deletions_count: 1}, + 
{sha: "32a935b4d74bd38ba96c717291430f03fa80f4c4", date: "2023-04-25 01:57:12 UTC", description: "ignore `.helix` dir", pr_number: 17203, scopes: ["dev"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 0}, + {sha: "d396320162e068d82f8f7d4e47bc8984503750e7", date: "2023-04-25 06:23:44 UTC", description: "Upgrade cue to 0.5.0", pr_number: 17204, scopes: ["deps"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 2, insertions_count: 4, deletions_count: 4}, + {sha: "ef15696292c80b80932e20093e833792d9b2aa71", date: "2023-04-25 05:24:19 UTC", description: "Upgrade rust to 1.69.0", pr_number: 17194, scopes: ["deps"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 87, insertions_count: 195, deletions_count: 213}, + {sha: "40c9afc584be350117ada03216cbdf43cbe8775d", date: "2023-04-26 04:48:03 UTC", description: "bump mock_instant from 0.2.1 to 0.3.0", pr_number: 17210, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "c80c5eb22c1f238903d5c291d944a2b8db7b73b9", date: "2023-04-26 08:24:54 UTC", description: "bump enumflags2 from 0.7.6 to 0.7.7", pr_number: 17206, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 5, deletions_count: 5}, + {sha: "aa9cbd078ff7f8ac35dc5555533b7394764b86ca", date: "2023-04-26 22:44:12 UTC", description: "bump tonic from 0.9.1 to 0.9.2", pr_number: 17221, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 5, deletions_count: 5}, + {sha: "410aa3cab29b91b59abadadceccffe14e022f06e", date: "2023-04-26 22:47:50 UTC", description: "bump regex from 1.7.3 to 1.8.1", pr_number: 17222, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 5, insertions_count: 27, deletions_count: 12}, + {sha: "40d543a6a4cfc70a870080df6e543257b4004198", date: "2023-04-27 00:34:10 UTC", description: "Add known issues fixed by 0.29.1", pr_number: 17218, scopes: ["releasing"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 2, insertions_count: 9, deletions_count: 3}, + {sha: "752d4245c7f4cfbb4513183aeada24ce8a0e4891", date: "2023-04-27 02:10:45 UTC", description: "Remove unneeded `yaml` dependency from website", pr_number: 17215, scopes: ["docs"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 2}, + {sha: "9031d0faa2811187874364e1b5a3305c9ed0c0da", date: "2023-04-28 01:58:17 UTC", description: "Re-add transform definitions", pr_number: 17152, scopes: [], type: "chore", breaking_change: false, author: "Stephen Wakely", files_count: 87, insertions_count: 2714, deletions_count: 2088}, + {sha: "29c34c073c0dde0e5d9f69c94193ae547538da5d", date: "2023-04-28 05:41:31 UTC", description: "(syslog source): add source_ip to some syslog tests", pr_number: 17235, scopes: [], type: "chore", breaking_change: false, author: "Stephen Wakely", files_count: 1, insertions_count: 39, deletions_count: 5}, + {sha: "8067f84ae38ad613af0063179e19e7bbf5448ca4", date: "2023-04-27 23:38:37 UTC", description: "bump tokio from 1.27.0 to 1.28.0", pr_number: 17231, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 9, insertions_count: 15, deletions_count: 15}, + {sha: 
"1e432089f4a3375b2a6dfefb1de3197af5f2313d", date: "2023-04-28 07:07:09 UTC", description: "Dont panic with non object field kinds", pr_number: 17140, scopes: ["schemas"], type: "fix", breaking_change: false, author: "Stephen Wakely", files_count: 29, insertions_count: 855, deletions_count: 119}, + {sha: "cfc387d8c4595bfd031cd28d88ac2483200cb53e", date: "2023-04-28 22:35:18 UTC", description: "bump dunce from 1.0.3 to 1.0.4", pr_number: 17244, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "d286d16dcccca67ea2c1bd994f5440cfca303732", date: "2023-04-29 05:51:58 UTC", description: "bump clap_complete from 4.2.0 to 4.2.1", pr_number: 17229, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "d648c86721a689f2e4add0da46c6c9b011e438d6", date: "2023-04-29 06:52:13 UTC", description: "Add full codec support to AWS S3 source/sink", pr_number: 17098, scopes: ["codecs"], type: "feat", breaking_change: false, author: "Nathan Fox", files_count: 5, insertions_count: 308, deletions_count: 101}, + {sha: "4b80c714b68bb9acc2869c48b71848d11954c6aa", date: "2023-04-29 09:58:15 UTC", description: "Install the correct `mold` based on CPU architecture", pr_number: 17248, scopes: ["dev"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "bc6f7fd5109242cc53d7f388ff264662b6a6c223", date: "2023-05-01 23:06:09 UTC", description: "bump uuid from 1.3.0 to 1.3.2", pr_number: 17256, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "41ac76ed03bfc7c08e2f8262eee66c7bae01d5af", date: "2023-05-01 23:07:28 UTC", description: "bump axum from 0.6.12 to 0.6.18", pr_number: 17257, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 5, deletions_count: 5}, + {sha: "a88aba49a357e547a43a7d985a9ebd8d5c58f9a2", date: "2023-05-02 03:32:38 UTC", description: "bump prost-build from 0.11.8 to 0.11.9", pr_number: 17260, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 8, deletions_count: 8}, + {sha: "7570bb31e2f471e3ff8bc818c24e9bde3090818c", date: "2023-05-02 03:32:55 UTC", description: "bump serde_json from 1.0.95 to 1.0.96", pr_number: 17258, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 6, insertions_count: 7, deletions_count: 7}, + {sha: "be69f5f361ce4621c01f522c7270c5f97b2b7069", date: "2023-05-02 22:51:36 UTC", description: "bump directories from 5.0.0 to 5.0.1", pr_number: 17271, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 15, deletions_count: 8}, + {sha: "036ad4ab17ddadfa1e24164ffbfa28b458e4c74e", date: "2023-05-02 22:52:34 UTC", description: "bump serde from 1.0.159 to 1.0.160", pr_number: 17270, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 8, insertions_count: 11, deletions_count: 11}, + {sha: "1406c087db2f377eff65065c5f2fbcb295d4d138", date: "2023-05-02 22:54:19 UTC", description: "bump tracing-subscriber from 0.3.16 to 0.3.17", pr_number: 17268, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 7, 
insertions_count: 9, deletions_count: 9}, + {sha: "f696e7bde782eac78d4692ad5d0de81a7e99c400", date: "2023-05-03 03:19:53 UTC", description: "bump num_enum from 0.6.0 to 0.6.1", pr_number: 17272, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 7, deletions_count: 7}, + {sha: "03e905e304d2253dfcd0019105337df23e72d80c", date: "2023-05-03 04:22:00 UTC", description: "add note to DEVELOPING.md re panics", pr_number: 17277, scopes: [], type: "chore", breaking_change: false, author: "Stephen Wakely", files_count: 1, insertions_count: 11, deletions_count: 0}, + {sha: "bc618a25e4c501857a0ac3747c4c7735a6192791", date: "2023-05-03 04:31:40 UTC", description: "bump libc from 0.2.141 to 0.2.142", pr_number: 17273, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "9b6ef243cac4abc758e288133fb732b7b504f032", date: "2023-05-03 00:40:40 UTC", description: " Elasticsearch sink with api_version set to \"auto\" does not recognize the API version of ES6 as V6 (#17226)", pr_number: 17227, scopes: ["elasticsearch sink"], type: "fix", breaking_change: false, author: "Sergey Yedrikov", files_count: 1, insertions_count: 27, deletions_count: 18}, + {sha: "61c0d764af78826c8d01c5295924bf0a31c810e2", date: "2023-05-03 00:39:32 UTC", description: "remove editors from gitignore", pr_number: 17267, scopes: ["dev"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 0, deletions_count: 3}, + {sha: "4335b0a34a44af82bb63739e8e9b3ffc72ecf3f7", date: "2023-05-03 04:48:47 UTC", description: "Disable scheduled runs of Baseline Timings workflow", pr_number: 17281, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 4, deletions_count: 2}, + {sha: "e0a07c6dfe3ecadb8f88fcd343d302d5c142761d", date: "2023-05-03 11:45:36 UTC", description: "bump tonic-build from 0.8.4 to 0.9.2", pr_number: 17274, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 6, deletions_count: 4}, + {sha: "3d419315987671c1d3867e357d921f266c549c71", date: "2023-05-03 23:46:34 UTC", description: "bump opendal from 0.31.0 to 0.33.2", pr_number: 17286, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "d8c1f12f4a65129cad225632c9a43b13ac354a7a", date: "2023-05-03 23:46:59 UTC", description: "bump warp from 0.3.4 to 0.3.5", pr_number: 17288, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "c4784fd6e62d6cec76ced412512d909df304d005", date: "2023-05-03 23:47:24 UTC", description: "bump assert_cmd from 2.0.10 to 2.0.11", pr_number: 17290, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 11, deletions_count: 5}, + {sha: "6a5af3b862b0ffdcb509bd8a49641e41294b77b8", date: "2023-05-04 23:03:12 UTC", description: "bump anyhow from 1.0.70 to 1.0.71", pr_number: 17300, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 5, deletions_count: 5}, + {sha: "c8e0e5febbffece0a9a2fd7776767fd93a04e0db", date: "2023-05-04 23:04:24 UTC", description: "bump typetag from 0.2.7 to 0.2.8", pr_number: 17302, scopes: ["deps"], type: 
"chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 6, deletions_count: 6}, + {sha: "61e6154fd5f4712dae0b60661ff34ae586ce8ac4", date: "2023-05-04 23:05:08 UTC", description: "bump syslog from 6.0.1 to 6.1.0", pr_number: 17301, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "0ecceb3ba95312ed2a22b7f4350547d875184be9", date: "2023-05-04 23:06:11 UTC", description: "bump openssl from 0.10.50 to 0.10.52", pr_number: 17299, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 6, deletions_count: 6}, + {sha: "43173403e7f01d169a9b10a53b0e462e77c9c0f0", date: "2023-05-05 06:28:17 UTC", description: "adds 'metric_name' field to internal logs for the tag_cardinality_limit component", pr_number: 17295, scopes: ["observability"], type: "feat", breaking_change: false, author: "Pablo Pérez Schröder", files_count: 2, insertions_count: 7, deletions_count: 0}, + {sha: "bf7904b4ff9dbe354c401b816f43123ba6d48335", date: "2023-05-05 00:33:31 UTC", description: "Call function to regenerate auth token", pr_number: 17297, scopes: ["gcp_stackdriver_metrics sink"], type: "fix", breaking_change: false, author: "Spencer Gilbert", files_count: 1, insertions_count: 1, deletions_count: 0}, + {sha: "a1ec68d302757a7fae1082cc90c27ce8aad2c6ea", date: "2023-05-05 23:00:15 UTC", description: "bump prettydiff from 0.6.2 to 0.6.4", pr_number: 17315, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 12, deletions_count: 2}, + {sha: "79e97a2bc96f424335c62fe3519c8e1501f65bcf", date: "2023-05-05 23:01:14 UTC", description: "bump serde from 1.0.160 to 1.0.162", pr_number: 17317, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 8, insertions_count: 11, deletions_count: 11}, + {sha: "09176ec3e98febbca0ee54985248c5ecd0fdb39d", date: "2023-05-05 23:01:47 UTC", description: "bump reqwest from 0.11.16 to 0.11.17", pr_number: 17316, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 4, deletions_count: 4}, + {sha: "ef1337024677d4c6ff25cf9cb571cbada39fbe55", date: "2023-05-05 23:02:32 UTC", description: "bump flate2 from 1.0.25 to 1.0.26", pr_number: 17320, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 5, deletions_count: 5}, + {sha: "e1f125a34c91b2344174298a1f508124a0a0b4dd", date: "2023-05-06 02:00:57 UTC", description: "Increase timeout for integration tests", pr_number: 17326, scopes: ["ci"], type: "chore", breaking_change: false, author: "Spencer Gilbert", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "4911d3600a3fcce81f70fd8cb427b8389aca0bfb", date: "2023-05-06 04:05:32 UTC", description: "Upgrade `VRL` to `0.3.0`", pr_number: 17325, scopes: [], type: "chore", breaking_change: false, author: "Nathan Fox", files_count: 146, insertions_count: 514, deletions_count: 635}, + {sha: "a9c8dc88ce7c35b75ab3d1bf903aca0a6feaee53", date: "2023-05-06 03:59:16 UTC", description: "Document event type conditions", pr_number: 17311, scopes: ["docs"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 17, deletions_count: 2}, + {sha: "6afe206bd595d7933c518342a1602fa15668c0c9", date: "2023-05-09 00:26:21 UTC", description: "bump libc 
from 0.2.142 to 0.2.143", pr_number: 17338, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "64f4f697ecaf8c67096d6ceb5a33e42042e57cdc", date: "2023-05-09 00:27:02 UTC", description: "bump mongodb from 2.4.0 to 2.5.0", pr_number: 17337, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 46, deletions_count: 5}, + {sha: "80c82470b309901d83de03529312fc3e733d8e3e", date: "2023-05-09 00:27:34 UTC", description: "bump tokio-stream from 0.1.12 to 0.1.14", pr_number: 17339, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 4, deletions_count: 4}, + {sha: "bf8376c3030e6d6df61ca245f2d8be87443bf075", date: "2023-05-08 23:58:26 UTC", description: "Log underlying error for unhandled HTTP errors", pr_number: 17327, scopes: ["observability"], type: "enhancement", breaking_change: false, author: "Jesse Szwedko", files_count: 2, insertions_count: 4, deletions_count: 4}, + {sha: "9a723e33cc161b680140c4ef230fedf071e68031", date: "2023-05-09 04:56:27 UTC", description: "bump metrics, metrics-tracing-context, metrics-util", pr_number: 17336, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 9, insertions_count: 39, deletions_count: 56}, + {sha: "99b8dc13bcff379062ac276119e650055e08d0fc", date: "2023-05-09 23:08:13 UTC", description: "bump libc from 0.2.143 to 0.2.144", pr_number: 17346, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "f81ff1837adcf1cc4419bc936fe539e7dd882dbb", date: "2023-05-09 23:08:40 UTC", description: "bump quote from 1.0.26 to 1.0.27", pr_number: 17348, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 67, deletions_count: 67}, + {sha: "c43dcfdba4781b81f6418e96b286f37323c7fb26", date: "2023-05-09 23:42:33 UTC", description: "bump hyper from 0.14.25 to 0.14.26", pr_number: 17347, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "5d3f619ef3295180657529ad5bd44d837cb123b5", date: "2023-05-10 00:49:27 UTC", description: "Increase timeout for integration tests to 30m", pr_number: 17350, scopes: ["ci"], type: "chore", breaking_change: false, author: "Spencer Gilbert", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "ae602da29daad0c1c0081cac0bc27440d28440ad", date: "2023-05-10 23:29:01 UTC", description: "bump opendal from 0.33.2 to 0.34.0", pr_number: 17354, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 4}, + {sha: "05a4f17c555c1d2bd25acd7f3173940d98224b53", date: "2023-05-10 23:29:27 UTC", description: "bump async-graphql from 5.0.7 to 5.0.8", pr_number: 17357, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 10, deletions_count: 10}, + {sha: "ea24b4d1695e2484ad54f7e03edb6fcd1b8d0971", date: "2023-05-10 23:29:57 UTC", description: "bump wasm-bindgen from 0.2.84 to 0.2.85", pr_number: 17356, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 12, deletions_count: 12}, + {sha: 
"dae0c6ad6882bf0bdfa75bde439e3e0f9f4a9dea", date: "2023-05-10 23:31:44 UTC", description: "bump memmap2 from 0.5.10 to 0.6.0", pr_number: 17355, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "97b862c4db77a0192da3b505accf43dcba1c8d59", date: "2023-05-10 23:32:30 UTC", description: "bump console-subscriber from 0.1.8 to 0.1.9", pr_number: 17358, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 10, deletions_count: 42}, + {sha: "565668ea6598992ba47a039e872a18b2ffd19844", date: "2023-05-10 23:32:56 UTC", description: "bump clap_complete from 4.2.1 to 4.2.2", pr_number: 17359, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "9852c1770bd2dceecc9b30ffa72b1f95c0dfd719", date: "2023-05-11 23:45:35 UTC", description: "bump serde from 1.0.162 to 1.0.163", pr_number: 17366, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 8, insertions_count: 11, deletions_count: 11}, + {sha: "693584eb5002fc0c00586afa1c058bb8cfd0d58e", date: "2023-05-11 23:57:25 UTC", description: "bump async-graphql-warp from 5.0.7 to 5.0.8", pr_number: 17367, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "b9aac475025905943c80dd710f833e2e445c9093", date: "2023-05-12 00:07:31 UTC", description: "bump async-compression from 0.3.15 to 0.4.0", pr_number: 17365, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 18, deletions_count: 5}, + {sha: "ae6a51b52d2a0f93b3cf16638fd10a52e33294c9", date: "2023-05-12 05:35:29 UTC", description: "bump tokio from 1.28.0 to 1.28.1", pr_number: 17368, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 9, insertions_count: 12, deletions_count: 12}, + {sha: "22cda94d3b8fa555533b51f3ee6de39932b04775", date: "2023-05-12 01:40:00 UTC", description: "Update component statuses 2023Q2", pr_number: 17362, scopes: ["docs"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 42, insertions_count: 43, deletions_count: 44}, + {sha: "58ba7411967af541199042f76590e306e4c8c41f", date: "2023-05-12 03:04:12 UTC", description: "bump memmap2 from 0.6.0 to 0.6.1", pr_number: 17364, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "7350e1a11805db510814d4fc357e84d0e8d2cf25", date: "2023-05-12 22:10:31 UTC", description: "Add 3rd party license file and CI checks", pr_number: 17344, scopes: ["deps"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 9, insertions_count: 687, deletions_count: 2}, + {sha: "d1e558800a570556372949fd332097c3e138a2e8", date: "2023-05-12 23:14:02 UTC", description: "Clarify `key_field` for `sample` and `throttle` transforms", pr_number: 17372, scopes: ["docs"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 4, insertions_count: 24, deletions_count: 18}, + {sha: "a2b890352bc42e9a9a30163e26a2f181f08c4a3b", date: "2023-05-13 00:33:31 UTC", description: "Fix up missing license", pr_number: 17379, scopes: ["deps"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 1, 
insertions_count: 1, deletions_count: 0}, + {sha: "c6839995e28fd17aefbe440f092046e660d2fd70", date: "2023-05-16 02:06:09 UTC", description: "Add source id to metadata", pr_number: 17369, scopes: ["topology"], type: "enhancement", breaking_change: false, author: "Stephen Wakely", files_count: 12, insertions_count: 258, deletions_count: 53}, + {sha: "6c57ca07aee4402582b7b7c9c37324f49c14bf65", date: "2023-05-16 00:52:37 UTC", description: "Regen docs for sample and throttle", pr_number: 17390, scopes: [], type: "chore", breaking_change: false, author: "Spencer Gilbert", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "6b3db04f7f7ca700e7696d3430b989efc2a4b3b4", date: "2023-05-16 00:23:43 UTC", description: "Try to fix apt retries", pr_number: 17393, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "111cd07702befce55242c3940c59f05e374d52cf", date: "2023-05-16 08:02:52 UTC", description: "bump clap_complete from 4.2.2 to 4.2.3", pr_number: 17383, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "1951535eefe7e0812952d3037b40216106350e95", date: "2023-05-16 02:12:49 UTC", description: "Update internal log rate limiting messages", pr_number: 17394, scopes: ["observability"], type: "enhancement", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 49, deletions_count: 47}, + {sha: "f3734e81cb6409e496e771c0f75f18101b5e9605", date: "2023-05-16 05:29:55 UTC", description: "Fix formatting in labels example", pr_number: 17396, scopes: ["loki sink"], type: "docs", breaking_change: false, author: "Spencer Gilbert", files_count: 1, insertions_count: 4, deletions_count: 4}, + {sha: "970318839d5722a3ab40e8276a0ee6982fa798b3", date: "2023-05-16 02:36:48 UTC", description: "bump rdkafka from 0.29.0 to 0.30.0", pr_number: 17387, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 8, deletions_count: 7}, + {sha: "e8d3002d4bcb226ab79ed8b3212d1a123833c535", date: "2023-05-16 10:50:16 UTC", description: "bump pin-project from 1.0.12 to 1.1.0", pr_number: 17385, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 5, insertions_count: 9, deletions_count: 9}, + {sha: "ac51b8a35d83e5c24ac0686eb57f4f4bb347773b", date: "2023-05-16 10:55:14 UTC", description: "bump socket2 from 0.5.2 to 0.5.3", pr_number: 17384, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 6, deletions_count: 6}, + {sha: "6088abdf6b956940fee4ee827eefb9dce3e84a43", date: "2023-05-16 12:11:30 UTC", description: "bump h2 from 0.3.18 to 0.3.19", pr_number: 17388, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "12871685d3f6261ee0d50171584426aba96264ee", date: "2023-05-16 21:23:09 UTC", description: "bump security-framework from 2.8.2 to 2.9.0", pr_number: 17386, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 5, deletions_count: 5}, + {sha: "a6e1ae737e6ad17f9d3deecc6c887e41a1d86099", date: "2023-05-16 21:23:21 UTC", description: "bump proc-macro2 from 1.0.56 to 1.0.57", pr_number: 17400, scopes: ["deps"], type: "chore", breaking_change: false, author: 
"dependabot[bot]", files_count: 1, insertions_count: 65, deletions_count: 65}, + {sha: "3a3fe6337d940af3d2667c7775b2fa2e657648fc", date: "2023-05-16 21:24:26 UTC", description: "bump uuid from 1.3.2 to 1.3.3", pr_number: 17403, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "ae1dd6e4a67d046037154dab425e4fe6bfd11087", date: "2023-05-16 21:24:38 UTC", description: "bump tokio-tungstenite from 0.18.0 to 0.19.0", pr_number: 17404, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 38, deletions_count: 7}, + {sha: "05181765a5d2c7610adfcf6cd1e44610eb7ed79e", date: "2023-05-16 21:24:49 UTC", description: "bump wasm-bindgen from 0.2.85 to 0.2.86", pr_number: 17402, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 10, deletions_count: 10}, + {sha: "539f379911f735656eaff3aadd4f6aeeb4b681d5", date: "2023-05-17 02:24:29 UTC", description: "Add note about generating licenses to CONTRIBUTING.md", pr_number: 17410, scopes: ["dev"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 3, insertions_count: 13, deletions_count: 1}, + {sha: "5b5ad1682dc827e17610eb086d68f4f56e17138d", date: "2023-05-17 03:19:09 UTC", description: "bump inventory from 0.3.5 to 0.3.6", pr_number: 17401, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 3, deletions_count: 15}, + {sha: "dc6e54c18cc3eb7754d3865602b54ae46ec1f67a", date: "2023-05-17 03:19:50 UTC", description: "Add UX note about encoding of log_schema keys", pr_number: 17266, scopes: [], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 8, deletions_count: 0}, + {sha: "5c33f999f1e0814c4cc1857cef67415f7bba5cb7", date: "2023-05-17 03:36:29 UTC", description: "Remove ci-sweep tasks", pr_number: 17415, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 3, insertions_count: 0, deletions_count: 15}, + {sha: "da36fb6f9df3724267b30d845e092d2f7628d359", date: "2023-05-17 03:49:02 UTC", description: "Fix event assertions for `aws_ec2_metadata` transform", pr_number: 17413, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 5, deletions_count: 4}, + {sha: "5184d50f115426306a236402b9c76b0e6aa12fe6", date: "2023-05-17 03:49:57 UTC", description: "Add Enterprise link and update Support link", pr_number: 17408, scopes: ["docs"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 6, deletions_count: 1}, + {sha: "b6c7e0ae43222cd173e3d3bae7a62c3dcc985639", date: "2023-05-17 06:34:37 UTC", description: "remove transform type coercion", pr_number: 17411, scopes: [], type: "chore", breaking_change: false, author: "Luke Steensen", files_count: 7, insertions_count: 37, deletions_count: 80}, + {sha: "01b3cd7698dd9a7bf5e2fce909d6e7ef1ffa1313", date: "2023-05-17 21:20:12 UTC", description: "bump hashlink from 0.8.1 to 0.8.2", pr_number: 17419, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 4, deletions_count: 4}, + {sha: "3320eda52e5144eb8c0214481705a97edc197e81", date: "2023-05-17 21:20:28 UTC", description: "bump nkeys from 0.2.0 to 0.3.0", pr_number: 17421, scopes: ["deps"], type: "chore", 
breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 18, deletions_count: 3}, + {sha: "57f8bd4ea2cfdf305dab9875f49e3d5c348c2529", date: "2023-05-17 21:21:06 UTC", description: "bump mlua from 0.8.8 to 0.8.9", pr_number: 17423, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 6, deletions_count: 6}, + {sha: "58603b90ad595df96b6239c42c2dd9e4dce46475", date: "2023-05-17 21:23:23 UTC", description: "bump notify from 5.1.0 to 6.0.0", pr_number: 17422, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 4, deletions_count: 4}, + {sha: "c81ad30c3f6627a70586703e4e5e8db7625aeef7", date: "2023-05-17 23:45:29 UTC", description: "Let `region` be configured for default authentication", pr_number: 17414, scopes: ["aws provider"], type: "enhancement", breaking_change: false, author: "Jesse Szwedko", files_count: 3, insertions_count: 39, deletions_count: 1}, + ] +} diff --git a/website/cue/reference/versions.cue b/website/cue/reference/versions.cue index 46d1bbf76c8ff..0e7a2168f0805 100644 --- a/website/cue/reference/versions.cue +++ b/website/cue/reference/versions.cue @@ -2,6 +2,7 @@ package metadata // This has to be maintained manually because there's currently no way to sort versions programmatically versions: [string, ...string] & [ + "0.30.0", "0.29.1", "0.29.0", "0.28.2", From 80de738b8de91c378f1ab7a58a0a02201f4402fd Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Mon, 22 May 2023 12:58:14 -0700 Subject: [PATCH 020/236] chore(website): Fix upgrade guide dates So that it publishes. Signed-off-by: Jesse Szwedko --- ...-0-upgrade-guide.md => 2023-05-22-0-30-0-upgrade-guide.md} | 2 +- website/cue/reference/releases/0.30.0.cue | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename website/content/en/highlights/{2023-05-23-0-30-0-upgrade-guide.md => 2023-05-22-0-30-0-upgrade-guide.md} (98%) diff --git a/website/content/en/highlights/2023-05-23-0-30-0-upgrade-guide.md b/website/content/en/highlights/2023-05-22-0-30-0-upgrade-guide.md similarity index 98% rename from website/content/en/highlights/2023-05-23-0-30-0-upgrade-guide.md rename to website/content/en/highlights/2023-05-22-0-30-0-upgrade-guide.md index 4f8e7f7294e34..db23399591a9c 100644 --- a/website/content/en/highlights/2023-05-23-0-30-0-upgrade-guide.md +++ b/website/content/en/highlights/2023-05-22-0-30-0-upgrade-guide.md @@ -1,5 +1,5 @@ --- -date: "2023-05-23" +date: "2023-05-22" title: "0.30 Upgrade Guide" description: "An upgrade guide that addresses breaking changes in 0.30.0" authors: ["neuronull"] diff --git a/website/cue/reference/releases/0.30.0.cue b/website/cue/reference/releases/0.30.0.cue index 0c887b1b751c3..9270f861e6291 100644 --- a/website/cue/reference/releases/0.30.0.cue +++ b/website/cue/reference/releases/0.30.0.cue @@ -11,7 +11,7 @@ releases: "0.30.0": { a refresh of the component statuses in the docs following the [stability guarantees](/docs/about/under-the-hood/guarantees/#stability-guarantees). - Be sure to check out the [upgrade guide](/highlights/2023-05-23-0-30-0-upgrade-guide) for + Be sure to check out the [upgrade guide](/highlights/2023-05-22-0-30-0-upgrade-guide) for breaking changes in this release. """ @@ -88,7 +88,7 @@ releases: "0.30.0": { description: """ The deprecated `max_length` option of the `socket` source was removed. 
Please see the [upgrade - guide](/highlights/2023-05-23-0-30-0-upgrade-guide#socket-source-max-length) for + guide](/highlights/2023-05-22-0-30-0-upgrade-guide#socket-source-max-length) for more details. """ breaking: true From 85703e792fe0ff70a466380823cf2d4b14b21603 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Mon, 22 May 2023 11:07:21 -0700 Subject: [PATCH 021/236] chore(deps): Bump PR limit for Dependabot to 100 (#17459) We seem to be bumping up against this causing dependabot to space out updates. I don't see a great reason to limit it, but could be missing something since it does have a fairly low default limit of 5. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- .github/dependabot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 0097bd40aa4f0..102567ca0e6a5 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -9,7 +9,7 @@ updates: - "domain: deps" commit-message: prefix: "chore(deps)" - open-pull-requests-limit: 10 + open-pull-requests-limit: 100 - package-ecosystem: "github-actions" directory: "/" schedule: From 299fd6ab53b1e818d09ae38f4321c20bdce4f30e Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Mon, 22 May 2023 11:22:01 -0700 Subject: [PATCH 022/236] chore(deps): Update fs_extra to 1.3.0 (#17458) Fixes a compile-time warning about code being incompatible with future versions of Rust. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 51a6e03626ef3..f0342e808e426 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3291,9 +3291,9 @@ dependencies = [ [[package]] name = "fs_extra" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" [[package]] name = "fsevent-sys" From 1f54415cb3fd4dc8f3f1b5989aa8d051cbe1faa5 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Mon, 22 May 2023 11:47:25 -0700 Subject: [PATCH 023/236] chore(deps): Bump lalrpop to 0.19.12 (#17457) Fixes compile-time warning about lalrpop using code that won't be compatible with newer versions of Rust. 
Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- Cargo.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f0342e808e426..c07068f301b5d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4559,15 +4559,15 @@ dependencies = [ [[package]] name = "lalrpop" -version = "0.19.8" +version = "0.19.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30455341b0e18f276fa64540aff54deafb54c589de6aca68659c63dd2d5d823" +checksum = "0a1cbf952127589f2851ab2046af368fd20645491bb4b376f04b7f94d7a9837b" dependencies = [ "ascii-canvas", - "atty", "bit-set", "diff", "ena", + "is-terminal", "itertools", "lalrpop-util", "petgraph", @@ -4581,9 +4581,9 @@ dependencies = [ [[package]] name = "lalrpop-util" -version = "0.19.9" +version = "0.19.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c1f7869c94d214466c5fd432dfed12c379fd87786768d36455892d46b18edd" +checksum = "d3c48237b9604c5a4702de6b824e02006c3214327564636aef27c1028a8fa0ed" [[package]] name = "lapin" From 547783d17e8d2d3d351213a034e8d38fdcaa3047 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Mon, 22 May 2023 12:11:46 -0700 Subject: [PATCH 024/236] chore(docs): Clarify when component received and sent bytes events should be emitted (#17464) Also refactors the event lists so that they each have an `Emission` line. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- docs/specs/component.md | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/docs/specs/component.md b/docs/specs/component.md index eaa819c3f16b4..8a8856995dea5 100644 --- a/docs/specs/component.md +++ b/docs/specs/component.md @@ -114,7 +114,8 @@ _All components_ MUST emit a `ComponentEventsReceived` event that represents the reception of Vector events from an upstream component. - Emission - - MUST emit immediately after creating or receiving Vector events. + - MUST emit immediately after creating or receiving Vector events, before modification or metadata + is added. - Properties - `count` - The count of Vector events. - `byte_size` - The estimated JSON byte size of all events received. @@ -130,9 +131,11 @@ the reception of Vector events from an upstream component. #### ComponentBytesReceived -*Sources* MUST emit a `ComponentBytesReceived` event immediately after receiving, decompressing -and filtering bytes from the upstream source and before the creation of a Vector event. +*Sources* MUST emit a `ComponentBytesReceived` event that represents the reception of bytes. +- Emission + - MUST emit immediately after receiving, decompressing and filtering bytes from the upstream + source and before the creation of a Vector event. - Properties - `byte_size` - For UDP, TCP, and Unix protocols, the total number of bytes received from @@ -155,13 +158,13 @@ and filtering bytes from the upstream source and before the creation of a Vector #### ComponentBytesSent -*Sinks* that send events downstream, MUST emit a `ComponentBytesSent` event immediately after -sending bytes to the downstream target, if the transmission was successful. The reported bytes MUST -be before compression. -Note that for sinks that simply expose data, but don't delete the data after -sending it, like the `prometheus_exporter` sink, SHOULD NOT publish this metric. +*Sinks* MUST emit a `ComponentBytesSent` event that represents the transmission of bytes.
+- Emission + - MUST emit a `ComponentBytesSent` event immediately after sending bytes to the downstream target, + if the transmission was successful. The reported bytes MUST be before compression. + - Note that sinks that simply expose data, but don't delete the data after sending it, like the + `prometheus_exporter` sink, SHOULD NOT emit this metric. - Properties - `byte_size` - For UDP, TCP, and Unix protocols, the total number of bytes placed on the From 78bbfbc0205d97b401b5ba3084fe71e2bfdd7f33 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Mon, 22 May 2023 13:49:14 -0700 Subject: [PATCH 025/236] chore: Bump version to 0.31.0 (#17466) Now that 0.30.0 has been released. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c07068f301b5d..4585a636687dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9194,7 +9194,7 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vector" -version = "0.30.0" +version = "0.31.0" dependencies = [ "apache-avro", "approx", diff --git a/Cargo.toml b/Cargo.toml index 6816650a3458f..efa84159fbc1c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "vector" -version = "0.30.0" +version = "0.31.0" authors = ["Vector Contributors "] edition = "2021" description = "A lightweight and ultra-fast tool for building observability pipelines" From 36998428099da9b3ce4bcf0fd6f8787be1920363 Mon Sep 17 00:00:00 2001 From: neuronull Date: Mon, 22 May 2023 15:43:33 -0600 Subject: [PATCH 026/236] chore(ci): fix failure notify job conditional in publish workflow (#17468) The syntax of the conditional for the publish workflow's failure notification job had a bug where an explicit expression was defined and was paired with an implicit expression. This resulted in the job running when it shouldn't. Removed the explicit expression syntax from the other job conditionals to avoid potentially doing that in the future. --- .github/workflows/publish.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 3f22f5cb5abc3..30d03d9b00b80 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -548,7 +548,7 @@ jobs: publish-github: name: Publish to GitHub # We only publish to GitHub for versioned releases, not nightlies. - if: ${{ inputs.channel == 'release' }} + if: inputs.channel == 'release' runs-on: ubuntu-20.04 needs: - generate-publish-metadata @@ -618,7 +618,7 @@ jobs: publish-homebrew: name: Publish to Homebrew # We only publish to Homebrew for versioned releases, not nightlies. - if: ${{ inputs.channel == 'release' }} + if: inputs.channel == 'release' runs-on: ubuntu-20.04 needs: - generate-publish-metadata @@ -638,7 +638,7 @@ jobs: publish-cloudsmith: name: Publish to Cloudsmith # We only publish to CloudSmith for versioned releases, not nightlies. 
- if: ${{ inputs.channel == 'release' }} + if: inputs.channel == 'release' runs-on: ubuntu-20.04 needs: - generate-publish-metadata @@ -751,7 +751,7 @@ jobs: publish-failure: name: Send Publish Failure Notification - if: ${{ inputs.channel != 'custom' }} && failure() + if: failure() && inputs.channel != 'custom' runs-on: ubuntu-20.04 needs: - generate-publish-metadata From f54787190119255c1f97b2fe603ea5e65355b1cd Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Mon, 22 May 2023 15:09:59 -0700 Subject: [PATCH 027/236] chore(kubernetes): Bump k8s manifests to 0.22.0 (#17467) Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- distribution/kubernetes/vector-agent/README.md | 2 +- distribution/kubernetes/vector-agent/configmap.yaml | 2 +- distribution/kubernetes/vector-agent/daemonset.yaml | 4 ++-- distribution/kubernetes/vector-agent/rbac.yaml | 4 ++-- distribution/kubernetes/vector-agent/service-headless.yaml | 2 +- distribution/kubernetes/vector-agent/serviceaccount.yaml | 2 +- distribution/kubernetes/vector-aggregator/README.md | 2 +- distribution/kubernetes/vector-aggregator/configmap.yaml | 2 +- .../kubernetes/vector-aggregator/service-headless.yaml | 2 +- distribution/kubernetes/vector-aggregator/service.yaml | 2 +- distribution/kubernetes/vector-aggregator/serviceaccount.yaml | 2 +- distribution/kubernetes/vector-aggregator/statefulset.yaml | 4 ++-- distribution/kubernetes/vector-stateless-aggregator/README.md | 2 +- .../kubernetes/vector-stateless-aggregator/configmap.yaml | 2 +- .../kubernetes/vector-stateless-aggregator/deployment.yaml | 4 ++-- .../vector-stateless-aggregator/service-headless.yaml | 2 +- .../kubernetes/vector-stateless-aggregator/service.yaml | 2 +- .../vector-stateless-aggregator/serviceaccount.yaml | 2 +- 18 files changed, 22 insertions(+), 22 deletions(-) diff --git a/distribution/kubernetes/vector-agent/README.md b/distribution/kubernetes/vector-agent/README.md index 4c23f60fef5e4..58bc8ff221193 100644 --- a/distribution/kubernetes/vector-agent/README.md +++ b/distribution/kubernetes/vector-agent/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.21.1 with the following `values.yaml`: +version 0.22.0 with the following `values.yaml`: ```yaml role: Agent diff --git a/distribution/kubernetes/vector-agent/configmap.yaml b/distribution/kubernetes/vector-agent/configmap.yaml index 444a9830b7cf9..d7c928b159f7c 100644 --- a/distribution/kubernetes/vector-agent/configmap.yaml +++ b/distribution/kubernetes/vector-agent/configmap.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" data: agent.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-agent/daemonset.yaml b/distribution/kubernetes/vector-agent/daemonset.yaml index 7c41dffdd16fc..8508420956313 100644 --- a/distribution/kubernetes/vector-agent/daemonset.yaml +++ b/distribution/kubernetes/vector-agent/daemonset.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" annotations: {} spec: selector: @@ -29,7 +29,7 @@ spec: dnsPolicy: 
ClusterFirst containers: - name: vector - image: "timberio/vector:0.29.1-distroless-libc" + image: "timberio/vector:0.30.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-agent/rbac.yaml b/distribution/kubernetes/vector-agent/rbac.yaml index b02da7d71b441..2ad572473e7d7 100644 --- a/distribution/kubernetes/vector-agent/rbac.yaml +++ b/distribution/kubernetes/vector-agent/rbac.yaml @@ -10,7 +10,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" rules: - apiGroups: - "" @@ -31,7 +31,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/distribution/kubernetes/vector-agent/service-headless.yaml b/distribution/kubernetes/vector-agent/service-headless.yaml index aa06b662acef9..18ea854b8f9e7 100644 --- a/distribution/kubernetes/vector-agent/service-headless.yaml +++ b/distribution/kubernetes/vector-agent/service-headless.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-agent/serviceaccount.yaml b/distribution/kubernetes/vector-agent/serviceaccount.yaml index f64c260c94d3a..18d7093b583f8 100644 --- a/distribution/kubernetes/vector-agent/serviceaccount.yaml +++ b/distribution/kubernetes/vector-agent/serviceaccount.yaml @@ -8,5 +8,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Agent - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" automountServiceAccountToken: true diff --git a/distribution/kubernetes/vector-aggregator/README.md b/distribution/kubernetes/vector-aggregator/README.md index 5f1fdd0160509..194d80cc3ebb1 100644 --- a/distribution/kubernetes/vector-aggregator/README.md +++ b/distribution/kubernetes/vector-aggregator/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.21.1 with the following `values.yaml`: +version 0.22.0 with the following `values.yaml`: ```yaml diff --git a/distribution/kubernetes/vector-aggregator/configmap.yaml b/distribution/kubernetes/vector-aggregator/configmap.yaml index 0fae6e2357dd9..028a2f273a075 100644 --- a/distribution/kubernetes/vector-aggregator/configmap.yaml +++ b/distribution/kubernetes/vector-aggregator/configmap.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" data: aggregator.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-aggregator/service-headless.yaml b/distribution/kubernetes/vector-aggregator/service-headless.yaml index fb06b330de493..06cad3551b635 100644 --- 
a/distribution/kubernetes/vector-aggregator/service-headless.yaml +++ b/distribution/kubernetes/vector-aggregator/service-headless.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-aggregator/service.yaml b/distribution/kubernetes/vector-aggregator/service.yaml index 897c1e1d59c24..449a24950bc1e 100644 --- a/distribution/kubernetes/vector-aggregator/service.yaml +++ b/distribution/kubernetes/vector-aggregator/service.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" annotations: spec: ports: diff --git a/distribution/kubernetes/vector-aggregator/serviceaccount.yaml b/distribution/kubernetes/vector-aggregator/serviceaccount.yaml index 0a45553520cf3..0bf2da2d58d3b 100644 --- a/distribution/kubernetes/vector-aggregator/serviceaccount.yaml +++ b/distribution/kubernetes/vector-aggregator/serviceaccount.yaml @@ -8,5 +8,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" automountServiceAccountToken: true diff --git a/distribution/kubernetes/vector-aggregator/statefulset.yaml b/distribution/kubernetes/vector-aggregator/statefulset.yaml index b1fceaa085f17..2eef56ffd5ad9 100644 --- a/distribution/kubernetes/vector-aggregator/statefulset.yaml +++ b/distribution/kubernetes/vector-aggregator/statefulset.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Aggregator - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" annotations: {} spec: replicas: 1 @@ -32,7 +32,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.29.1-distroless-libc" + image: "timberio/vector:0.30.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/README.md b/distribution/kubernetes/vector-stateless-aggregator/README.md index a921045ffdb63..2703746f5d435 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/README.md +++ b/distribution/kubernetes/vector-stateless-aggregator/README.md @@ -1,6 +1,6 @@ The kubernetes manifests found in this directory have been automatically generated from the [helm chart `vector/vector`](https://github.com/vectordotdev/helm-charts/tree/master/charts/vector) -version 0.21.1 with the following `values.yaml`: +version 0.22.0 with the following `values.yaml`: ```yaml role: Stateless-Aggregator diff --git a/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml b/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml index 2d5a0971e3d77..766c693669f7b 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/configmap.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: 
"0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" data: aggregator.yaml: | data_dir: /vector-data-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml b/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml index ecd7b3bfd61e7..6ff20dc958816 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/deployment.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" annotations: {} spec: replicas: 1 @@ -30,7 +30,7 @@ spec: dnsPolicy: ClusterFirst containers: - name: vector - image: "timberio/vector:0.29.1-distroless-libc" + image: "timberio/vector:0.30.0-distroless-libc" imagePullPolicy: IfNotPresent args: - --config-dir diff --git a/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml b/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml index 3a9b4da3bc67e..3230af57fbd76 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/service-headless.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" annotations: spec: clusterIP: None diff --git a/distribution/kubernetes/vector-stateless-aggregator/service.yaml b/distribution/kubernetes/vector-stateless-aggregator/service.yaml index 7096ca16a604a..a22d86ff0925f 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/service.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/service.yaml @@ -8,7 +8,7 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" annotations: spec: ports: diff --git a/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml b/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml index e41ed49a7442e..50bba163cf9f5 100644 --- a/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml +++ b/distribution/kubernetes/vector-stateless-aggregator/serviceaccount.yaml @@ -8,5 +8,5 @@ metadata: app.kubernetes.io/name: vector app.kubernetes.io/instance: vector app.kubernetes.io/component: Stateless-Aggregator - app.kubernetes.io/version: "0.29.1-distroless-libc" + app.kubernetes.io/version: "0.30.0-distroless-libc" automountServiceAccountToken: true From 897e45d5aa3d9ede6aa9115dae41a90b5a200ffa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 09:06:22 -0400 Subject: [PATCH 028/236] chore(deps): bump regex from 1.8.1 to 1.8.2 (#17469) Bumps [regex](https://github.com/rust-lang/regex) from 1.8.1 to 1.8.2.
Changelog (sourced from regex's changelog):

1.8.2 (2023-05-22)

This is a patch release that fixes a bug where regex compilation could panic in debug mode for regexes with large counted repetitions. For example, `a{2147483516}{2147483416}{5}` resulted in an integer overflow that wrapped in release mode but panicked in debug mode. Despite the unintended wrapping arithmetic in release mode, it didn't cause any other logical bugs, since the errant code was for new analysis that wasn't used yet.
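To make the panic scenario above concrete, here is a minimal, hypothetical reproduction sketch (not part of this patch series). It assumes a scratch crate whose only dependency is the `regex` crate: a debug build against regex 1.8.1 could panic while compiling this pattern, whereas regex 1.8.2 (and 1.8.1 release builds) simply reject it with a compile error.

```rust
use regex::Regex;

fn main() {
    // The counted repetition quoted in the changelog; its implied size is large
    // enough to overflow the internal analysis that 1.8.2 hardens.
    let pattern = "a{2147483516}{2147483416}{5}";

    // With regex 1.8.2 this returns an error instead of panicking in debug builds.
    match Regex::new(pattern) {
        Ok(_) => println!("unexpectedly compiled"),
        Err(err) => println!("rejected: {err}"),
    }
}
```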
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 10 +++++----- Cargo.toml | 2 +- lib/codecs/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- vdev/Cargo.toml | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4585a636687dc..c71faae1f3df1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6786,13 +6786,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" +checksum = "d1a59b5d8e97dee33696bf13c5ba8ab85341c002922fba050069326b9c498974" dependencies = [ "aho-corasick 1.0.1", "memchr", - "regex-syntax 0.7.1", + "regex-syntax 0.7.2", ] [[package]] @@ -6812,9 +6812,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" +checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78" [[package]] name = "rend" diff --git a/Cargo.toml b/Cargo.toml index efa84159fbc1c..03bb274d1f2d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,7 +293,7 @@ rand = { version = "0.8.5", default-features = false, features = ["small_rng"] } rand_distr = { version = "0.4.3", default-features = false } rdkafka = { version = "0.31.0", default-features = false, features = ["tokio", "libz", "ssl", "zstd"], optional = true } redis = { version = "0.23.0", default-features = false, features = ["connection-manager", "tokio-comp", "tokio-native-tls-comp"], optional = true } -regex = { version = "1.8.1", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.2", default-features = false, features = ["std", "perf"] } roaring = { version = "0.10.1", default-features = false, optional = true } seahash = { version = "4.1.0", default-features = false } semver = { version = "1.0.17", default-features = false, features = ["serde", "std"], optional = true } diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 9fda0b8cddc29..96d348a9a1357 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -17,7 +17,7 @@ memchr = { version = "2", default-features = false } once_cell = { version = "1.17", default-features = false } ordered-float = { version = "3.7.0", default-features = false } prost = { version = "0.11.8", default-features = false, features = ["std"] } -regex = { version = "1.8.1", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.2", default-features = false, features = ["std", "perf"] } serde = { version = "1", default-features = false, features = ["derive"] } serde_json = { version = "1", default-features = false } smallvec = { version = "1", default-features = false, features = ["union"] } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 097b5b49daf4d..ed61db90f8772 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -38,7 +38,7 @@ proptest = { version = "1.1", optional = true } prost-types = { version = "0.11", default-features = false } prost = { version = "0.11", default-features = false, features = ["std"] } quanta = { version = "0.11.0", default-features = false } -regex = { version = "1.8.1", default-features = false, features = ["std", "perf"] } 
+regex = { version = "1.8.2", default-features = false, features = ["std", "perf"] } ryu = { version = "1", default-features = false } serde = { version = "1.0.163", default-features = false, features = ["derive", "rc"] } serde_json = { version = "1.0.96", default-features = false } diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index c8b9b4f39ccf2..01952ef5adf9a 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -30,7 +30,7 @@ os_info = { version = "3.7.0", default-features = false } # watch https://github.com/epage/anstyle for official interop with Clap owo-colors = { version = "3.5.0", features = ["supports-colors"] } paste = "1.0.12" -regex = { version = "1.8.1", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.2", default-features = false, features = ["std", "perf"] } reqwest = { version = "0.11", features = ["json", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.96" From 9aaf864254bb05a92504533cd3d072341dbcb7e9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 13:13:09 +0000 Subject: [PATCH 029/236] chore(deps): bump data-encoding from 2.3.3 to 2.4.0 (#17452) Bumps [data-encoding](https://github.com/ia0/data-encoding) from 2.3.3 to 2.4.0.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=data-encoding&package-manager=cargo&previous-version=2.3.3&new-version=2.4.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- lib/dnsmsg-parser/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c71faae1f3df1..37de2c1dfea63 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2593,9 +2593,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "data-url" diff --git a/lib/dnsmsg-parser/Cargo.toml b/lib/dnsmsg-parser/Cargo.toml index eb1d8a270df35..bf62684808c3a 100644 --- a/lib/dnsmsg-parser/Cargo.toml +++ b/lib/dnsmsg-parser/Cargo.toml @@ -7,7 +7,7 @@ publish = false license = "MIT" [dependencies] -data-encoding = "2.3" +data-encoding = "2.4" thiserror = "1.0" trust-dns-proto = { version = "0.22", features = ["dnssec"] } From bca45eb32bff27429a6beb3cf1d7b241d6de8c70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 13:14:31 +0000 Subject: [PATCH 030/236] chore(ci): bump myrotvorets/set-commit-status-action from 1.1.6 to 1.1.7 (#17460) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [myrotvorets/set-commit-status-action](https://github.com/myrotvorets/set-commit-status-action) from 1.1.6 to 1.1.7.
Release notes

Sourced from myrotvorets/set-commit-status-action's releases.

1.1.7

What's Changed

... (truncated)

Commits
  • 243b4f7 1.1.7
  • ef26f55 Rebuild dist
  • 06662a1 Merge pull request #377 from myrotvorets/renovate/lock-file-maintenance
  • 1e272d7 chore(deps): lock file maintenance
  • a3117e7 Merge pull request #367 from myrotvorets/renovate/lock-file-maintenance
  • 6f390ad Update the code
  • 0fcf319 chore(deps): lock file maintenance
  • cd9e592 chore(deps): update github/codeql-action digest to 29b1f65
  • 1e0d009 chore(deps): update github/codeql-action digest to f3feb00
  • 1402c91 chore(deps): update github/codeql-action digest to 8662eab
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=myrotvorets/set-commit-status-action&package-manager=github_actions&previous-version=1.1.6&new-version=1.1.7)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cli.yml | 4 ++-- .github/workflows/component_features.yml | 4 ++-- .github/workflows/cross.yml | 6 +++--- .github/workflows/environment.yml | 4 ++-- .github/workflows/install-sh.yml | 6 +++--- .github/workflows/integration-comment.yml | 4 ++-- .github/workflows/k8s_e2e.yml | 8 ++++---- .github/workflows/misc.yml | 4 ++-- .github/workflows/regression.yml | 6 +++--- .github/workflows/unit_mac.yml | 4 ++-- .github/workflows/unit_windows.yml | 4 ++-- 11 files changed, 27 insertions(+), 27 deletions(-) diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 8ec1cf0ba8cc5..168e977096bd1 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -16,7 +16,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -54,7 +54,7 @@ jobs: if: always() - name: (PR comment) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: always() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} diff --git a/.github/workflows/component_features.yml b/.github/workflows/component_features.yml index 747483c1316cd..f08436be1dc42 100644 --- a/.github/workflows/component_features.yml +++ b/.github/workflows/component_features.yml @@ -13,7 +13,7 @@ jobs: id: comment-branch - name: (PR comment) Set latest commit status as pending - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: ${{ github.event_name == 'issue_comment' }} with: sha: ${{ steps.comment-branch.outputs.head_sha }} @@ -37,7 +37,7 @@ jobs: - run: make check-component-features - name: (PR comment) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: always() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml index e5fcdceb84ffb..c405fb0c4e84e 100644 --- a/.github/workflows/cross.yml +++ b/.github/workflows/cross.yml @@ -27,7 +27,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -68,7 +68,7 @@ jobs: path: "./target/${{ matrix.target }}/debug/vector" - name: (PR comment) Set latest commit status as failed - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: failure() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} @@ -94,7 +94,7 @@ jobs: id: comment-branch - name: (PR comment) Submit PR result as success - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 
e8c64e4fca5ab..bd03c55679b19 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -22,7 +22,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -72,7 +72,7 @@ jobs: labels: ${{ steps.meta.outputs.labels }} - name: (PR comment) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: always() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} diff --git a/.github/workflows/install-sh.yml b/.github/workflows/install-sh.yml index c19ea3e9310a8..950d6d4ab0f0a 100644 --- a/.github/workflows/install-sh.yml +++ b/.github/workflows/install-sh.yml @@ -16,7 +16,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -40,7 +40,7 @@ jobs: run: make sync-install - name: (PR comment) Set latest commit status as failed - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: failure() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} @@ -63,7 +63,7 @@ jobs: - name: (PR comment) Set latest commit status as ${{ job.status }} if: github.event_name == 'issue_comment' - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/integration-comment.yml b/.github/workflows/integration-comment.yml index 6481699d569f8..7deb40f88f465 100644 --- a/.github/workflows/integration-comment.yml +++ b/.github/workflows/integration-comment.yml @@ -63,7 +63,7 @@ jobs: id: comment-branch - name: (PR comment) Set latest commit status as pending - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -168,7 +168,7 @@ jobs: id: comment-branch - name: (PR comment) Submit PR result as ${{ needs.test-integration.result }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index b37e03ba12088..9ce639ca30027 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -77,7 +77,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -111,7 +111,7 @@ jobs: path: target/artifacts/* - name: (PR comment) Set latest commit status as 'failure' - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: failure() && 
github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} @@ -229,7 +229,7 @@ jobs: CARGO_INCREMENTAL: 0 - name: (PR comment) Set latest commit status as failure - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: failure() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} @@ -261,7 +261,7 @@ jobs: - name: (PR comment) Submit PR result as success if: success() && github.event_name == 'issue_comment' - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index f66c577ec7c54..480af128661cf 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -16,7 +16,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -53,7 +53,7 @@ jobs: - run: make test-docs - name: (PR comment) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: always() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 40764c5586577..352b7c40ef0c1 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -235,7 +235,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.pr-metadata-comment.outputs.COMPARISON_SHA }} token: ${{ secrets.GITHUB_TOKEN }} @@ -764,7 +764,7 @@ jobs: - name: (PR comment) Submit PR result as failed if: github.event_name == 'issue_comment' && env.FAILED == 'true' - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.compute-metadata.outputs.comparison-sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -773,7 +773,7 @@ jobs: - name: (PR comment) Submit PR result as success if: github.event_name == 'issue_comment' && env.FAILED != 'true' - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/unit_mac.yml b/.github/workflows/unit_mac.yml index d922f05da370b..bec8fe47daec7 100644 --- a/.github/workflows/unit_mac.yml +++ b/.github/workflows/unit_mac.yml @@ -20,7 +20,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -56,7 +56,7 @@ jobs: - run: make test-behavior - name: (PR comment) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: always() && github.event_name == 
'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} diff --git a/.github/workflows/unit_windows.yml b/.github/workflows/unit_windows.yml index 1971670c5dbe6..99073dec2dfac 100644 --- a/.github/workflows/unit_windows.yml +++ b/.github/workflows/unit_windows.yml @@ -23,7 +23,7 @@ jobs: - name: (PR comment) Set latest commit status as pending if: ${{ github.event_name == 'issue_comment' }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -44,7 +44,7 @@ jobs: - run: make test - name: (PR comment) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@1.1.6 + uses: myrotvorets/set-commit-status-action@v1.1.7 if: always() && github.event_name == 'issue_comment' with: sha: ${{ steps.comment-branch.outputs.head_sha }} From c425006f299c7a5f91509f7bdb18963f4da0748f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 13:15:58 +0000 Subject: [PATCH 031/236] chore(ci): bump xt0rted/pull-request-comment-branch from 1 to 2 (#17461) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [xt0rted/pull-request-comment-branch](https://github.com/xt0rted/pull-request-comment-branch) from 1 to 2.
Release notes

Sourced from xt0rted/pull-request-comment-branch's releases.

v2.0.0

  • Updated node runtime from 12 to 16
  • Removed deprecated ref and sha outputs. If you're using these then you should switch to head_ref and head_sha respectively.

v1.4.0

  • Bumped @actions/core from 1.2.7 to 1.10.0
  • Bumped @actions/github from 4.0.0 to 5.1.1
  • Bumped node-fetch from 2.6.1 to 2.6.7

v1.3.0

  • Bumped @actions/core from 1.2.5 to 1.2.7
  • Updated the repo_token input so it defaults to GITHUB_TOKEN. If you're already using this value you can remove this setting from your workflow.

v1.2.0

  • Deprecated ref and sha outputs in favor of head_ref and head_sha.
  • Added base_ref and base_sha outputs
  • Bumped @actions/core from 1.2.2 to 1.2.5
  • Bumped @actions/github from 2.1.1 to 4.0.0

v1.1.0

  • Bumped @actions/github to 2.1.1
Changelog

Sourced from xt0rted/pull-request-comment-branch's changelog.

2.0.0 - 2023-03-29

  • Updated node runtime from 12 to 16
  • Removed deprecated ref and sha outputs. If you're using these then you should switch to head_ref and head_sha respectively.
Commits
  • d97294d Release v2.0.0
  • a87e7d3 v2.0.0
  • 680f5a7 Rename fixup-commits.yml to fixup-commits.yml
  • 3f1f025 Merge pull request #347 from xt0rted/dependabot/npm_and_yarn/types/node-18.15.11
  • afc149b Bump @​types/node from 18.15.10 to 18.15.11
  • 43a3063 Merge pull request #310 from xt0rted/remove-deprecated-outputs
  • 3733bbb Remove deprecated outputs
  • 172a677 Merge pull request #342 from xt0rted/dependabot/npm_and_yarn/typescript-5.0.2
  • 71d4d22 Merge pull request #335 from kishaningithub/patch-1
  • 3dac4bb Bump typescript from 4.9.5 to 5.0.2
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=xt0rted/pull-request-comment-branch&package-manager=github_actions&previous-version=1&new-version=2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cli.yml | 2 +- .github/workflows/component_features.yml | 2 +- .github/workflows/cross.yml | 4 ++-- .github/workflows/environment.yml | 2 +- .github/workflows/install-sh.yml | 4 ++-- .github/workflows/integration-comment.yml | 4 ++-- .github/workflows/integration-test.yml | 2 +- .github/workflows/k8s_e2e.yml | 6 +++--- .github/workflows/misc.yml | 2 +- .github/workflows/regression.yml | 4 ++-- .github/workflows/unit_mac.yml | 2 +- .github/workflows/unit_windows.yml | 2 +- 12 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 168e977096bd1..a2ba175e0e4cd 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -11,7 +11,7 @@ jobs: steps: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending diff --git a/.github/workflows/component_features.yml b/.github/workflows/component_features.yml index f08436be1dc42..2705483f598c4 100644 --- a/.github/workflows/component_features.yml +++ b/.github/workflows/component_features.yml @@ -9,7 +9,7 @@ jobs: steps: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml index c405fb0c4e84e..1bf7ef188b577 100644 --- a/.github/workflows/cross.yml +++ b/.github/workflows/cross.yml @@ -22,7 +22,7 @@ jobs: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending @@ -90,7 +90,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} - name: (PR comment) Get PR branch - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Submit PR result as success diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index bd03c55679b19..f4d18c2263b11 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -17,7 +17,7 @@ jobs: steps: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending diff --git a/.github/workflows/install-sh.yml b/.github/workflows/install-sh.yml index 950d6d4ab0f0a..045319a191642 100644 --- a/.github/workflows/install-sh.yml +++ b/.github/workflows/install-sh.yml @@ -11,7 +11,7 @@ jobs: steps: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending @@ -58,7 +58,7 @@ jobs: - name: (PR comment) Get PR branch if: github.event_name == 'issue_comment' - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: 
(PR comment) Set latest commit status as ${{ job.status }} diff --git a/.github/workflows/integration-comment.yml b/.github/workflows/integration-comment.yml index 7deb40f88f465..ce72d06959317 100644 --- a/.github/workflows/integration-comment.yml +++ b/.github/workflows/integration-comment.yml @@ -59,7 +59,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} - name: (PR comment) Get PR branch - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending @@ -164,7 +164,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} - name: (PR comment) Get PR branch - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Submit PR result as ${{ needs.test-integration.result }} diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index f8e26564855e1..36c563732c417 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -46,7 +46,7 @@ jobs: steps: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Checkout PR branch diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index 9ce639ca30027..06c5ace79cddd 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -72,7 +72,7 @@ jobs: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending @@ -197,7 +197,7 @@ jobs: steps: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Checkout PR branch @@ -256,7 +256,7 @@ jobs: - name: (PR comment) Get PR branch if: success() && github.event_name == 'issue_comment' - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Submit PR result as success diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 480af128661cf..ce40a006da65f 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -11,7 +11,7 @@ jobs: steps: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 352b7c40ef0c1..044f7e851a04e 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -230,7 +230,7 @@ jobs: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending @@ -759,7 +759,7 @@ jobs: steps: - name: (PR comment) Get PR branch if: github.event_name == 'issue_comment' - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Submit PR 
result as failed diff --git a/.github/workflows/unit_mac.yml b/.github/workflows/unit_mac.yml index bec8fe47daec7..abda6ae1e177f 100644 --- a/.github/workflows/unit_mac.yml +++ b/.github/workflows/unit_mac.yml @@ -15,7 +15,7 @@ jobs: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending diff --git a/.github/workflows/unit_windows.yml b/.github/workflows/unit_windows.yml index 99073dec2dfac..87fb3f14c5e3e 100644 --- a/.github/workflows/unit_windows.yml +++ b/.github/workflows/unit_windows.yml @@ -18,7 +18,7 @@ jobs: - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} - uses: xt0rted/pull-request-comment-branch@v1 + uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending From 9f6f6ecde0db3ffdd7b904647f490511433836b5 Mon Sep 17 00:00:00 2001 From: neuronull Date: Tue, 23 May 2023 12:08:11 -0600 Subject: [PATCH 032/236] chore(ci): minor fixes to workflows post merge queue enabling (#17462) - fix files changed detection logic for aws integration tests - fix job run logic in integration tests workflow - restrict spell check workflow to only run on pull requests (not a push to any branch) --- .github/workflows/changes.yml | 17 ++++++++++++----- .github/workflows/integration.yml | 2 -- .github/workflows/regression.yml | 2 ++ .github/workflows/spelling.yml | 5 ----- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index 449f4210b624a..a4bae7367aea3 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -211,13 +211,20 @@ jobs: - "src/sinks/appsignal/**" - "src/sinks/util/**" aws: - - "src/aws_**" - - "src/internal_events/aws_**" - - "src/sources/aws_**" + - "src/aws/**" + - "src/internal_events/aws*" + - "src/sources/aws_ecs_metrics/**" + - "src/sources/aws_kinesis_firehose/**" + - "src/sources/aws_s3/**" + - "src/sources/aws_sqs/**" - "src/sources/util/**" - - "src/sinks/aws_**" + - "src/sinks/aws_cloudwatch_logs/**" + - "src/sinks/aws_cloudwatch_metrics/**" + - "src/sinks/aws_kinesis/**" + - "src/sinks/aws_s3/**" + - "src/sinks/aws_sqs/**" - "src/sinks/util/**" - - "src/transforms/aws_**" + - "src/transforms/aws*" axiom: - "src/sinks/axiom.rs" - "src/sinks/util/**" diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 1dc2ef8140160..d8ebebfc7f50e 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -34,7 +34,6 @@ env: jobs: changes: - if: github.event_name == 'pull_request' uses: ./.github/workflows/changes.yml with: base_ref: ${{ github.event.pull_request.base.ref }} @@ -43,7 +42,6 @@ jobs: # Calls the Integration Test workflow for each integration that was detected to have files changed that impact it. 
integration-matrix: - if: always() uses: ./.github/workflows/integration-test.yml with: if: ${{ matrix.run.if }} diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 044f7e851a04e..681a6757cfafa 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -61,6 +61,8 @@ jobs: all_changed: - added|deleted|modified: "**" ignore: + - "./.github/**" + - "./.gitignore" - "distribution/**" - "rust-doc/**" - "docs/**" diff --git a/.github/workflows/spelling.yml b/.github/workflows/spelling.yml index 9d17e26393e5a..6bc9e822ba966 100644 --- a/.github/workflows/spelling.yml +++ b/.github/workflows/spelling.yml @@ -57,11 +57,6 @@ name: Check Spelling # ... otherwise adjust the `with:` as you wish on: - push: - branches: - - "**" - tags-ignore: - - "**" pull_request_target: branches: - "**" From c1262cd162e04550b69913877d6b97037aceaea4 Mon Sep 17 00:00:00 2001 From: Ari Date: Tue, 23 May 2023 14:18:45 -0700 Subject: [PATCH 033/236] chore(aws_s3 sink): Update metadata to match the editorial review for the schema. (#17475) --- lib/codecs/src/encoding/format/avro.rs | 1 + lib/vector-config-common/src/human_friendly.rs | 2 +- lib/vector-core/src/tls/settings.rs | 4 ++++ src/aws/auth.rs | 2 ++ src/sinks/aws_s3/config.rs | 1 + 5 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/codecs/src/encoding/format/avro.rs b/lib/codecs/src/encoding/format/avro.rs index 35bf45890043d..3bfeea5ea1dad 100644 --- a/lib/codecs/src/encoding/format/avro.rs +++ b/lib/codecs/src/encoding/format/avro.rs @@ -47,6 +47,7 @@ pub struct AvroSerializerOptions { #[configurable(metadata( docs::examples = r#"{ "type": "record", "name": "log", "fields": [{ "name": "message", "type": "string" }] }"# ))] + #[configurable(metadata(docs::human_name = "Schema JSON"))] pub schema: String, } diff --git a/lib/vector-config-common/src/human_friendly.rs b/lib/vector-config-common/src/human_friendly.rs index 29c9a290efb93..177e32f428ad7 100644 --- a/lib/vector-config-common/src/human_friendly.rs +++ b/lib/vector-config-common/src/human_friendly.rs @@ -48,7 +48,7 @@ static WELL_KNOWN_ACRONYMS: Lazy> = Lazy::new(|| { "api", "amqp", "aws", "ec2", "ecs", "gcp", "hec", "http", "https", "nats", "nginx", "s3", "sqs", "tls", "ssl", "otel", "gelf", "csv", "json", "rfc3339", "lz4", "us", "eu", "bsd", "vrl", "tcp", "udp", "id", "uuid", "kms", "uri", "url", "acp", "uid", "ip", "pid", - "ndjson", "ewma", "rtt", "cpu", "acl", + "ndjson", "ewma", "rtt", "cpu", "acl", "imds", "acl", "alpn", ]; acronyms.iter().map(|s| s.to_lowercase()).collect() diff --git a/lib/vector-core/src/tls/settings.rs b/lib/vector-core/src/tls/settings.rs index bf049bb86bcb7..4454cfc76026e 100644 --- a/lib/vector-core/src/tls/settings.rs +++ b/lib/vector-core/src/tls/settings.rs @@ -119,6 +119,7 @@ pub struct TlsConfig { /// The certificate must be in the DER or PEM (X.509) format. Additionally, the certificate can be provided as an inline string in PEM format. #[serde(alias = "ca_path")] #[configurable(metadata(docs::examples = "/path/to/certificate_authority.crt"))] + #[configurable(metadata(docs::human_name = "CA File Path"))] pub ca_file: Option, /// Absolute path to a certificate file used to identify this server. @@ -129,6 +130,7 @@ pub struct TlsConfig { /// If this is set, and is not a PKCS#12 archive, `key_file` must also be set. 
#[serde(alias = "crt_path")] #[configurable(metadata(docs::examples = "/path/to/host_certificate.crt"))] + #[configurable(metadata(docs::human_name = "Certificate File Path"))] pub crt_file: Option, /// Absolute path to a private key file used to identify this server. @@ -136,6 +138,7 @@ pub struct TlsConfig { /// The key must be in DER or PEM (PKCS#8) format. Additionally, the key can be provided as an inline string in PEM format. #[serde(alias = "key_path")] #[configurable(metadata(docs::examples = "/path/to/host_certificate.key"))] + #[configurable(metadata(docs::human_name = "Key File Path"))] pub key_file: Option, /// Passphrase used to unlock the encrypted key file. @@ -143,6 +146,7 @@ pub struct TlsConfig { /// This has no effect unless `key_file` is set. #[configurable(metadata(docs::examples = "${KEY_PASS_ENV_VAR}"))] #[configurable(metadata(docs::examples = "PassWord1"))] + #[configurable(metadata(docs::human_name = "Key File Password"))] pub key_pass: Option, } diff --git a/src/aws/auth.rs b/src/aws/auth.rs index ec73bc97984bf..899ca39087bd9 100644 --- a/src/aws/auth.rs +++ b/src/aws/auth.rs @@ -117,6 +117,7 @@ pub enum AwsAuthentication { /// Relevant when the default credentials chain or `assume_role` is used. #[configurable(metadata(docs::type_unit = "seconds"))] #[configurable(metadata(docs::examples = 30))] + #[configurable(metadata(docs::human_name = "Load Timeout"))] load_timeout_secs: Option, /// Configuration for authenticating with AWS through IMDS. @@ -141,6 +142,7 @@ pub enum AwsAuthentication { /// Relevant when the default credentials chain or `assume_role` is used. #[configurable(metadata(docs::type_unit = "seconds"))] #[configurable(metadata(docs::examples = 30))] + #[configurable(metadata(docs::human_name = "Load Timeout"))] load_timeout_secs: Option, /// Configuration for authenticating with AWS through IMDS. diff --git a/src/sinks/aws_s3/config.rs b/src/sinks/aws_s3/config.rs index 38f22220d293a..27afbcd706d21 100644 --- a/src/sinks/aws_s3/config.rs +++ b/src/sinks/aws_s3/config.rs @@ -83,6 +83,7 @@ pub struct S3SinkConfig { /// This ensures there are no name collisions, and can be useful in high-volume workloads where /// object keys must be unique. #[serde(default = "crate::serde::default_true")] + #[configurable(metadata(docs::human_name = "Append UUID to Filename"))] pub filename_append_uuid: bool, /// The filename extension to use in the object key. From 9235fc249f4a0aa34d1119ed7dd334e23e5c3674 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 May 2023 13:49:32 +0000 Subject: [PATCH 034/236] chore(deps): bump proptest from 1.1.0 to 1.2.0 (#17476) Bumps [proptest](https://github.com/proptest-rs/proptest) from 1.1.0 to 1.2.0.
Commits
  • c65b6aa Changelog : backfill changelogs
  • fd43fcc bump version to 1.2.0
  • 5670183 Merge pull request #324 from tzemanovic/tomas/sm-prerequisites
  • b88e9ff Merge pull request #318 from tzemanovic/tomas/env-vars-config-override
  • 18ca1af [Fix] NonZero : only impl Arbitrary for 128-bit NonZero nums in non-wasm targ...
  • 6b4df75 proptest/changelog: add #324
  • fb2d578 make num sampling functions public
  • 6eb7574 make SizeRange methods public
  • a6bc664 make VarBitSet public
  • 22672a6 changelog: add #318
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=proptest&package-manager=cargo&previous-version=1.1.0&new-version=1.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 15 ++++----------- Cargo.toml | 2 +- lib/vector-buffers/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 4 ++-- 4 files changed, 8 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 37de2c1dfea63..2b69bd0228762 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6312,16 +6312,15 @@ dependencies = [ [[package]] name = "proptest" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f1b898011ce9595050a68e60f90bad083ff2987a695a42357134c8381fba70" +checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" dependencies = [ "bit-set", "bitflags", "byteorder", "lazy_static", "num-traits", - "quick-error 2.0.1", "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", @@ -6472,12 +6471,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quick-xml" version = "0.27.1" @@ -6876,7 +6869,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ "hostname", - "quick-error 1.2.3", + "quick-error", ] [[package]] @@ -7167,7 +7160,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", "wait-timeout", ] diff --git a/Cargo.toml b/Cargo.toml index 03bb274d1f2d9..30dea96e76dfa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -348,7 +348,7 @@ criterion = { version = "0.4.0", features = ["html_reports", "async_tokio"] } itertools = { version = "0.10.5", default-features = false } libc = "0.2.144" similar-asserts = "1.4.2" -proptest = "1.1" +proptest = "1.2" quickcheck = "1.0.3" reqwest = { version = "0.11", features = ["json"] } tempfile = "3.5.0" diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index 0084fc048d9d7..f105b82a47401 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -39,7 +39,7 @@ hdrhistogram = "7.5.2" metrics-tracing-context = { version = "0.14.0", default-features = false } metrics-util = { version = "0.15.0", default-features = false, features = ["debugging"] } once_cell = "1.17" -proptest = "1.1" +proptest = "1.2" quickcheck = "1.0" rand = "0.8.5" serde_yaml = { version = "0.9", default-features = false } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index ed61db90f8772..7140ff58ec49d 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -34,7 +34,7 @@ ordered-float = { version = "3.7.0", default-features = false } openssl = { version = "0.10.52", default-features = false, features = ["vendored"] } parking_lot = { version = "0.12.1", default-features = false } pin-project = { version = "1.1.0", default-features = false } -proptest = { version = "1.1", optional = true } +proptest = { version = "1.2", optional = true } prost-types = { version = "0.11", default-features = false } prost = { version = "0.11", default-features = false, features = ["std"] } quanta = { version = 
"0.11.0", default-features = false } @@ -83,7 +83,7 @@ criterion = { version = "0.4.0", features = ["html_reports"] } env-test-util = "1.0.1" quickcheck = "1" quickcheck_macros = "1" -proptest = "1.1" +proptest = "1.2" similar-asserts = "1.4.2" tokio-test = "0.4.2" toml = { version = "0.7.4", default-features = false, features = ["parse"] } From ebf958b1355b4b729e7c99232bc40e2f7e809abf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 May 2023 13:57:35 +0000 Subject: [PATCH 035/236] chore(deps): bump opendal from 0.34.0 to 0.35.0 (#17471) Bumps [opendal](https://github.com/apache/incubator-opendal) from 0.34.0 to 0.35.0.
Release notes

Sourced from opendal's releases.

v0.35.0

NOTE: This release is not yet an official ASF release.

Upgrade to v0.35

Public API

  • OpenDAL removes rarely used Operator::from_env and Operator::from_iter APIs
    • Users can use Operator::via_map instead.
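As an editorial aside (not part of the upstream notes or this PR): a minimal sketch of the `Operator::via_map` replacement mentioned above, assuming the signature implied by the release notes (a `Scheme` plus a map of string options) and a service that is compiled in. Vector itself enables only the `webhdfs` service (see the Cargo.toml hunk below), so no such migration is needed in this repository.

```rust
// Hypothetical migration sketch for opendal 0.35 (assumed API shape, not code from this PR).
use std::collections::HashMap;

use opendal::{Operator, Scheme};

fn build_operator() -> opendal::Result<Operator> {
    // The service-specific settings that Operator::from_env / Operator::from_iter
    // used to consume are now passed as a plain string map.
    let mut config = HashMap::new();
    config.insert("root".to_string(), "/tmp/example-root".to_string());

    // Replaces the removed Operator::from_env / Operator::from_iter constructors.
    Operator::via_map(Scheme::Fs, config)
}
```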

Raw API

  • OpenDAL adds append support with could break existing layers. Please make sure append requests have been forward correctly.
  • After the merging of scan and list, OpenDAL removes the scan from raw API. Please use list_without_delimiter instead.

[v0.35.0] - 2023-05-23

Added

  • feat(services/onedrive): Implement list, create_dir, stat and uploading large files (#2231)
  • feat(bindings/C): Initially support stat in C binding (#2249)
  • feat(bindings/python): Enable abi3 to avoid building on different python version (#2255)
  • feat(bindings/C): support BDD tests using GTest (#2254)
  • feat(services/sftp): setup integration tests (#2192)
  • feat(core): Add trait and public API for append (#2260)
  • feat(services/sftp): support copy and rename for sftp (#2263)
  • feat(services/sftp): support copy and read_seek (#2267)
  • feat: Add COS service support (#2269)
  • feat(services/cos): Add support for loading from env (#2271)
  • feat(core): add presign support for obs (#2253)
  • feat(services/sftp): setup integration tests (#2192)
  • feat(core): add presign support for obs (#2253)
  • feat(core): public API of append (#2284)
  • test(core): test for append (#2286)
  • feat(services/oss): add append support (#2279)
  • feat(bindings/java): implement async ops to pass AsyncStepsTest (#2291)

Changed

  • services/gdrive: port code to GdriveCore & add path_2_id cache (#2203)
  • refactor: Minimize futures dependencies (#2248)
  • refactor: Add Operator::via_map to support init without generic type parameters (#2280)
  • refactor(binding/java): build, async and docs (#2276)

Fixed

... (truncated)

Changelog

Sourced from opendal's changelog.

[v0.35.0] - 2023-05-23

Added

  • feat(services/onedrive): Implement list, create_dir, stat and uploading large files (#2231)
  • feat(bindings/C): Initially support stat in C binding (#2249)
  • feat(bindings/python): Enable abi3 to avoid building on different python version (#2255)
  • feat(bindings/C): support BDD tests using GTest (#2254)
  • feat(services/sftp): setup integration tests (#2192)
  • feat(core): Add trait and public API for append (#2260)
  • feat(services/sftp): support copy and rename for sftp (#2263)
  • feat(services/sftp): support copy and read_seek (#2267)
  • feat: Add COS service support (#2269)
  • feat(services/cos): Add support for loading from env (#2271)
  • feat(core): add presign support for obs (#2253)
  • feat(services/sftp): setup integration tests (#2192)
  • feat(core): add presign support for obs (#2253)
  • feat(core): public API of append (#2284)
  • test(core): test for append (#2286)
  • feat(services/oss): add append support (#2279)
  • feat(bindings/java): implement async ops to pass AsyncStepsTest (#2291)

Changed

  • services/gdrive: port code to GdriveCore & add path_2_id cache (#2203)
  • refactor: Minimize futures dependencies (#2248)
  • refactor: Add Operator::via_map to support init without generic type parameters (#2280)
  • refactor(binding/java): build, async and docs (#2276)

Fixed

  • fix: Fix bugs that failed wasabi's integration tests (#2273)

Removed

  • feat(core): remove scan from raw API (#2262)

Docs

  • chore(s3): update builder region doc (#2247)
  • docs: Add services in readme (#2251)
  • docs: Unify capabilities list for kv services (#2257)
  • docs(nodejs): fix some example code errors (#2277)
  • docs(bindings/C): C binding contributing documentation (#2266)
  • docs: Add new docs that available for all languages (#2285)
  • docs: Remove unlicensed svg (#2289)
  • fix(website): double active route (#2290)

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=opendal&package-manager=cargo&previous-version=0.34.0&new-version=0.35.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b69bd0228762..7f2a8a5db63a8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5575,9 +5575,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "opendal" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "005c877c4f788a7749825bbc61031ccc950217fac6bcf9965641fbf1bdf991b0" +checksum = "440f466680e0bc98ea94af95301aab4c69d9720934baec8f46b79a69fdd87cce" dependencies = [ "anyhow", "async-compat", diff --git a/Cargo.toml b/Cargo.toml index 30dea96e76dfa..8e2fdc479d170 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -181,7 +181,7 @@ azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = azure_storage_blobs = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, optional = true } # OpenDAL -opendal = {version = "0.34", default-features = false, features = ["native-tls", "services-webhdfs"], optional = true} +opendal = {version = "0.35", default-features = false, features = ["native-tls", "services-webhdfs"], optional = true} # Tower tower = { version = "0.4.13", default-features = false, features = ["buffer", "limit", "retry", "timeout", "util", "balance", "discover"] } From 9a44e6e8763c5d2bc91de1c24b14662d10d0b434 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Wed, 24 May 2023 08:51:54 -0600 Subject: [PATCH 036/236] chore: Update the NOTICE file (#17430) --- NOTICE | 3 +++ 1 file changed, 3 insertions(+) diff --git a/NOTICE b/NOTICE index e085f4b436c94..1c1d999241f54 100644 --- a/NOTICE +++ b/NOTICE @@ -1,4 +1,7 @@ +Unless explicitly stated otherwise all files in this repository are licensed under the Mozilla +Public License, version 2.0 (MPL-2.0). +This product includes software developed at Datadog (https://www.datadoghq.com/) Copyright (c) 2020 Vector Authors From 58d7f3dfb0b57445db931604c6f72d93015da505 Mon Sep 17 00:00:00 2001 From: neuronull Date: Wed, 24 May 2023 09:39:50 -0600 Subject: [PATCH 037/236] chore(ci): temporarily disable comment_trigger workflow (#17480) GitHub Support requested we try disabling the concurrency group to see if it resolves the issue of the workflow failing to start. --- .github/workflows/comment-trigger.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/comment-trigger.yml b/.github/workflows/comment-trigger.yml index e9c2c234233bb..cdd5f65adf990 100644 --- a/.github/workflows/comment-trigger.yml +++ b/.github/workflows/comment-trigger.yml @@ -34,9 +34,11 @@ env: # can be removed when we switch back to the upstream openssl-sys crate CARGO_NET_GIT_FETCH_WITH_CLI: true -concurrency: - group: ${{ github.workflow }}-${{ github.event.issue_comment.issue.id }}-${{ github.event.comment.body }} - cancel-in-progress: true +# TODO: temporarily disabling concurrency groups at request of GitHub Support, to see if it resolves +# the issue we are having. 
+#concurrency: +# group: ${{ github.workflow }}-${{ github.event.issue_comment.issue.id }}-${{ github.event.comment.body }} +# cancel-in-progress: true jobs: validate: From 541bb0087eb95b8d67c98547240c8104c5b2a69f Mon Sep 17 00:00:00 2001 From: Will Wang Date: Wed, 24 May 2023 17:03:53 -0400 Subject: [PATCH 038/236] chore(enterprise): Extend library functionality for secret scanning (#17483) Ref OPB-710 This PR - Exposes patterns and the `interpolate` function for use in OPW. This will help deduplicate some logic and reduce maintenance burden. We use the patterns themselves in secret scanning logic, and will use the `interpolate` function in bootstrap-related logic. --- src/config/loading/secret.rs | 2 +- src/config/mod.rs | 4 +++- src/config/vars.rs | 35 +++++++++++++++++++---------------- 3 files changed, 23 insertions(+), 18 deletions(-) diff --git a/src/config/loading/secret.rs b/src/config/loading/secret.rs index 54c83823e2218..09a32260708e1 100644 --- a/src/config/loading/secret.rs +++ b/src/config/loading/secret.rs @@ -26,7 +26,7 @@ use crate::{ // - "SECRET[backend..secret.name]" will match and capture "backend" and ".secret.name" // - "SECRET[secret_name]" will not match // - "SECRET[.secret.name]" will not match -static COLLECTOR: Lazy = +pub static COLLECTOR: Lazy = Lazy::new(|| Regex::new(r"SECRET\[([[:word:]]+)\.([[:word:].]+)\]").unwrap()); /// Helper type for specifically deserializing secrets backends. diff --git a/src/config/mod.rs b/src/config/mod.rs index 473961f2eaf7c..c1f949d3506fa 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -48,7 +48,8 @@ pub use format::{Format, FormatHint}; pub use id::{ComponentKey, Inputs}; pub use loading::{ load, load_builder_from_paths, load_from_paths, load_from_paths_with_provider_and_secrets, - load_from_str, load_source_from_paths, merge_path_lists, process_paths, CONFIG_PATHS, + load_from_str, load_source_from_paths, merge_path_lists, process_paths, COLLECTOR, + CONFIG_PATHS, }; pub use provider::ProviderConfig; pub use secret::SecretBackend; @@ -59,6 +60,7 @@ pub use transform::{ }; pub use unit_test::{build_unit_tests, build_unit_tests_main, UnitTestResult}; pub use validation::warnings; +pub use vars::{interpolate, ENVIRONMENT_VARIABLE_INTERPOLATION_REGEX}; pub use vector_core::config::{ init_log_schema, log_schema, proxy::ProxyConfig, LogSchema, OutputId, }; diff --git a/src/config/vars.rs b/src/config/vars.rs index 3b923a9687bfe..73316884cebf3 100644 --- a/src/config/vars.rs +++ b/src/config/vars.rs @@ -1,7 +1,25 @@ use std::collections::HashMap; +use once_cell::sync::Lazy; use regex::{Captures, Regex}; +// Environment variable names can have any characters from the Portable Character Set other +// than NUL. However, for Vector's interpolation, we are closer to what a shell supports which +// is solely of uppercase letters, digits, and the '_' (that is, the `[:word:]` regex class). +// In addition to these characters, we allow `.` as this commonly appears in environment +// variable names when they come from a Java properties file. 
+// +// https://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html +pub static ENVIRONMENT_VARIABLE_INTERPOLATION_REGEX: Lazy = Lazy::new(|| { + Regex::new( + r"(?x) + \$\$| + \$([[:word:].]+)| + \$\{([[:word:].]+)(?:(:?-|:?\?)([^}]*))?\}", + ) + .unwrap() +}); + /// (result, warnings) pub fn interpolate( input: &str, @@ -10,22 +28,7 @@ pub fn interpolate( let mut errors = Vec::new(); let mut warnings = Vec::new(); - // Environment variable names can have any characters from the Portable Character Set other - // than NUL. However, for Vector's interpolation, we are closer to what a shell supports which - // is solely of uppercase letters, digits, and the '_' (that is, the `[:word:]` regex class). - // In addition to these characters, we allow `.` as this commonly appears in environment - // variable names when they come from a Java properties file. - // - // https://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html - let re = Regex::new( - r"(?x) - \$\$| - \$([[:word:].]+)| - \$\{([[:word:].]+)(?:(:?-|:?\?)([^}]*))?\}", - ) - .unwrap(); - - let interpolated = re + let interpolated = ENVIRONMENT_VARIABLE_INTERPOLATION_REGEX .replace_all(input, |caps: &Captures<'_>| { let flags = caps.get(3).map(|m| m.as_str()).unwrap_or_default(); let def_or_err = caps.get(4).map(|m| m.as_str()).unwrap_or_default(); From 78fb4694c26d061314e8a01236a67633d8035d5c Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Wed, 24 May 2023 15:04:04 -0700 Subject: [PATCH 039/236] fix(distribution): Fix architecture detection for ARMv7 (#17484) The patterns were incorrectly matching the the OS for ARMv7 builds which have extra chacters indicating platform support. Fixes: https://github.com/vectordotdev/vector/issues/17450 Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- distribution/install.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/distribution/install.sh b/distribution/install.sh index b4f72e03bcccd..bbca042d0e4c1 100755 --- a/distribution/install.sh +++ b/distribution/install.sh @@ -139,6 +139,7 @@ install_from_archive() { assert_nz "$_arch" "arch" local _archive_arch="" + case "$_arch" in x86_64-apple-darwin) _archive_arch=$_arch @@ -152,13 +153,13 @@ install_from_archive() { aarch64-*linux*) _archive_arch="aarch64-unknown-linux-musl" ;; - armv7-*linux*-gnu) + armv7-*linux*-gnueabihf) _archive_arch="armv7-unknown-linux-gnueabihf" ;; - armv7-*linux*-musl) + armv7-*linux*-musleabihf) _archive_arch="armv7-unknown-linux-musleabihf" ;; - *) + *) err "unsupported arch: $_arch" ;; esac From 426d6602d22193940ac6e495fc5c175aa3bc8f90 Mon Sep 17 00:00:00 2001 From: Nathan Fox Date: Thu, 25 May 2023 08:17:36 -0400 Subject: [PATCH 040/236] chore: update `vrl` to `0.4.0` (#17378) This upgrades VRL to `0.4.0` Notable changes: - This is the first crates.io release for VRL. It's no longer a git dependency! - All VRL macros are now exported at the root, which required some import changes - Previously the `vrl` crate had an internal `test` feature that was un-intentionally enabled. This was caught and fixed in recent VRL refactoring. Vector was relying on this in a few places where is shouldn't be (all related to converting an `f64` into a `Value`. That implementation is normally only available for tests, since the `f64` needs to be checked for `NaN` first). As a quick fix, to keep existing behavior, the `test` feature is now explicitly enabled for VRL. [An issue](https://github.com/vectordotdev/vector/issues/17377) was created to track this and remove it. 
--- Cargo.lock | 269 ++++-------------- Cargo.toml | 2 +- LICENSE-3rdparty.csv | 3 +- lib/codecs/Cargo.toml | 2 +- lib/codecs/src/encoding/format/avro.rs | 2 +- lib/codecs/src/encoding/format/gelf.rs | 2 +- lib/codecs/src/encoding/format/json.rs | 2 +- lib/codecs/src/encoding/format/logfmt.rs | 2 +- lib/codecs/src/encoding/format/native_json.rs | 2 +- lib/enrichment/Cargo.toml | 2 +- .../src/find_enrichment_table_records.rs | 1 + .../src/get_enrichment_table_record.rs | 1 + lib/opentelemetry-proto/Cargo.toml | 2 +- lib/vector-common/Cargo.toml | 2 +- lib/vector-common/src/lib.rs | 2 +- lib/vector-config/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 4 +- lib/vector-core/src/event/log_event.rs | 2 +- lib/vector-core/src/event/vrl_target.rs | 4 +- lib/vector-lookup/Cargo.toml | 2 +- lib/vector-lookup/src/lib.rs | 6 +- lib/vector-vrl/cli/Cargo.toml | 2 +- lib/vector-vrl/functions/Cargo.toml | 2 +- lib/vector-vrl/tests/Cargo.toml | 2 +- lib/vector-vrl/web-playground/Cargo.toml | 2 +- src/config/enterprise.rs | 2 +- src/sinks/influxdb/logs.rs | 2 +- src/sources/aws_kinesis_firehose/mod.rs | 2 +- src/sources/dnstap/schema.rs | 2 +- src/sources/docker_logs/tests.rs | 2 +- src/sources/exec/mod.rs | 2 +- src/sources/file.rs | 2 +- .../file_descriptors/file_descriptor.rs | 2 +- src/sources/file_descriptors/stdin.rs | 2 +- src/sources/gcp_pubsub.rs | 2 +- src/sources/kafka.rs | 2 +- src/sources/kubernetes_logs/parser/cri.rs | 2 +- src/sources/kubernetes_logs/parser/docker.rs | 2 +- src/sources/kubernetes_logs/parser/mod.rs | 2 +- .../kubernetes_logs/parser/test_util.rs | 3 +- src/sources/opentelemetry/tests.rs | 2 +- src/sources/redis/mod.rs | 2 +- src/sources/socket/mod.rs | 5 +- src/transforms/remap.rs | 6 +- 44 files changed, 101 insertions(+), 269 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f2a8a5db63a8..155f906ab6ca9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -212,6 +212,15 @@ dependencies = [ "num-traits", ] +[[package]] +name = "arbitrary" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" +dependencies = [ + "derive_arbitrary", +] + [[package]] name = "arc-swap" version = "1.6.0" @@ -2603,52 +2612,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d7439c3735f405729d52c3fbbe4de140eaf938a1fe47d227c27f8254d4302a5" -[[package]] -name = "datadog-filter" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "datadog-search-syntax", - "dyn-clone", - "regex", -] - -[[package]] -name = "datadog-grok" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "bytes 1.4.0", - "chrono", - "chrono-tz", - "lalrpop", - "lalrpop-util", - "nom", - "once_cell", - "onig", - "ordered-float 3.7.0", - "path", - "peeking_take_while", - "regex", - "serde_json", - "thiserror", - "tracing 0.1.37", - "value", - "vrl-compiler", -] - -[[package]] -name = "datadog-search-syntax" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "itertools", - "once_cell", - "pest", - "pest_derive", - "regex", -] - [[package]] name = "db-key" version = "0.0.5" @@ -2700,6 +2663,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_arbitrary" +version = "1.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7" +dependencies = [ + "proc-macro2 1.0.58", + "quote 1.0.27", + "syn 1.0.109", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -5823,17 +5797,6 @@ version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" -[[package]] -name = "path" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "once_cell", - "regex", - "serde", - "snafu", -] - [[package]] name = "pbkdf2" version = "0.11.0" @@ -9120,25 +9083,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" -[[package]] -name = "value" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "bytes 1.4.0", - "chrono", - "mlua", - "once_cell", - "ordered-float 3.7.0", - "path", - "quickcheck", - "regex", - "serde", - "serde_json", - "snafu", - "tracing 0.1.37", -] - [[package]] name = "vcpkg" version = "0.2.15" @@ -9687,120 +9631,14 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "datadog-filter", - "datadog-grok", - "datadog-search-syntax", - "path", - "value", - "vrl-cli", - "vrl-compiler", - "vrl-core", - "vrl-diagnostic", - "vrl-parser", - "vrl-stdlib", - "vrl-tests", -] - -[[package]] -name = "vrl-cli" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "clap 4.1.14", - "exitcode", - "indoc", - "once_cell", - "path", - "prettytable-rs", - "regex", - "rustyline", - "serde_json", - "thiserror", - "value", - "vrl-compiler", - "vrl-core", - "vrl-diagnostic", - "vrl-stdlib", - "webbrowser", -] - -[[package]] -name = "vrl-compiler" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "anymap", - "bytes 1.4.0", - "chrono", - "chrono-tz", - "dyn-clone", - "getrandom 0.2.9", - "indoc", - "lalrpop-util", - "ordered-float 3.7.0", - "paste", - "path", - "regex", - "serde", - "snafu", - "thiserror", - "tracing 0.1.37", - "value", - "vrl-diagnostic", - "vrl-parser", -] - -[[package]] -name = "vrl-core" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "bytes 1.4.0", - "chrono", - "chrono-tz", - "derivative", - "nom", - "ordered-float 3.7.0", - "path", - "serde", - "serde_json", - "snafu", - "value", - "vrl-diagnostic", -] - -[[package]] -name = "vrl-diagnostic" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "codespan-reporting", - "termcolor", -] - -[[package]] -name = "vrl-parser" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "lalrpop", - "lalrpop-util", - "ordered-float 3.7.0", - "paste", - "path", - "thiserror", - "vrl-diagnostic", -] - -[[package]] -name = "vrl-stdlib" 
-version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6236fdfaaa956af732a73630b8a8b43ef75e0a42fed6b94cf3c1c19c99daca5" dependencies = [ "aes", + "ansi_term", + "anymap", + "arbitrary", "base16", "base64 0.21.1", "bytes 1.4.0", @@ -9808,78 +9646,69 @@ dependencies = [ "cfb-mode", "charset", "chrono", + "chrono-tz", "cidr-utils", + "clap 4.1.14", + "codespan-reporting", "csv", "ctr", "data-encoding", - "datadog-filter", - "datadog-grok", - "datadog-search-syntax", "dns-lookup", + "dyn-clone", + "exitcode", "flate2", + "getrandom 0.2.9", "grok", "hex", "hmac", "hostname", "indexmap", "indoc", + "itertools", + "lalrpop", + "lalrpop-util", "md-5", + "mlua", "nom", "ofb", "once_cell", + "onig", "ordered-float 3.7.0", - "path", + "paste", + "peeking_take_while", "percent-encoding", + "pest", + "pest_derive", + "prettydiff", + "prettytable-rs", + "quickcheck", "quoted_printable", "rand 0.8.5", "regex", "roxmltree", "rust_decimal", + "rustyline", "seahash", "serde", "serde_json", "sha-1", "sha2 0.10.6", "sha3", + "snafu", "strip-ansi-escapes", "syslog_loose", + "termcolor", + "thiserror", "tracing 0.1.37", "uaparser", "url", "utf8-width", "uuid", - "value", - "vrl-compiler", - "vrl-core", - "vrl-diagnostic", + "webbrowser", "woothee", "zstd 0.12.3+zstd.1.5.2", ] -[[package]] -name = "vrl-tests" -version = "0.1.0" -source = "git+https://github.com/vectordotdev/vrl?rev=v0.3.0#113005bcee6cd7b5ea0a53a7db2fc45ba4bc4125" -dependencies = [ - "ansi_term", - "chrono", - "chrono-tz", - "clap 4.1.14", - "glob", - "path", - "prettydiff", - "regex", - "serde", - "serde_json", - "tikv-jemallocator", - "tracing-subscriber", - "value", - "vrl-compiler", - "vrl-core", - "vrl-diagnostic", - "vrl-stdlib", -] - [[package]] name = "vsimd" version = "0.8.0" diff --git a/Cargo.toml b/Cargo.toml index 8e2fdc479d170..6e8c218a65ad4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -226,7 +226,7 @@ hex = { version = "0.4.3", default-features = false, optional = true } sha2 = { version = "0.10.6", default-features = false, optional = true } # VRL Lang -vrl = { package = "vrl", git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", features = ["cli"] } +vrl = { package = "vrl", version = "0.4.0", features = ["cli", "test"] } # External libs arc-swap = { version = "1.6", default-features = false, optional = true } diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index ebace511178c0..3f5b7ca6808e8 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -12,6 +12,7 @@ ansi_term,https://github.com/ogham/rust-ansi-term,MIT,"ogham@bsago.me, Ryan Sche anyhow,https://github.com/dtolnay/anyhow,MIT OR Apache-2.0,David Tolnay anymap,https://github.com/chris-morgan/anymap,BlueOak-1.0.0 OR MIT OR Apache-2.0,Chris Morgan apache-avro,https://github.com/apache/avro,Apache-2.0,Apache Avro team +arbitrary,https://github.com/rust-fuzz/arbitrary,MIT OR Apache-2.0,"The Rust-Fuzz Project Developers, Nick Fitzgerald , Manish Goregaokar , Simonas Kazlauskas , Brian L. 
Troutwine , Corey Farwell " arc-swap,https://github.com/vorner/arc-swap,MIT OR Apache-2.0,Michal 'vorner' Vaner arr_macro,https://github.com/JoshMcguigan/arr_macro,MIT OR Apache-2.0,Josh Mcguigan arrayvec,https://github.com/bluss/arrayvec,MIT OR Apache-2.0,bluss @@ -147,6 +148,7 @@ data-url,https://github.com/servo/rust-url,MIT OR Apache-2.0,Simon Sapin der,https://github.com/RustCrypto/formats/tree/master/der,Apache-2.0 OR MIT,RustCrypto Developers derivative,https://github.com/mcarton/rust-derivative,MIT OR Apache-2.0,mcarton +derive_arbitrary,https://github.com/rust-fuzz/arbitrary,MIT OR Apache-2.0,"The Rust-Fuzz Project Developers, Nick Fitzgerald , Manish Goregaokar , Andre Bogus , Corey Farwell " derive_more,https://github.com/JelteF/derive_more,MIT,Jelte Fennema digest,https://github.com/RustCrypto/traits,MIT OR Apache-2.0,RustCrypto Developers dirs,https://github.com/soc/dirs-rs,MIT OR Apache-2.0,Simon Ochsenreither @@ -565,7 +567,6 @@ utf8-width,https://github.com/magiclen/utf8-width,MIT,Magic Len , Christian Duerr " uuid,https://github.com/uuid-rs/uuid,Apache-2.0 OR MIT,"Ashley Mannix, Christopher Armstrong, Dylan DPC, Hunar Roop Kahlon" valuable,https://github.com/tokio-rs/valuable,MIT,The valuable Authors -value,https://github.com/vectordotdev/vrl,MPL-2.0,Vector Contributors vec_map,https://github.com/contain-rs/vec-map,MIT OR Apache-2.0,"Alex Crichton , Jorge Aparicio , Alexis Beingessner , Brian Anderson <>, tbu- <>, Manish Goregaokar <>, Aaron Turon , Adolfo Ochagavía <>, Niko Matsakis <>, Steven Fackler <>, Chase Southwood , Eduard Burtescu <>, Florian Wilkens <>, Félix Raimundo <>, Tibor Benke <>, Markus Siemens , Josh Branchaud , Huon Wilson , Corey Farwell , Aaron Liblong <>, Nick Cameron , Patrick Walton , Felix S Klock II <>, Andrew Paseltiner , Sean McArthur , Vadim Petrochenkov <>" void,https://github.com/reem/rust-void,MIT,Jonathan Reem vrl,https://github.com/vectordotdev/vrl,MPL-2.0,Vector Contributors diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 96d348a9a1357..71cbe5dbdc7f7 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -25,7 +25,7 @@ snafu = { version = "0.7.4", default-features = false, features = ["futures"] } syslog_loose = { version = "0.18", default-features = false, optional = true } tokio-util = { version = "0.7", default-features = false, features = ["codec"] } tracing = { version = "0.1", default-features = false } -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["value"] } +vrl = { version = "0.4.0", default-features = false, features = ["value"] } vector-common = { path = "../vector-common", default-features = false } vector-config = { path = "../vector-config", default-features = false } vector-config-common = { path = "../vector-config-common", default-features = false } diff --git a/lib/codecs/src/encoding/format/avro.rs b/lib/codecs/src/encoding/format/avro.rs index 3bfeea5ea1dad..bfac175f18c21 100644 --- a/lib/codecs/src/encoding/format/avro.rs +++ b/lib/codecs/src/encoding/format/avro.rs @@ -82,7 +82,7 @@ mod tests { use bytes::BytesMut; use indoc::indoc; use vector_core::event::{LogEvent, Value}; - use vrl::value::btreemap; + use vrl::btreemap; use super::*; diff --git a/lib/codecs/src/encoding/format/gelf.rs b/lib/codecs/src/encoding/format/gelf.rs index 799fe41adb526..3a0c6d0461991 100644 --- a/lib/codecs/src/encoding/format/gelf.rs +++ b/lib/codecs/src/encoding/format/gelf.rs @@ -241,7 +241,7 @@ mod tests { use super::*; use chrono::{DateTime, 
NaiveDateTime, Utc}; use vector_core::event::{Event, EventMetadata}; - use vrl::value::btreemap; + use vrl::btreemap; use vrl::value::Value; fn do_serialize( diff --git a/lib/codecs/src/encoding/format/json.rs b/lib/codecs/src/encoding/format/json.rs index 73ab79c78c095..943a421c77556 100644 --- a/lib/codecs/src/encoding/format/json.rs +++ b/lib/codecs/src/encoding/format/json.rs @@ -91,7 +91,7 @@ mod tests { use chrono::{TimeZone, Timelike, Utc}; use vector_core::event::{LogEvent, Metric, MetricKind, MetricValue, StatisticKind, Value}; use vector_core::metric_tags; - use vrl::value::btreemap; + use vrl::btreemap; use super::*; diff --git a/lib/codecs/src/encoding/format/logfmt.rs b/lib/codecs/src/encoding/format/logfmt.rs index 37b0351b085ee..e656cc2c52164 100644 --- a/lib/codecs/src/encoding/format/logfmt.rs +++ b/lib/codecs/src/encoding/format/logfmt.rs @@ -60,7 +60,7 @@ mod tests { use super::*; use bytes::BytesMut; use vector_core::event::{LogEvent, Value}; - use vrl::value::btreemap; + use vrl::btreemap; #[test] fn serialize_logfmt() { diff --git a/lib/codecs/src/encoding/format/native_json.rs b/lib/codecs/src/encoding/format/native_json.rs index 6762c92737834..854bba9d97ec2 100644 --- a/lib/codecs/src/encoding/format/native_json.rs +++ b/lib/codecs/src/encoding/format/native_json.rs @@ -53,7 +53,7 @@ impl Encoder for NativeJsonSerializer { mod tests { use bytes::BytesMut; use vector_core::event::{LogEvent, Value}; - use vrl::value::btreemap; + use vrl::btreemap; use super::*; diff --git a/lib/enrichment/Cargo.toml b/lib/enrichment/Cargo.toml index ef118022f36d8..c88d81e2dfb09 100644 --- a/lib/enrichment/Cargo.toml +++ b/lib/enrichment/Cargo.toml @@ -10,4 +10,4 @@ arc-swap = { version = "1.6.0", default-features = false } dyn-clone = { version = "1.0.11", default-features = false } chrono = { version = "0.4.19", default-features = false } vector-common = { path = "../vector-common", default-features = false, features = [ "btreemap", "conversion", "serde" ] } -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["diagnostic"] } +vrl = { version = "0.4.0", default-features = false, features = ["diagnostic"] } diff --git a/lib/enrichment/src/find_enrichment_table_records.rs b/lib/enrichment/src/find_enrichment_table_records.rs index f4fbfd03c3ea6..bfe0aa7183af2 100644 --- a/lib/enrichment/src/find_enrichment_table_records.rs +++ b/lib/enrichment/src/find_enrichment_table_records.rs @@ -194,6 +194,7 @@ mod tests { use vector_common::TimeZone; use vrl::compiler::state::RuntimeState; use vrl::compiler::TargetValue; + use vrl::value; use vrl::value::Secrets; use super::*; diff --git a/lib/enrichment/src/get_enrichment_table_record.rs b/lib/enrichment/src/get_enrichment_table_record.rs index 93c432ccdd09f..edaac844966c1 100644 --- a/lib/enrichment/src/get_enrichment_table_record.rs +++ b/lib/enrichment/src/get_enrichment_table_record.rs @@ -186,6 +186,7 @@ mod tests { use vector_common::TimeZone; use vrl::compiler::state::RuntimeState; use vrl::compiler::TargetValue; + use vrl::value; use vrl::value::Secrets; use super::*; diff --git a/lib/opentelemetry-proto/Cargo.toml b/lib/opentelemetry-proto/Cargo.toml index 107fe072f8176..23c91e4c26282 100644 --- a/lib/opentelemetry-proto/Cargo.toml +++ b/lib/opentelemetry-proto/Cargo.toml @@ -17,5 +17,5 @@ lookup = { package = "vector-lookup", path = "../vector-lookup", default-feature ordered-float = { version = "3.7.0", default-features = false } prost = { version = "0.11", default-features = 
false, features = ["std"] } tonic = { version = "0.9", default-features = false, features = ["codegen", "gzip", "prost", "tls", "tls-roots", "transport"] } -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["value"] } +vrl = { version = "0.4.0", default-features = false, features = ["value"] } vector-core = { path = "../vector-core", default-features = false } diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 4ef8a662866c3..c2d26eb49e14f 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -62,7 +62,7 @@ snafu = { version = "0.7", optional = true } stream-cancel = { version = "0.8.1", default-features = false } tokio = { version = "1.28.1", default-features = false, features = ["macros", "time"] } tracing = { version = "0.1.34", default-features = false } -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["value", "core", "compiler"] } +vrl = { version = "0.4.0", default-features = false, features = ["value", "core", "compiler"] } vector-config = { path = "../vector-config" } vector-config-common = { path = "../vector-config-common" } vector-config-macros = { path = "../vector-config-macros" } diff --git a/lib/vector-common/src/lib.rs b/lib/vector-common/src/lib.rs index bcf58c72de723..832eaf0d5cdc5 100644 --- a/lib/vector-common/src/lib.rs +++ b/lib/vector-common/src/lib.rs @@ -13,7 +13,7 @@ #![deny(unused_comparisons)] #[cfg(feature = "btreemap")] -pub use vrl::value::btreemap; +pub use vrl::btreemap; #[cfg(feature = "byte_size_of")] pub mod byte_size_of; diff --git a/lib/vector-config/Cargo.toml b/lib/vector-config/Cargo.toml index 41f1a40ff2c59..67f99178aa472 100644 --- a/lib/vector-config/Cargo.toml +++ b/lib/vector-config/Cargo.toml @@ -26,7 +26,7 @@ snafu = { version = "0.7.4", default-features = false } toml = { version = "0.7.4", default-features = false } tracing = { version = "0.1.34", default-features = false } url = { version = "2.3.1", default-features = false, features = ["serde"] } -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["compiler"] } +vrl = { version = "0.4.0", default-features = false, features = ["compiler"] } vector-config-common = { path = "../vector-config-common" } vector-config-macros = { path = "../vector-config-macros" } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 7140ff58ec49d..f8cf61f0b3b72 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -65,7 +65,7 @@ vector-common = { path = "../vector-common" } vector-config = { path = "../vector-config" } vector-config-common = { path = "../vector-config-common" } vector-config-macros = { path = "../vector-config-macros" } -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0" } +vrl = { version = "0.4.0" } [target.'cfg(target_os = "macos")'.dependencies] security-framework = "2.9.1" @@ -94,7 +94,7 @@ rand = "0.8.5" rand_distr = "0.4.3" tracing-subscriber = { version = "0.3.17", default-features = false, features = ["env-filter", "fmt", "ansi", "registry"] } vector-common = { path = "../vector-common", default-features = false, features = ["test"] } -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["value", "arbitrary", "lua"] } +vrl = { version = "0.4.0", default-features = false, features = ["value", "arbitrary", "lua"] } [features] api = ["dep:async-graphql"] 
diff --git a/lib/vector-core/src/event/log_event.rs b/lib/vector-core/src/event/log_event.rs index a1e1642e74094..353d0416fccaa 100644 --- a/lib/vector-core/src/event/log_event.rs +++ b/lib/vector-core/src/event/log_event.rs @@ -694,7 +694,7 @@ mod test { use super::*; use crate::test_util::open_fixture; use lookup::event_path; - use vrl::value::value; + use vrl::value; // The following two tests assert that renaming a key has no effect if the // keys are equivalent, whether the key exists in the log or not. diff --git a/lib/vector-core/src/event/vrl_target.rs b/lib/vector-core/src/event/vrl_target.rs index 861721eb7b1aa..f6277cdfe20a6 100644 --- a/lib/vector-core/src/event/vrl_target.rs +++ b/lib/vector-core/src/event/vrl_target.rs @@ -588,7 +588,7 @@ mod test { use chrono::{offset::TimeZone, Utc}; use lookup::owned_value_path; use similar_asserts::assert_eq; - use vrl::value::btreemap; + use vrl::btreemap; use super::super::MetricValue; use super::*; @@ -858,7 +858,7 @@ mod test { #[test] fn log_into_events() { - use vrl::value::btreemap; + use vrl::btreemap; let cases = vec![ ( diff --git a/lib/vector-lookup/Cargo.toml b/lib/vector-lookup/Cargo.toml index d0255184d10ad..daec79b1e5004 100644 --- a/lib/vector-lookup/Cargo.toml +++ b/lib/vector-lookup/Cargo.toml @@ -10,4 +10,4 @@ license = "MPL-2.0" serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"] } vector-config = { path = "../vector-config" } vector-config-macros = { path = "../vector-config-macros" } -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["path"] } +vrl = { version = "0.4.0", default-features = false, features = ["path"] } diff --git a/lib/vector-lookup/src/lib.rs b/lib/vector-lookup/src/lib.rs index 7058cfc6e899f..e32e73b5f12a3 100644 --- a/lib/vector-lookup/src/lib.rs +++ b/lib/vector-lookup/src/lib.rs @@ -1,7 +1,7 @@ #![deny(warnings)] -pub use vrl::path::{ - event_path, metadata_path, owned_value_path, path, OwnedTargetPath, OwnedValuePath, PathPrefix, -}; +pub use vrl::path::{OwnedTargetPath, OwnedValuePath, PathPrefix}; + +pub use vrl::{event_path, metadata_path, owned_value_path, path}; pub mod lookup_v2; diff --git a/lib/vector-vrl/cli/Cargo.toml b/lib/vector-vrl/cli/Cargo.toml index 54d0279099bba..dd2c451de79c3 100644 --- a/lib/vector-vrl/cli/Cargo.toml +++ b/lib/vector-vrl/cli/Cargo.toml @@ -9,4 +9,4 @@ license = "MPL-2.0" [dependencies] clap = { version = "4.1.14", features = ["derive"] } vector-vrl-functions = { path = "../functions" } -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["stdlib", "cli"] } +vrl = { version = "0.4.0", default-features = false, features = ["stdlib", "cli"] } diff --git a/lib/vector-vrl/functions/Cargo.toml b/lib/vector-vrl/functions/Cargo.toml index 08fb64069f397..787637bad848b 100644 --- a/lib/vector-vrl/functions/Cargo.toml +++ b/lib/vector-vrl/functions/Cargo.toml @@ -7,4 +7,4 @@ publish = false license = "MPL-2.0" [dependencies] -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["compiler", "path", "diagnostic"] } +vrl = { version = "0.4.0", default-features = false, features = ["compiler", "path", "diagnostic"] } diff --git a/lib/vector-vrl/tests/Cargo.toml b/lib/vector-vrl/tests/Cargo.toml index 12c28a88a52e5..2ff39275589b0 100644 --- a/lib/vector-vrl/tests/Cargo.toml +++ b/lib/vector-vrl/tests/Cargo.toml @@ -7,7 +7,7 @@ publish = false [dependencies] enrichment = { path 
= "../../enrichment" } -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", features = ["test_framework"]} +vrl = { version = "0.4.0", features = ["test_framework"]} vector-vrl-functions = { path = "../../vector-vrl/functions" } ansi_term = "0.12" diff --git a/lib/vector-vrl/web-playground/Cargo.toml b/lib/vector-vrl/web-playground/Cargo.toml index 79433c22ffa5e..72a9118937ba6 100644 --- a/lib/vector-vrl/web-playground/Cargo.toml +++ b/lib/vector-vrl/web-playground/Cargo.toml @@ -10,7 +10,7 @@ crate-type = ["cdylib"] [dependencies] wasm-bindgen = "0.2" -vrl = { git = "https://github.com/vectordotdev/vrl", rev = "v0.3.0", default-features = false, features = ["value", "stdlib"] } +vrl = { version = "0.4.0", default-features = false, features = ["value", "stdlib"] } serde = { version = "1.0", features = ["derive"] } serde-wasm-bindgen = "0.5" gloo-utils = { version = "0.1", features = ["serde"] } diff --git a/src/config/enterprise.rs b/src/config/enterprise.rs index 97075716b3b96..bce787aa7d183 100644 --- a/src/config/enterprise.rs +++ b/src/config/enterprise.rs @@ -833,9 +833,9 @@ mod test { use indexmap::IndexMap; use tokio::time::sleep; use vector_core::config::proxy::ProxyConfig; + use vrl::btreemap; use vrl::compiler::state::ExternalEnv; use vrl::compiler::{compile, compile_with_external, CompileConfig}; - use vrl::value::btreemap; use vrl::value::kind::Collection; use vrl::value::Kind; use wiremock::{matchers, Mock, MockServer, ResponseTemplate}; diff --git a/src/sinks/influxdb/logs.rs b/src/sinks/influxdb/logs.rs index 80781c34f863c..0bcd27ef35e90 100644 --- a/src/sinks/influxdb/logs.rs +++ b/src/sinks/influxdb/logs.rs @@ -883,7 +883,7 @@ mod integration_tests { use std::sync::Arc; use vector_core::config::{LegacyKey, LogNamespace}; use vector_core::event::{BatchNotifier, BatchStatus, Event, LogEvent}; - use vrl::value::value; + use vrl::value; use super::*; use crate::{ diff --git a/src/sources/aws_kinesis_firehose/mod.rs b/src/sources/aws_kinesis_firehose/mod.rs index cefb74beafc07..d546ce175b6cd 100644 --- a/src/sources/aws_kinesis_firehose/mod.rs +++ b/src/sources/aws_kinesis_firehose/mod.rs @@ -255,7 +255,7 @@ mod tests { use similar_asserts::assert_eq; use tokio::time::{sleep, Duration}; use vector_common::assert_event_data_eq; - use vrl::value::value; + use vrl::value; use super::*; use crate::{ diff --git a/src/sources/dnstap/schema.rs b/src/sources/dnstap/schema.rs index dcf4a60935a9b..90a76dfa33a44 100644 --- a/src/sources/dnstap/schema.rs +++ b/src/sources/dnstap/schema.rs @@ -1,6 +1,6 @@ use lookup::{owned_value_path, OwnedValuePath}; use std::collections::BTreeMap; -use vrl::value::btreemap; +use vrl::btreemap; use vrl::value::{ kind::{Collection, Field}, Kind, diff --git a/src/sources/docker_logs/tests.rs b/src/sources/docker_logs/tests.rs index e055431271886..c9569209074ba 100644 --- a/src/sources/docker_logs/tests.rs +++ b/src/sources/docker_logs/tests.rs @@ -45,7 +45,7 @@ mod integration_tests { }; use futures::{stream::TryStreamExt, FutureExt}; use similar_asserts::assert_eq; - use vrl::value::value; + use vrl::value; /// None if docker is not present on the system async fn source_with<'a, L: Into>>( diff --git a/src/sources/exec/mod.rs b/src/sources/exec/mod.rs index c540c8e97ae15..1f8d755e77e3f 100644 --- a/src/sources/exec/mod.rs +++ b/src/sources/exec/mod.rs @@ -722,7 +722,7 @@ mod tests { use bytes::Bytes; use std::io::Cursor; use vector_core::event::EventMetadata; - use vrl::value::value; + use vrl::value; #[cfg(unix)] use futures::task::Poll; 
diff --git a/src/sources/file.rs b/src/sources/file.rs index 8c31338b30175..76bec5f106d9f 100644 --- a/src/sources/file.rs +++ b/src/sources/file.rs @@ -818,7 +818,7 @@ mod tests { sources::file, test_util::components::{assert_source_compliance, FILE_SOURCE_TAGS}, }; - use vrl::value::value; + use vrl::value; #[test] fn generate_config() { diff --git a/src/sources/file_descriptors/file_descriptor.rs b/src/sources/file_descriptors/file_descriptor.rs index 9a97591f0644c..0740674742e81 100644 --- a/src/sources/file_descriptors/file_descriptor.rs +++ b/src/sources/file_descriptors/file_descriptor.rs @@ -112,7 +112,7 @@ mod tests { SourceSender, }; use futures::StreamExt; - use vrl::value::value; + use vrl::value; #[test] fn generate_config() { diff --git a/src/sources/file_descriptors/stdin.rs b/src/sources/file_descriptors/stdin.rs index 3b6d5aecebbae..0f21db79ac4a3 100644 --- a/src/sources/file_descriptors/stdin.rs +++ b/src/sources/file_descriptors/stdin.rs @@ -117,7 +117,7 @@ mod tests { }; use futures::StreamExt; use lookup::path; - use vrl::value::value; + use vrl::value; #[test] fn generate_config() { diff --git a/src/sources/gcp_pubsub.rs b/src/sources/gcp_pubsub.rs index 10b72a11f302a..ffef638f2d3a6 100644 --- a/src/sources/gcp_pubsub.rs +++ b/src/sources/gcp_pubsub.rs @@ -841,7 +841,7 @@ mod integration_tests { use once_cell::sync::Lazy; use serde_json::{json, Value}; use tokio::time::{Duration, Instant}; - use vrl::value::btreemap; + use vrl::btreemap; use super::*; use crate::config::{ComponentKey, ProxyConfig}; diff --git a/src/sources/kafka.rs b/src/sources/kafka.rs index c9418baab5940..762c13f9c6cd9 100644 --- a/src/sources/kafka.rs +++ b/src/sources/kafka.rs @@ -913,7 +913,7 @@ mod integration_test { use tokio::time::sleep; use vector_buffers::topology::channel::BufferReceiver; use vector_core::event::EventStatus; - use vrl::value::value; + use vrl::value; use super::{test::*, *}; use crate::{ diff --git a/src/sources/kubernetes_logs/parser/cri.rs b/src/sources/kubernetes_logs/parser/cri.rs index e4d589b8b6727..ce8f6a9035c56 100644 --- a/src/sources/kubernetes_logs/parser/cri.rs +++ b/src/sources/kubernetes_logs/parser/cri.rs @@ -189,7 +189,7 @@ pub mod tests { use super::{super::test_util, *}; use crate::{event::LogEvent, test_util::trace_init}; - use vrl::value::value; + use vrl::value; fn make_long_string(base: &str, len: usize) -> String { base.chars().cycle().take(len).collect() diff --git a/src/sources/kubernetes_logs/parser/docker.rs b/src/sources/kubernetes_logs/parser/docker.rs index db25f52c63276..5b3c71201779c 100644 --- a/src/sources/kubernetes_logs/parser/docker.rs +++ b/src/sources/kubernetes_logs/parser/docker.rs @@ -207,7 +207,7 @@ enum NormalizationError { pub mod tests { use super::{super::test_util, *}; use crate::test_util::trace_init; - use vrl::value::value; + use vrl::value; fn make_long_string(base: &str, len: usize) -> String { base.chars().cycle().take(len).collect() diff --git a/src/sources/kubernetes_logs/parser/mod.rs b/src/sources/kubernetes_logs/parser/mod.rs index b8b5b4c6e9c7f..fe6f765477bd3 100644 --- a/src/sources/kubernetes_logs/parser/mod.rs +++ b/src/sources/kubernetes_logs/parser/mod.rs @@ -84,7 +84,7 @@ impl FunctionTransform for Parser { mod tests { use bytes::Bytes; use lookup::event_path; - use vrl::value::value; + use vrl::value; use super::*; use crate::{event::Event, event::LogEvent, test_util::trace_init}; diff --git a/src/sources/kubernetes_logs/parser/test_util.rs b/src/sources/kubernetes_logs/parser/test_util.rs index 
745e849ad7d56..fb89b88d7ab65 100644 --- a/src/sources/kubernetes_logs/parser/test_util.rs +++ b/src/sources/kubernetes_logs/parser/test_util.rs @@ -4,7 +4,8 @@ use similar_asserts::assert_eq; use chrono::{DateTime, Utc}; use lookup::{event_path, metadata_path}; use vector_core::{config::LogNamespace, event}; -use vrl::value::{value, Value}; +use vrl::value; +use vrl::value::Value; use crate::{ event::{Event, LogEvent}, diff --git a/src/sources/opentelemetry/tests.rs b/src/sources/opentelemetry/tests.rs index a6085cff20769..798759fa1138d 100644 --- a/src/sources/opentelemetry/tests.rs +++ b/src/sources/opentelemetry/tests.rs @@ -12,7 +12,7 @@ use similar_asserts::assert_eq; use std::collections::BTreeMap; use tonic::Request; use vector_core::config::LogNamespace; -use vrl::value::value; +use vrl::value; use crate::{ config::{SourceConfig, SourceContext}, diff --git a/src/sources/redis/mod.rs b/src/sources/redis/mod.rs index 05723b6cc3d4a..c8ab0ed6de71e 100644 --- a/src/sources/redis/mod.rs +++ b/src/sources/redis/mod.rs @@ -321,7 +321,7 @@ mod integration_test { }, SourceSender, }; - use vrl::value::value; + use vrl::value; const REDIS_SERVER: &str = "redis://redis:6379/0"; diff --git a/src/sources/socket/mod.rs b/src/sources/socket/mod.rs index e0d212a59f9ee..93366629f3420 100644 --- a/src/sources/socket/mod.rs +++ b/src/sources/socket/mod.rs @@ -344,8 +344,9 @@ mod test { time::{timeout, Duration, Instant}, }; use vector_core::event::EventContainer; - use vrl::value::value; - use vrl::value::{btreemap, Value}; + use vrl::btreemap; + use vrl::value; + use vrl::value::Value; #[cfg(unix)] use { diff --git a/src/transforms/remap.rs b/src/transforms/remap.rs index 5107a32f3bf91..877a7dde048db 100644 --- a/src/transforms/remap.rs +++ b/src/transforms/remap.rs @@ -721,10 +721,8 @@ mod tests { use indoc::{formatdoc, indoc}; use vector_core::{config::GlobalOptions, event::EventMetadata, metric_tags}; - use vrl::value::{ - btreemap, - kind::{Collection, Index}, - }; + use vrl::btreemap; + use vrl::value::kind::{Collection, Index}; use super::*; use crate::{ From 670bdea00ab7a13921aa3194667068b27f58e35a Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Thu, 25 May 2023 14:26:55 +0100 Subject: [PATCH 041/236] chore(observability): set source fields to mean service (#17470) This adds the `service` meaning to the `appname` field from syslog, `app_name` from `heroku_logs`, `source` from `splunk_hec`. It also adds a new field to `demo_logs` called `service` and populates it with the value `vector`. The `datadog_agent` source already [handles this](https://github.com/vectordotdev/vector/blob/master/src/sources/datadog_agent/mod.rs#L225-L231). I can't think of any other sources that may specify a potential service field, but I am very open to suggestions! 
--------- Signed-off-by: Stephen Wakely --- lib/codecs/src/decoding/format/syslog.rs | 14 +++++++++++--- src/sources/datadog_agent/tests.rs | 12 ++++++++++-- src/sources/demo_logs.rs | 23 +++++++++++++++++++++-- src/sources/heroku_logs.rs | 10 +++++++--- src/sources/splunk_hec/mod.rs | 10 +++++++--- src/sources/syslog.rs | 4 ++-- 6 files changed, 58 insertions(+), 15 deletions(-) diff --git a/lib/codecs/src/decoding/format/syslog.rs b/lib/codecs/src/decoding/format/syslog.rs index 68824b0e923d5..d4aedefdc1d46 100644 --- a/lib/codecs/src/decoding/format/syslog.rs +++ b/lib/codecs/src/decoding/format/syslog.rs @@ -71,7 +71,11 @@ impl SyslogDeserializerConfig { ) .optional_field(&owned_value_path!("facility"), Kind::bytes(), None) .optional_field(&owned_value_path!("version"), Kind::integer(), None) - .optional_field(&owned_value_path!("appname"), Kind::bytes(), None) + .optional_field( + &owned_value_path!("appname"), + Kind::bytes(), + Some("service"), + ) .optional_field(&owned_value_path!("msgid"), Kind::bytes(), None) .optional_field( &owned_value_path!("procid"), @@ -112,7 +116,11 @@ impl SyslogDeserializerConfig { ) .optional_field(&owned_value_path!("facility"), Kind::bytes(), None) .optional_field(&owned_value_path!("version"), Kind::integer(), None) - .optional_field(&owned_value_path!("appname"), Kind::bytes(), None) + .optional_field( + &owned_value_path!("appname"), + Kind::bytes(), + Some("service"), + ) .optional_field(&owned_value_path!("msgid"), Kind::bytes(), None) .optional_field( &owned_value_path!("procid"), @@ -172,7 +180,7 @@ impl SyslogDeserializerConfig { None, &owned_value_path!("appname"), Kind::bytes().or_undefined(), - None, + Some("service"), ) .with_source_metadata( source, diff --git a/src/sources/datadog_agent/tests.rs b/src/sources/datadog_agent/tests.rs index 107f8f10aced1..4508082573d6c 100644 --- a/src/sources/datadog_agent/tests.rs +++ b/src/sources/datadog_agent/tests.rs @@ -1690,7 +1690,11 @@ fn test_config_outputs() { ) .optional_field(&owned_value_path!("facility"), Kind::bytes(), None) .optional_field(&owned_value_path!("version"), Kind::integer(), None) - .optional_field(&owned_value_path!("appname"), Kind::bytes(), None) + .optional_field( + &owned_value_path!("appname"), + Kind::bytes(), + Some("service"), + ) .optional_field(&owned_value_path!("msgid"), Kind::bytes(), None) .optional_field( &owned_value_path!("procid"), @@ -1766,7 +1770,11 @@ fn test_config_outputs() { Kind::integer(), None, ) - .optional_field(&owned_value_path!("appname"), Kind::bytes(), None) + .optional_field( + &owned_value_path!("appname"), + Kind::bytes(), + Some("service"), + ) .optional_field(&owned_value_path!("msgid"), Kind::bytes(), None) .optional_field( &owned_value_path!("procid"), diff --git a/src/sources/demo_logs.rs b/src/sources/demo_logs.rs index 9f0b6a0a098f7..d587a20cfc7b1 100644 --- a/src/sources/demo_logs.rs +++ b/src/sources/demo_logs.rs @@ -5,6 +5,7 @@ use codecs::{ }; use fakedata::logs::*; use futures::StreamExt; +use lookup::{owned_value_path, path}; use rand::seq::SliceRandom; use serde_with::serde_as; use snafu::Snafu; @@ -15,7 +16,11 @@ use vector_common::internal_event::{ ByteSize, BytesReceived, CountByteSize, InternalEventHandle as _, Protocol, }; use vector_config::configurable_component; -use vector_core::{config::LogNamespace, EstimatedJsonEncodedSizeOf}; +use vector_core::{ + config::{LegacyKey, LogNamespace}, + EstimatedJsonEncodedSizeOf, +}; +use vrl::value::Kind; use crate::{ codecs::{Decoder, DecodingConfig}, @@ -249,6 +254,13 @@ async 
fn demo_logs_source( DemoLogsConfig::NAME, now, ); + log_namespace.insert_source_metadata( + "service", + log, + Some(LegacyKey::InsertIfEmpty(path!("service"))), + path!("service"), + "vector", + ); event }); @@ -300,7 +312,14 @@ impl SourceConfig for DemoLogsConfig { let schema_definition = self .decoding .schema_definition(log_namespace) - .with_standard_vector_source_metadata(); + .with_standard_vector_source_metadata() + .with_source_metadata( + DemoLogsConfig::NAME, + Some(LegacyKey::InsertIfEmpty(owned_value_path!("service"))), + &owned_value_path!("service"), + Kind::bytes(), + Some("service"), + ); vec![SourceOutput::new_logs( self.decoding.output_type(), diff --git a/src/sources/heroku_logs.rs b/src/sources/heroku_logs.rs index a1e3d98df4fe6..2cdf7d7d44424 100644 --- a/src/sources/heroku_logs.rs +++ b/src/sources/heroku_logs.rs @@ -107,7 +107,7 @@ impl LogplexConfig { Some(LegacyKey::InsertIfEmpty(owned_value_path!("app_name"))), &owned_value_path!("app_name"), Kind::bytes(), - None, + Some("service"), ) .with_source_metadata( LogplexConfig::NAME, @@ -694,7 +694,7 @@ mod tests { .with_metadata_field( &owned_value_path!(LogplexConfig::NAME, "app_name"), Kind::bytes(), - None, + Some("service"), ) .with_metadata_field( &owned_value_path!(LogplexConfig::NAME, "proc_id"), @@ -731,7 +731,11 @@ mod tests { .with_event_field(&owned_value_path!("source_type"), Kind::bytes(), None) .with_event_field(&owned_value_path!("timestamp"), Kind::timestamp(), None) .with_event_field(&owned_value_path!("host"), Kind::bytes(), Some("host")) - .with_event_field(&owned_value_path!("app_name"), Kind::bytes(), None) + .with_event_field( + &owned_value_path!("app_name"), + Kind::bytes(), + Some("service"), + ) .with_event_field(&owned_value_path!("proc_id"), Kind::bytes(), None) .unknown_fields(Kind::bytes()); diff --git a/src/sources/splunk_hec/mod.rs b/src/sources/splunk_hec/mod.rs index 34808fac0c046..0aae6053ac402 100644 --- a/src/sources/splunk_hec/mod.rs +++ b/src/sources/splunk_hec/mod.rs @@ -226,7 +226,7 @@ impl SourceConfig for SplunkConfig { Some(LegacyKey::Overwrite(owned_value_path!(SOURCE))), &owned_value_path!("source"), Kind::bytes(), - None, + Some("service"), ) // Not to be confused with `source_type`. 
.with_source_metadata( @@ -2475,7 +2475,7 @@ mod tests { .with_metadata_field( &owned_value_path!("splunk_hec", "source"), Kind::bytes(), - None, + Some("service"), ) .with_metadata_field( &owned_value_path!("splunk_hec", "channel"), @@ -2519,7 +2519,11 @@ mod tests { .with_event_field(&owned_value_path!("source_type"), Kind::bytes(), None) .with_event_field(&owned_value_path!("splunk_channel"), Kind::bytes(), None) .with_event_field(&owned_value_path!("splunk_index"), Kind::bytes(), None) - .with_event_field(&owned_value_path!("splunk_source"), Kind::bytes(), None) + .with_event_field( + &owned_value_path!("splunk_source"), + Kind::bytes(), + Some("service"), + ) .with_event_field(&owned_value_path!("splunk_sourcetype"), Kind::bytes(), None) .with_event_field(&owned_value_path!("timestamp"), Kind::timestamp(), None); diff --git a/src/sources/syslog.rs b/src/sources/syslog.rs index 6bd7fd3c6df7d..63671c5595249 100644 --- a/src/sources/syslog.rs +++ b/src/sources/syslog.rs @@ -549,7 +549,7 @@ mod test { .with_metadata_field( &owned_value_path!("syslog", "appname"), Kind::bytes().or_undefined(), - None, + Some("service"), ) .with_metadata_field( &owned_value_path!("syslog", "msgid"), @@ -628,7 +628,7 @@ mod test { .with_event_field( &owned_value_path!("appname"), Kind::bytes().or_undefined(), - None, + Some("service"), ) .with_event_field( &owned_value_path!("msgid"), From 077a294d10412552e80c41429f23bd6a4f47724b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 14:13:59 +0000 Subject: [PATCH 042/236] chore(deps): Bump async-graphql from 5.0.8 to 5.0.9 (#17486) Bumps [async-graphql](https://github.com/async-graphql/async-graphql) from 5.0.8 to 5.0.9.
Changelog

Sourced from async-graphql's changelog.

[5.0.9] 2023-05-25

  • Prevent input check stack overflow #1293
  • Change batch requests to run concurrently #1290
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 155f906ab6ca9..733aec4c548e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -407,9 +407,9 @@ dependencies = [ [[package]] name = "async-graphql" -version = "5.0.8" +version = "5.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ae09afb01514b3dbd6328547b2b11fcbcb0205d9c5e6f2e17e60cb166a82d7f" +checksum = "364423936c4b828ac1615ce325e528c5afbe6e6995d799ee5683c7d36720dfa4" dependencies = [ "async-graphql-derive", "async-graphql-parser", @@ -438,9 +438,9 @@ dependencies = [ [[package]] name = "async-graphql-derive" -version = "5.0.8" +version = "5.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60ae62851dd3ff9a7550aee75e848e8834b75285b458753e98dd71d0733ad3f2" +checksum = "23a06320343bbe0a1f2e29ec6d1ed34e0460f10e6827b3154a78e4ccc039dbc4" dependencies = [ "Inflector", "async-graphql-parser", @@ -454,9 +454,9 @@ dependencies = [ [[package]] name = "async-graphql-parser" -version = "5.0.8" +version = "5.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6ee332acd99d2c50c3443beae46e9ed784c205eead9a668b7b5118b4a60a8b" +checksum = "46ce3b4b57e2a4630ea5e69eeb02fb5ee3c5f48754fcf7fd6a7bf3b4f96538f0" dependencies = [ "async-graphql-value", "pest", @@ -466,9 +466,9 @@ dependencies = [ [[package]] name = "async-graphql-value" -version = "5.0.8" +version = "5.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122da50452383410545b9428b579f4cda5616feb6aa0aff0003500c53fcff7b7" +checksum = "637c6b5a755133d47c9829df04b7a5e2f1856fe4c1101f581650c93198eba103" dependencies = [ "bytes 1.4.0", "indexmap", diff --git a/Cargo.toml b/Cargo.toml index 6e8c218a65ad4..1dc4bd8ad1af8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -210,7 +210,7 @@ smpl_jwt = { version = "0.7.1", default-features = false, optional = true } lapin = { version = "2.2.1", default-features = false, features = ["native-tls"], optional = true } # API -async-graphql = { version = "5.0.8", default-features = false, optional = true, features = ["chrono"] } +async-graphql = { version = "5.0.9", default-features = false, optional = true, features = ["chrono"] } async-graphql-warp = { version = "5.0.8", default-features = false, optional = true } itertools = { version = "0.10.5", default-features = false, optional = true } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index f8cf61f0b3b72..0ddcb35a016b6 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" publish = false [dependencies] -async-graphql = { version = "5.0.8", default-features = false, features = ["playground" ], optional = true } +async-graphql = { version = "5.0.9", default-features = false, features = ["playground" ], optional = true } async-trait = { version = "0.1", default-features = false } bitmask-enum = { version = "2.1.0", default-features = false } bytes = { version = "1.4.0", default-features = false, features = ["serde"] } From 79f7dfb4d4633badf8ee89f0e940fa44f5bd59aa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 14:14:38 +0000 Subject: [PATCH 043/236] chore(deps): bump memmap2 from 0.6.1 to 0.6.2 (#17482) 
Bumps [memmap2](https://github.com/RazrFalcon/memmap2-rs) from 0.6.1 to 0.6.2.
Changelog

Sourced from memmap2's changelog.

[0.6.2] - 2023-05-24

Fixed

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- lib/vector-buffers/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 733aec4c548e0..e76c30b8bdef5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4878,9 +4878,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memmap2" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0aa1b505aeecb0adb017db2b6a79a17a38e64f882a201f05e9de8a982cd6096" +checksum = "6d28bba84adfe6646737845bc5ebbfa2c08424eb1c37e94a1fd2a82adb56a872" dependencies = [ "libc", ] diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index f105b82a47401..8fde815aa32de 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -16,7 +16,7 @@ crossbeam-queue = { version = "0.3.8", default-features = false, features = ["st crossbeam-utils = { version = "0.8.15", default-features = false } fslock = { version = "0.2.1", default-features = false, features = ["std"] } futures = { version = "0.3.28", default-features = false, features = ["std"] } -memmap2 = { version = "0.6.1", default-features = false } +memmap2 = { version = "0.6.2", default-features = false } metrics = "0.21.0" num-traits = { version = "0.2.15", default-features = false } pin-project = { version = "1.1.0", default-features = false } From 84f0adac7a8e6306e12eaf13dc8c28f23e33f867 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 14:15:58 +0000 Subject: [PATCH 044/236] chore(deps): bump criterion from 0.4.0 to 0.5.0 (#17477) Bumps [criterion](https://github.com/bheisler/criterion.rs) from 0.4.0 to 0.5.0.
Changelog

Sourced from criterion's changelog.

[0.5.0] - 2023-05-23

Changed

  • Replaced lazy_static dependency with once_cell
  • Improved documentation of the html_reports feature
  • Replaced atty dependency with is-terminal
  • MSRV bumped to 1.64
  • Upgraded clap dependency to v4
  • Upgraded tempfile dependency to v3.5.0

Fixed

  • Quick mode (--quick) no longer outputs 1ms for measured times over 5 seconds
  • Documentation updates
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 47 ++++++----------------------------- Cargo.toml | 2 +- lib/dnsmsg-parser/Cargo.toml | 2 +- lib/file-source/Cargo.toml | 2 +- lib/tracing-limit/Cargo.toml | 2 +- lib/vector-buffers/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- 7 files changed, 13 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e76c30b8bdef5..518c12689d6b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1919,23 +1919,11 @@ dependencies = [ "atty", "bitflags", "strsim 0.8.0", - "textwrap 0.11.0", + "textwrap", "unicode-width", "vec_map", ] -[[package]] -name = "clap" -version = "3.2.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" -dependencies = [ - "bitflags", - "clap_lex 0.2.4", - "indexmap", - "textwrap 0.16.0", -] - [[package]] name = "clap" version = "4.1.14" @@ -1964,7 +1952,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "351f9ad9688141ed83dfd8f5fb998a06225ef444b48ff4dc43de6d409b7fd10b" dependencies = [ "bitflags", - "clap_lex 0.4.1", + "clap_lex", "is-terminal", "strsim 0.10.0", "termcolor", @@ -1992,15 +1980,6 @@ dependencies = [ "syn 2.0.10", ] -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - [[package]] name = "clap_lex" version = "0.4.1" @@ -2277,20 +2256,20 @@ dependencies = [ [[package]] name = "criterion" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" +checksum = "9f9c16c823fba76d9643cc387e9677d9771abe0827561381815215c47f808da9" dependencies = [ "anes", - "atty", "cast", "ciborium", - "clap 3.2.23", + "clap 4.1.14", "criterion-plot", "futures 0.3.28", + "is-terminal", "itertools", - "lazy_static", "num-traits", + "once_cell", "oorandom", "plotters", "rayon", @@ -5717,12 +5696,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "os_str_bytes" -version = "6.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" - [[package]] name = "outref" version = "0.5.1" @@ -8121,12 +8094,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "textwrap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" - [[package]] name = "thiserror" version = "1.0.40" diff --git a/Cargo.toml b/Cargo.toml index 1dc4bd8ad1af8..cd2ff5e09dc8e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -344,7 +344,7 @@ azure_identity = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev azure_storage_blobs = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["azurite_workaround"] } azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["azurite_workaround"] } base64 = "0.21.1" -criterion = { version = "0.4.0", features = ["html_reports", "async_tokio"] } +criterion = { version = "0.5.0", features = ["html_reports", 
"async_tokio"] } itertools = { version = "0.10.5", default-features = false } libc = "0.2.144" similar-asserts = "1.4.2" diff --git a/lib/dnsmsg-parser/Cargo.toml b/lib/dnsmsg-parser/Cargo.toml index bf62684808c3a..e6ba28689e1aa 100644 --- a/lib/dnsmsg-parser/Cargo.toml +++ b/lib/dnsmsg-parser/Cargo.toml @@ -12,7 +12,7 @@ thiserror = "1.0" trust-dns-proto = { version = "0.22", features = ["dnssec"] } [dev-dependencies] -criterion = "0.4" +criterion = "0.5" [lib] bench = false diff --git a/lib/file-source/Cargo.toml b/lib/file-source/Cargo.toml index 9a2329b8a9e18..3057ea20e726a 100644 --- a/lib/file-source/Cargo.toml +++ b/lib/file-source/Cargo.toml @@ -74,7 +74,7 @@ default-features = false features = ["full"] [dev-dependencies] -criterion = "0.4" +criterion = "0.5" quickcheck = "1" tempfile = "3.5.0" similar-asserts = "1.4.2" diff --git a/lib/tracing-limit/Cargo.toml b/lib/tracing-limit/Cargo.toml index ab5c634ff7ce3..48c0bf0762a98 100644 --- a/lib/tracing-limit/Cargo.toml +++ b/lib/tracing-limit/Cargo.toml @@ -12,7 +12,7 @@ tracing-subscriber = { version = "0.3", default-features = false, features = ["r dashmap = { version = "5.2.0", default-features = false } [dev-dependencies] -criterion = "0.4" +criterion = "0.5" tracing = "0.1.34" mock_instant = { version = "0.3" } tracing-subscriber = { version = "0.3.17", default-features = false, features = ["env-filter", "fmt"] } diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index 8fde815aa32de..b454916b3f65d 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -33,7 +33,7 @@ vector-common = { path = "../vector-common", default-features = false, features [dev-dependencies] clap = "4.1.14" -criterion = { version = "0.4", features = ["html_reports", "async_tokio"] } +criterion = { version = "0.5", features = ["html_reports", "async_tokio"] } crossbeam-queue = "0.3.8" hdrhistogram = "7.5.2" metrics-tracing-context = { version = "0.14.0", default-features = false } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 0ddcb35a016b6..1187e0a415449 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -79,7 +79,7 @@ prost-build = "0.11" [dev-dependencies] base64 = "0.21.1" chrono-tz = { version = "0.8.2", default-features = false } -criterion = { version = "0.4.0", features = ["html_reports"] } +criterion = { version = "0.5.0", features = ["html_reports"] } env-test-util = "1.0.1" quickcheck = "1" quickcheck_macros = "1" From 7699f4ded19e520258adddd4c628a7a309c52c4e Mon Sep 17 00:00:00 2001 From: neuronull Date: Thu, 25 May 2023 11:33:59 -0600 Subject: [PATCH 045/236] chore(ci): update comment_trigger note about concurrency groups (#17491) --- .github/workflows/comment-trigger.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/comment-trigger.yml b/.github/workflows/comment-trigger.yml index cdd5f65adf990..8d3128ba14300 100644 --- a/.github/workflows/comment-trigger.yml +++ b/.github/workflows/comment-trigger.yml @@ -34,10 +34,12 @@ env: # can be removed when we switch back to the upstream openssl-sys crate CARGO_NET_GIT_FETCH_WITH_CLI: true -# TODO: temporarily disabling concurrency groups at request of GitHub Support, to see if it resolves -# the issue we are having. +# The below concurrency group settings would let us cancel in progress runs that were triggered with the +# same comment on a given PR, which could save on time consuming runs. 
+# But GH does not currently support the github.event.comment.body as part of the concurrency name, this +# appears to be due to the potential length of it. #concurrency: -# group: ${{ github.workflow }}-${{ github.event.issue_comment.issue.id }}-${{ github.event.comment.body }} +# group: ${{ github.workflow }}-${{ github.event.issue.id }}-${{ github.event.comment.body }} # cancel-in-progress: true jobs: From ac81fc1318b229e2b9c6bbcd080af7438afde85a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 18:18:38 +0000 Subject: [PATCH 046/236] chore(deps): Bump async-graphql-warp from 5.0.8 to 5.0.9 (#17489) Bumps [async-graphql-warp](https://github.com/async-graphql/async-graphql) from 5.0.8 to 5.0.9.
Changelog

Sourced from async-graphql-warp's changelog.

[5.0.9] 2023-05-25

  • Prevent input check stack overflow #1293
  • Change batch requests to run concurrently #1290
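Both entries are behavioral fixes inside a patch-level (5.0.8 to 5.0.9) release, so existing wiring against the crate keeps compiling. As a hedged sketch (assuming the documented `async_graphql_warp::graphql` helper and a placeholder `Query` root; none of this is taken from this patch), a warp route backed by an async-graphql 5.x schema is typically wired like:

```rust
// Hypothetical sketch, not Vector code: the resolver, route, and port are placeholders.
use std::convert::Infallible;

use async_graphql::{EmptyMutation, EmptySubscription, Object, Request, Schema};
use async_graphql_warp::GraphQLResponse;
use warp::Filter;

struct Query;

#[Object]
impl Query {
    // Trivial resolver used only for illustration.
    async fn ping(&self) -> &'static str {
        "pong"
    }
}

#[tokio::main]
async fn main() {
    let schema = Schema::build(Query, EmptyMutation, EmptySubscription).finish();

    // The filter extracts the schema plus the parsed GraphQL request and turns the
    // execution result into a warp reply.
    let graphql_post = async_graphql_warp::graphql(schema).and_then(
        |(schema, request): (Schema<Query, EmptyMutation, EmptySubscription>, Request)| async move {
            Ok::<_, Infallible>(GraphQLResponse::from(schema.execute(request).await))
        },
    );

    warp::serve(warp::path("graphql").and(graphql_post))
        .run(([127, 0, 0, 1], 8000))
        .await;
}
```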
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=async-graphql-warp&package-manager=cargo&previous-version=5.0.8&new-version=5.0.9)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 518c12689d6b6..d67aefb341520 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -478,9 +478,9 @@ dependencies = [ [[package]] name = "async-graphql-warp" -version = "5.0.8" +version = "5.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a2e1f7023d0074be87cd9ae83e3d6e9c6854bd7544026fdfc46c568e76c021" +checksum = "fae3d1991cb75a984eb6787b84dd7ebb362a696d4239bb59abbc5a015a01724c" dependencies = [ "async-graphql", "futures-util", diff --git a/Cargo.toml b/Cargo.toml index cd2ff5e09dc8e..8af29a1b3ad7b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -211,7 +211,7 @@ lapin = { version = "2.2.1", default-features = false, features = ["native-tls"] # API async-graphql = { version = "5.0.9", default-features = false, optional = true, features = ["chrono"] } -async-graphql-warp = { version = "5.0.8", default-features = false, optional = true } +async-graphql-warp = { version = "5.0.9", default-features = false, optional = true } itertools = { version = "0.10.5", default-features = false, optional = true } # API client From b28d915cb6a48da836bb4736c027f1ca5d623fe2 Mon Sep 17 00:00:00 2001 From: Nathan Fox Date: Thu, 25 May 2023 15:31:23 -0400 Subject: [PATCH 047/236] chore: remove custom async sleep impl (#17493) While attempting to update `aws-smithy-async` (https://github.com/vectordotdev/vector/pull/17472) I noticed the SDK now provides an implementation of `AsyncSleep` for tokio, so our custom implementation is no longer needed. --- src/aws/mod.rs | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/src/aws/mod.rs b/src/aws/mod.rs index 0c3c53e53c1fd..fcd2c1c05f117 100644 --- a/src/aws/mod.rs +++ b/src/aws/mod.rs @@ -7,13 +7,13 @@ use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::task::{Context, Poll}; -use std::time::{Duration, SystemTime}; +use std::time::SystemTime; pub use auth::{AwsAuthentication, ImdsAuthentication}; use aws_config::meta::region::ProvideRegion; use aws_sigv4::http_request::{SignableRequest, SigningSettings}; use aws_sigv4::SigningParams; -use aws_smithy_async::rt::sleep::{AsyncSleep, Sleep}; +use aws_smithy_async::rt::sleep::TokioSleep; use aws_smithy_client::bounds::SmithyMiddleware; use aws_smithy_client::erase::{DynConnector, DynMiddleware}; use aws_smithy_client::{Builder, SdkError}; @@ -114,7 +114,7 @@ pub async fn create_smithy_client( let mut client_builder = Builder::new() .connector(connector) .middleware(middleware) - .sleep_impl(Arc::new(TokioSleep)); + .sleep_impl(Arc::new(TokioSleep::new())); client_builder.set_retry_config(Some(retry_config.into())); Ok(client_builder.build()) @@ -190,15 +190,6 @@ pub async fn sign_request( Ok(()) } -#[derive(Debug)] -pub struct TokioSleep; - -impl AsyncSleep for TokioSleep { - fn sleep(&self, duration: Duration) -> Sleep { - Sleep::new(tokio::time::sleep(duration)) - } -} - /// Layer for capturing the payload size for AWS API client requests and emitting internal telemetry. 
#[derive(Clone)] struct CaptureRequestSize { From 2a76cac4d327eac537996d3409a64633c96f5ac8 Mon Sep 17 00:00:00 2001 From: Toby Lawrence Date: Thu, 25 May 2023 16:00:07 -0400 Subject: [PATCH 048/236] chore(statsd sink): refactor `statsd` sink to stream-based style (#16199) This PR completely refactors the `statsd` sink in the "new style", which uses stream-based combinators to build a `Stream` implementation that drives component behavior. At a high-level, the PR is indeed for refactoring the sink, but ultimately includes as much, if not more, refactoring work around establishing reusable `Service`-based primitives for building other sinks like `statsd` i.e. `socket` or `syslog`. ## Reviewer Notes I've mostly copied the existing shared socket sink types -- `TcpSinkConfig`, etc -- and existing socket services -- `UdpService` -- and created consistent versions of them for TCP, UDP, and Unix Domain sockets. This includes a configuration type that is `Configurable`-compatible for all of them, with socket-specific configurations[1] and then methods for generating both the `Service` implementation and a `Healthcheck` implementation. Ultimately, this should form the basis of other sink refactors that use sockets directly (`socket`, `syslog`, etc) but it may be desirable to do some more trait-ifying to avoid some of the necessary boilerplate introduced here. ## Remaining Work - [x] fix normalizer unit tests + add one for sketches --- .github/actions/spelling/allow.txt | 1 + lib/vector-common/src/internal_event/mod.rs | 6 + src/internal_events/mod.rs | 13 +- src/internal_events/socket.rs | 2 +- src/internal_events/statsd_sink.rs | 2 +- src/internal_events/unix.rs | 28 + src/lib.rs | 2 +- src/net.rs | 52 ++ src/sinks/statsd.rs | 633 ------------------ src/sinks/statsd/batch.rs | 27 + src/sinks/statsd/config.rs | 163 +++++ src/sinks/statsd/encoder.rs | 374 +++++++++++ src/sinks/statsd/mod.rs | 12 + src/sinks/statsd/normalizer.rs | 454 +++++++++++++ src/sinks/statsd/request_builder.rs | 155 +++++ src/sinks/statsd/service.rs | 106 +++ src/sinks/statsd/sink.rs | 98 +++ src/sinks/statsd/tests.rs | 100 +++ src/sinks/util/builder.rs | 42 +- src/sinks/util/metadata.rs | 12 +- src/sinks/util/service.rs | 1 + src/sinks/util/service/net/mod.rs | 367 ++++++++++ src/sinks/util/service/net/tcp.rs | 101 +++ src/sinks/util/service/net/udp.rs | 83 +++ src/sinks/util/service/net/unix.rs | 134 ++++ src/sinks/util/udp.rs | 108 +-- src/sources/socket/udp.rs | 5 +- src/sources/statsd/mod.rs | 5 +- src/sources/syslog.rs | 5 +- src/udp.rs | 15 - .../components/sinks/base/statsd.cue | 26 +- 31 files changed, 2346 insertions(+), 786 deletions(-) create mode 100644 src/net.rs delete mode 100644 src/sinks/statsd.rs create mode 100644 src/sinks/statsd/batch.rs create mode 100644 src/sinks/statsd/config.rs create mode 100644 src/sinks/statsd/encoder.rs create mode 100644 src/sinks/statsd/mod.rs create mode 100644 src/sinks/statsd/normalizer.rs create mode 100644 src/sinks/statsd/request_builder.rs create mode 100644 src/sinks/statsd/service.rs create mode 100644 src/sinks/statsd/sink.rs create mode 100644 src/sinks/statsd/tests.rs create mode 100644 src/sinks/util/service/net/mod.rs create mode 100644 src/sinks/util/service/net/tcp.rs create mode 100644 src/sinks/util/service/net/udp.rs create mode 100644 src/sinks/util/service/net/unix.rs delete mode 100644 src/udp.rs diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 7b0334fa0e905..c3a73ef09667c 100644 --- 
a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -52,6 +52,7 @@ Enot Evercoss Explay FAQs +FQDNs Fabro Figma Flipboard diff --git a/lib/vector-common/src/internal_event/mod.rs b/lib/vector-common/src/internal_event/mod.rs index dcf0c42114fe7..37d0dcfc634d9 100644 --- a/lib/vector-common/src/internal_event/mod.rs +++ b/lib/vector-common/src/internal_event/mod.rs @@ -131,6 +131,12 @@ impl From<&'static str> for Protocol { } } +impl From for SharedString { + fn from(value: Protocol) -> Self { + value.0 + } +} + /// Macro to take care of some of the repetitive boilerplate in implementing a registered event. See /// the other events in this module for examples of how to use this. /// diff --git a/src/internal_events/mod.rs b/src/internal_events/mod.rs index e8ce5bad9e4c9..f357cbb0f469b 100644 --- a/src/internal_events/mod.rs +++ b/src/internal_events/mod.rs @@ -253,18 +253,7 @@ pub(crate) use self::statsd_sink::*; pub(crate) use self::tag_cardinality_limit::*; #[cfg(feature = "transforms-throttle")] pub(crate) use self::throttle::*; -#[cfg(all( - any( - feature = "sinks-socket", - feature = "sinks-statsd", - feature = "sources-dnstap", - feature = "sources-metrics", - feature = "sources-statsd", - feature = "sources-syslog", - feature = "sources-socket" - ), - unix -))] +#[cfg(unix)] pub(crate) use self::unix::*; #[cfg(feature = "sinks-websocket")] pub(crate) use self::websocket::*; diff --git a/src/internal_events/socket.rs b/src/internal_events/socket.rs index 468701a1fa194..28ac92270f3d6 100644 --- a/src/internal_events/socket.rs +++ b/src/internal_events/socket.rs @@ -13,7 +13,7 @@ pub enum SocketMode { } impl SocketMode { - const fn as_str(self) -> &'static str { + pub const fn as_str(self) -> &'static str { match self { Self::Tcp => "tcp", Self::Udp => "udp", diff --git a/src/internal_events/statsd_sink.rs b/src/internal_events/statsd_sink.rs index 248cddf8cfd5f..27e33fd0aa89b 100644 --- a/src/internal_events/statsd_sink.rs +++ b/src/internal_events/statsd_sink.rs @@ -10,7 +10,7 @@ use vector_common::internal_event::{ #[derive(Debug)] pub struct StatsdInvalidMetricError<'a> { pub value: &'a MetricValue, - pub kind: &'a MetricKind, + pub kind: MetricKind, } impl<'a> InternalEvent for StatsdInvalidMetricError<'a> { diff --git a/src/internal_events/unix.rs b/src/internal_events/unix.rs index a74c2b7e2c12b..2f004ec38b826 100644 --- a/src/internal_events/unix.rs +++ b/src/internal_events/unix.rs @@ -90,6 +90,34 @@ impl InternalEvent for UnixSocketSendError<'_, E> { } } +#[derive(Debug)] +pub struct UnixSendIncompleteError { + pub data_size: usize, + pub sent: usize, +} + +impl InternalEvent for UnixSendIncompleteError { + fn emit(self) { + let reason = "Could not send all data in one Unix datagram."; + error!( + message = reason, + data_size = self.data_size, + sent = self.sent, + dropped = self.data_size - self.sent, + error_type = error_type::WRITER_FAILED, + stage = error_stage::SENDING, + internal_log_rate_limit = true, + ); + counter!( + "component_errors_total", 1, + "error_type" => error_type::WRITER_FAILED, + "stage" => error_stage::SENDING, + ); + + emit!(ComponentEventsDropped:: { count: 1, reason }); + } +} + #[derive(Debug)] pub struct UnixSocketFileDeleteError<'a> { pub path: &'a Path, diff --git a/src/lib.rs b/src/lib.rs index 910c244dd4ade..37d71f87922ec 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -83,6 +83,7 @@ pub mod line_agg; pub mod list; #[cfg(any(feature = "sources-nats", feature = "sinks-nats"))] pub(crate) mod nats; +pub mod net; 
#[allow(unreachable_pub)] pub(crate) mod proto; pub mod providers; @@ -112,7 +113,6 @@ pub mod trace; #[allow(unreachable_pub)] pub mod transforms; pub mod types; -pub mod udp; pub mod unit_test; pub(crate) mod utilization; pub mod validate; diff --git a/src/net.rs b/src/net.rs new file mode 100644 index 0000000000000..f62abfa32974a --- /dev/null +++ b/src/net.rs @@ -0,0 +1,52 @@ +//! Networking-related helper functions. + +use std::{io, time::Duration}; + +use socket2::{SockRef, TcpKeepalive}; +use tokio::net::TcpStream; + +/// Sets the receive buffer size for a socket. +/// +/// This is the equivalent of setting the `SO_RCVBUF` socket setting directly. +/// +/// # Errors +/// +/// If there is an error setting the receive buffer size on the given socket, or if the value given +/// as the socket is not a valid socket, an error variant will be returned explaining the underlying +/// I/O error. +pub fn set_receive_buffer_size<'s, S>(socket: &'s S, size: usize) -> io::Result<()> +where + SockRef<'s>: From<&'s S>, +{ + SockRef::from(socket).set_recv_buffer_size(size) +} + +/// Sets the send buffer size for a socket. +/// +/// This is the equivalent of setting the `SO_SNDBUF` socket setting directly. +/// +/// # Errors +/// +/// If there is an error setting the send buffer size on the given socket, or if the value given +/// as the socket is not a valid socket, an error variant will be returned explaining the underlying +/// I/O error. +pub fn set_send_buffer_size<'s, S>(socket: &'s S, size: usize) -> io::Result<()> +where + SockRef<'s>: From<&'s S>, +{ + SockRef::from(socket).set_send_buffer_size(size) +} + +/// Sets the TCP keepalive behavior on a socket. +/// +/// This is the equivalent of setting the `SO_KEEPALIVE` and `TCP_KEEPALIVE` socket settings +/// directly. +/// +/// # Errors +/// +/// If there is an error with either enabling keepalive probes or setting the TCP keepalive idle +/// timeout on the given socket, an error variant will be returned explaining the underlying I/O +/// error. +pub fn set_keepalive(socket: &TcpStream, ttl: Duration) -> io::Result<()> { + SockRef::from(socket).set_tcp_keepalive(&TcpKeepalive::new().with_time(ttl)) +} diff --git a/src/sinks/statsd.rs b/src/sinks/statsd.rs deleted file mode 100644 index 95978bd54ec88..0000000000000 --- a/src/sinks/statsd.rs +++ /dev/null @@ -1,633 +0,0 @@ -use std::{ - fmt::Display, - net::{IpAddr, Ipv4Addr, SocketAddr}, - task::{Context, Poll}, -}; - -use bytes::{BufMut, BytesMut}; -use futures::{future, stream, SinkExt, TryFutureExt}; -use futures_util::FutureExt; -use tokio_util::codec::Encoder; -use tower::{Service, ServiceBuilder}; - -use vector_config::configurable_component; -use vector_core::ByteSizeOf; - -#[cfg(unix)] -use crate::sinks::util::unix::UnixSinkConfig; -use crate::{ - config::{AcknowledgementsConfig, GenerateConfig, Input, SinkConfig, SinkContext}, - event::{ - metric::{Metric, MetricKind, MetricTags, MetricValue, StatisticKind}, - Event, - }, - internal_events::StatsdInvalidMetricError, - sinks::util::{ - buffer::metrics::compress_distribution, - encode_namespace, - tcp::TcpSinkConfig, - udp::{UdpService, UdpSinkConfig}, - BatchConfig, BatchSink, Buffer, Compression, EncodedEvent, - }, -}; - -use super::util::SinkBatchSettings; - -pub struct StatsdSvc { - inner: UdpService, -} - -/// Configuration for the `statsd` sink. -#[configurable_component(sink("statsd"))] -#[derive(Clone, Debug)] -pub struct StatsdSinkConfig { - /// Sets the default namespace for any metrics sent. 
- /// - /// This namespace is only used if a metric has no existing namespace. When a namespace is - /// present, it is used as a prefix to the metric name, and separated with a period (`.`). - #[serde(alias = "namespace")] - #[configurable(metadata(docs::examples = "service"))] - pub default_namespace: Option, - - #[serde(flatten)] - pub mode: Mode, - - #[configurable(derived)] - #[serde( - default, - deserialize_with = "crate::serde::bool_or_struct", - skip_serializing_if = "crate::serde::skip_serializing_if_default" - )] - pub acknowledgements: AcknowledgementsConfig, -} - -/// Socket mode. -#[configurable_component] -#[derive(Clone, Debug)] -#[serde(tag = "mode", rename_all = "snake_case")] -#[configurable(metadata(docs::enum_tag_description = "The type of socket to use."))] -pub enum Mode { - /// Send over TCP. - Tcp(TcpSinkConfig), - - /// Send over UDP. - Udp(StatsdUdpConfig), - - /// Send over a Unix domain socket (UDS). - #[cfg(unix)] - Unix(UnixSinkConfig), -} - -#[derive(Clone, Copy, Debug, Default)] -pub struct StatsdDefaultBatchSettings; - -impl SinkBatchSettings for StatsdDefaultBatchSettings { - const MAX_EVENTS: Option = Some(1000); - const MAX_BYTES: Option = Some(1300); - const TIMEOUT_SECS: f64 = 1.0; -} - -/// UDP configuration. -#[configurable_component] -#[derive(Clone, Debug)] -pub struct StatsdUdpConfig { - #[serde(flatten)] - pub udp: UdpSinkConfig, - - #[configurable(derived)] - #[serde(default)] - pub batch: BatchConfig, -} - -fn default_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8125) -} - -impl GenerateConfig for StatsdSinkConfig { - fn generate_config() -> toml::Value { - toml::Value::try_from(Self { - default_namespace: None, - mode: Mode::Udp(StatsdUdpConfig { - batch: Default::default(), - udp: UdpSinkConfig::from_address(default_address().to_string()), - }), - acknowledgements: Default::default(), - }) - .unwrap() - } -} - -#[async_trait::async_trait] -impl SinkConfig for StatsdSinkConfig { - async fn build( - &self, - _cx: SinkContext, - ) -> crate::Result<(super::VectorSink, super::Healthcheck)> { - let default_namespace = self.default_namespace.clone(); - let mut encoder = StatsdEncoder { default_namespace }; - match &self.mode { - Mode::Tcp(config) => config.build(Default::default(), encoder), - Mode::Udp(config) => { - // 1432 bytes is a recommended packet size to fit into MTU - // https://github.com/statsd/statsd/blob/master/docs/metric_types.md#multi-metric-packets - // However we need to leave some space for +1 extra trailing event in the buffer. - // Also one might keep an eye on server side limitations, like - // mentioned here https://github.com/DataDog/dd-agent/issues/2638 - let batch = config.batch.into_batch_settings()?; - let (service, healthcheck) = config.udp.build_service()?; - let service = StatsdSvc { inner: service }; - let sink = BatchSink::new( - ServiceBuilder::new().service(service), - Buffer::new(batch.size, Compression::None), - batch.timeout, - ) - .sink_map_err(|error| error!(message = "Fatal statsd sink error.", %error)) - .with_flat_map(move |event: Event| { - stream::iter({ - let byte_size = event.size_of(); - let mut bytes = BytesMut::new(); - - // Errors are handled by `Encoder`. 
- encoder - .encode(event, &mut bytes) - .map(|_| Ok(EncodedEvent::new(bytes, byte_size))) - }) - }); - - Ok((super::VectorSink::from_event_sink(sink), healthcheck)) - } - #[cfg(unix)] - Mode::Unix(config) => config.build(Default::default(), encoder), - } - } - - fn input(&self) -> Input { - Input::metric() - } - - fn acknowledgements(&self) -> &AcknowledgementsConfig { - &self.acknowledgements - } -} - -// Note that if multi-valued tags are present, this encoding may change the order from the input -// event, since the tags with multiple values may not have been grouped together. -// This is not an issue, but noting as it may be an observed behavior. -fn encode_tags(tags: &MetricTags) -> String { - let parts: Vec<_> = tags - .iter_all() - .map(|(name, tag_value)| match tag_value { - Some(value) => format!("{}:{}", name, value), - None => name.to_owned(), - }) - .collect(); - - // `parts` is already sorted by key because of BTreeMap - parts.join(",") -} - -fn push_event( - buf: &mut Vec, - metric: &Metric, - val: V, - metric_type: &str, - sample_rate: Option, -) { - buf.push(format!("{}:{}|{}", metric.name(), val, metric_type)); - - if let Some(sample_rate) = sample_rate { - if sample_rate != 1 { - buf.push(format!("@{}", 1.0 / f64::from(sample_rate))) - } - }; - - if let Some(t) = metric.tags() { - buf.push(format!("#{}", encode_tags(t))); - }; -} - -#[derive(Debug, Clone)] -struct StatsdEncoder { - default_namespace: Option, -} - -impl Encoder for StatsdEncoder { - type Error = codecs::encoding::Error; - - fn encode(&mut self, event: Event, bytes: &mut BytesMut) -> Result<(), Self::Error> { - let mut buf = Vec::new(); - - let metric = event.as_metric(); - match metric.value() { - MetricValue::Counter { value } => { - push_event(&mut buf, metric, value, "c", None); - } - MetricValue::Gauge { value } => { - match metric.kind() { - MetricKind::Incremental => { - push_event(&mut buf, metric, format!("{:+}", value), "g", None) - } - MetricKind::Absolute => push_event(&mut buf, metric, value, "g", None), - }; - } - MetricValue::Distribution { samples, statistic } => { - let metric_type = match statistic { - StatisticKind::Histogram => "h", - StatisticKind::Summary => "d", - }; - - // TODO: This would actually be good to potentially add a helper combinator for, in the same vein as - // `SinkBuilderExt::normalized`, that provides a metric "optimizer" for doing these sorts of things. We - // don't actually compress distributions as-is in other metrics sinks unless they use the old-style - // approach coupled with `MetricBuffer`. While not every sink would benefit from this -- the - // `datadog_metrics` sink always converts distributions to sketches anyways, for example -- a lot of - // them could. - // - // This would also imply rewriting this sink in the new style to take advantage of it. 
- let mut samples = samples.clone(); - let compressed_samples = compress_distribution(&mut samples); - let mut temp_buf = Vec::new(); - for sample in compressed_samples { - push_event( - &mut temp_buf, - metric, - sample.value, - metric_type, - Some(sample.rate), - ); - let msg = encode_namespace( - metric.namespace().or(self.default_namespace.as_deref()), - '.', - temp_buf.join("|"), - ); - buf.push(msg); - temp_buf.clear() - } - } - MetricValue::Set { values } => { - for val in values { - push_event(&mut buf, metric, val, "s", None); - } - } - _ => { - emit!(StatsdInvalidMetricError { - value: metric.value(), - kind: &metric.kind(), - }); - - return Ok(()); - } - }; - - // TODO: this properly encodes aggregate histograms, but it does not handle sketches. There - // are complications with applying this to sketches, as it is required to extract the - // buckets and unpack the sketch in order to get the real values for distribution samples. - // Tracked in #11661. - let msg: String = match metric.value() { - MetricValue::Distribution { .. } => buf.join("\n"), - _ => encode_namespace( - metric.namespace().or(self.default_namespace.as_deref()), - '.', - buf.join("|"), - ), - }; - - bytes.put_slice(&msg.into_bytes()); - bytes.put_u8(b'\n'); - - Ok(()) - } -} - -impl Service for StatsdSvc { - type Response = (); - type Error = crate::Error; - type Future = future::BoxFuture<'static, Result<(), Self::Error>>; - - // Emission of Error internal event is handled upstream by the caller - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_ready(cx).map_err(Into::into) - } - - // Emission of Error internal event is handled upstream by the caller - fn call(&mut self, frame: BytesMut) -> Self::Future { - self.inner.call(frame).err_into().boxed() - } -} - -#[cfg(test)] -mod test { - use bytes::Bytes; - use futures::{channel::mpsc, StreamExt, TryStreamExt}; - use tokio::net::UdpSocket; - use tokio_util::{codec::BytesCodec, udp::UdpFramed}; - use vector_core::{event::metric::TagValue, metric_tags}; - #[cfg(feature = "sources-statsd")] - use {crate::sources::statsd::parser::parse, std::str::from_utf8}; - - use super::*; - use crate::{ - event::Metric, - test_util::{ - components::{assert_sink_compliance, SINK_TAGS}, - *, - }, - }; - - #[test] - fn generate_config() { - crate::test_util::test_generate_config::(); - } - - fn tags() -> MetricTags { - metric_tags!( - "normal_tag" => "value", - "multi_value" => "true", - "multi_value" => "false", - "multi_value" => TagValue::Bare, - "bare_tag" => TagValue::Bare, - ) - } - - #[test] - fn test_encode_tags() { - let actual = encode_tags(&tags()); - let mut actual = actual.split(',').collect::>(); - actual.sort(); - - let mut expected = - "bare_tag,normal_tag:value,multi_value:true,multi_value:false,multi_value" - .split(',') - .collect::>(); - expected.sort(); - - assert_eq!(actual, expected); - } - - #[test] - fn tags_order() { - assert_eq!( - &encode_tags( - &vec![ - ("a", "value"), - ("b", "value"), - ("c", "value"), - ("d", "value"), - ("e", "value"), - ] - .into_iter() - .map(|(k, v)| (k.to_owned(), v.to_owned())) - .collect() - ), - "a:value,b:value,c:value,d:value,e:value" - ); - } - - #[cfg(feature = "sources-statsd")] - #[test] - fn test_encode_counter() { - let metric1 = Metric::new( - "counter", - MetricKind::Incremental, - MetricValue::Counter { value: 1.5 }, - ) - .with_tags(Some(tags())); - let event = Event::Metric(metric1.clone()); - let mut encoder = StatsdEncoder { - default_namespace: None, - }; - let mut frame = 
BytesMut::new(); - encoder.encode(event, &mut frame).unwrap(); - let metric2 = parse(from_utf8(&frame).unwrap().trim()).unwrap(); - vector_common::assert_event_data_eq!(metric1, metric2); - } - - #[cfg(feature = "sources-statsd")] - #[test] - fn test_encode_absolute_counter() { - let metric1 = Metric::new( - "counter", - MetricKind::Absolute, - MetricValue::Counter { value: 1.5 }, - ); - let event = Event::Metric(metric1); - let mut encoder = StatsdEncoder { - default_namespace: None, - }; - let mut frame = BytesMut::new(); - encoder.encode(event, &mut frame).unwrap(); - // The statsd parser will parse the counter as Incremental, - // so we can't compare it with the parsed value. - assert_eq!("counter:1.5|c\n", from_utf8(&frame).unwrap()); - } - - #[cfg(feature = "sources-statsd")] - #[test] - fn test_encode_gauge() { - let metric1 = Metric::new( - "gauge", - MetricKind::Incremental, - MetricValue::Gauge { value: -1.5 }, - ) - .with_tags(Some(tags())); - let event = Event::Metric(metric1.clone()); - let mut encoder = StatsdEncoder { - default_namespace: None, - }; - let mut frame = BytesMut::new(); - encoder.encode(event, &mut frame).unwrap(); - let metric2 = parse(from_utf8(&frame).unwrap().trim()).unwrap(); - vector_common::assert_event_data_eq!(metric1, metric2); - } - - #[cfg(feature = "sources-statsd")] - #[test] - fn test_encode_absolute_gauge() { - let metric1 = Metric::new( - "gauge", - MetricKind::Absolute, - MetricValue::Gauge { value: 1.5 }, - ) - .with_tags(Some(tags())); - let event = Event::Metric(metric1.clone()); - let mut encoder = StatsdEncoder { - default_namespace: None, - }; - let mut frame = BytesMut::new(); - encoder.encode(event, &mut frame).unwrap(); - let metric2 = parse(from_utf8(&frame).unwrap().trim()).unwrap(); - vector_common::assert_event_data_eq!(metric1, metric2); - } - - #[cfg(feature = "sources-statsd")] - #[test] - fn test_encode_distribution() { - let metric1 = Metric::new( - "distribution", - MetricKind::Incremental, - MetricValue::Distribution { - samples: vector_core::samples![1.5 => 1, 1.5 => 1], - statistic: StatisticKind::Histogram, - }, - ) - .with_tags(Some(tags())); - - let metric1_compressed = Metric::new( - "distribution", - MetricKind::Incremental, - MetricValue::Distribution { - samples: vector_core::samples![1.5 => 2], - statistic: StatisticKind::Histogram, - }, - ) - .with_tags(Some(tags())); - - let event = Event::Metric(metric1); - let mut encoder = StatsdEncoder { - default_namespace: None, - }; - let mut frame = BytesMut::new(); - encoder.encode(event, &mut frame).unwrap(); - let metric2 = parse(from_utf8(&frame).unwrap().trim()).unwrap(); - vector_common::assert_event_data_eq!(metric1_compressed, metric2); - } - - #[cfg(feature = "sources-statsd")] - #[test] - fn test_encode_distribution_aggregated() { - let metric1 = Metric::new( - "distribution", - MetricKind::Incremental, - MetricValue::Distribution { - samples: vector_core::samples![2.5 => 1, 1.5 => 1, 1.5 => 1], - statistic: StatisticKind::Histogram, - }, - ) - .with_tags(Some(tags())); - - let metric1_part1_compressed = Metric::new( - "distribution", - MetricKind::Incremental, - MetricValue::Distribution { - samples: vector_core::samples![2.5 => 1], - statistic: StatisticKind::Histogram, - }, - ) - .with_tags(Some(tags())); - let metric1_part2_compressed = Metric::new( - "distribution", - MetricKind::Incremental, - MetricValue::Distribution { - samples: vector_core::samples![1.5 => 2], - statistic: StatisticKind::Histogram, - }, - ) - .with_tags(Some(tags())); - let event = 
Event::Metric(metric1); - let mut encoder = StatsdEncoder { - default_namespace: None, - }; - let mut frame = BytesMut::new(); - encoder.encode(event, &mut frame).unwrap(); - - let res = from_utf8(&frame).unwrap().trim(); - let mut packets = res.split('\n'); - - let metric2 = parse(packets.next().unwrap().trim()).unwrap(); - vector_common::assert_event_data_eq!(metric1_part2_compressed, metric2); - - let metric3 = parse(packets.next().unwrap().trim()).unwrap(); - vector_common::assert_event_data_eq!(metric1_part1_compressed, metric3); - } - - #[cfg(feature = "sources-statsd")] - #[test] - fn test_encode_set() { - let metric1 = Metric::new( - "set", - MetricKind::Incremental, - MetricValue::Set { - values: vec!["abc".to_owned()].into_iter().collect(), - }, - ) - .with_tags(Some(tags())); - let event = Event::Metric(metric1.clone()); - let mut encoder = StatsdEncoder { - default_namespace: None, - }; - let mut frame = BytesMut::new(); - encoder.encode(event, &mut frame).unwrap(); - let metric2 = parse(from_utf8(&frame).unwrap().trim()).unwrap(); - - vector_common::assert_event_data_eq!(metric1, metric2); - } - - #[tokio::test] - async fn test_send_to_statsd() { - trace_init(); - - let addr = next_addr(); - let mut batch = BatchConfig::default(); - batch.max_bytes = Some(512); - - let config = StatsdSinkConfig { - default_namespace: Some("ns".into()), - mode: Mode::Udp(StatsdUdpConfig { - batch, - udp: UdpSinkConfig::from_address(addr.to_string()), - }), - acknowledgements: Default::default(), - }; - - let events = vec![ - Event::Metric( - Metric::new( - "counter", - MetricKind::Incremental, - MetricValue::Counter { value: 1.5 }, - ) - .with_namespace(Some("vector")) - .with_tags(Some(tags())), - ), - Event::Metric( - Metric::new( - "histogram", - MetricKind::Incremental, - MetricValue::Distribution { - samples: vector_core::samples![2.0 => 100], - statistic: StatisticKind::Histogram, - }, - ) - .with_namespace(Some("vector")), - ), - ]; - let (mut tx, rx) = mpsc::channel(0); - - let context = SinkContext::new_test(); - assert_sink_compliance(&SINK_TAGS, async move { - let (sink, _healthcheck) = config.build(context).await.unwrap(); - - let socket = UdpSocket::bind(addr).await.unwrap(); - tokio::spawn(async move { - let mut stream = UdpFramed::new(socket, BytesCodec::new()) - .map_err(|error| error!(message = "Error reading line.", %error)) - .map_ok(|(bytes, _addr)| bytes.freeze()); - - while let Some(Ok(item)) = stream.next().await { - tx.send(item).await.unwrap(); - } - }); - - sink.run(stream::iter(events).map(Into::into)) - .await - .expect("Running sink failed") - }) - .await; - - let messages = collect_n(rx, 1).await; - assert_eq!( - messages[0], - Bytes::from("vector.counter:1.5|c|#bare_tag,multi_value:true,multi_value:false,multi_value,normal_tag:value\nvector.histogram:2|h|@0.01\n"), - ); - } -} diff --git a/src/sinks/statsd/batch.rs b/src/sinks/statsd/batch.rs new file mode 100644 index 0000000000000..23504e146ca46 --- /dev/null +++ b/src/sinks/statsd/batch.rs @@ -0,0 +1,27 @@ +use vector_core::{event::Metric, stream::batcher::limiter::ItemBatchSize}; + +// This accounts for the separators, the metric type string, the length of the value itself. It can +// never be too small, as the above values will always take at least 4 bytes. +const EST_OVERHEAD_LEN: usize = 4; + +#[derive(Default)] +pub(super) struct StatsdBatchSizer; + +impl ItemBatchSize for StatsdBatchSizer { + fn size(&self, item: &Metric) -> usize { + // Metric name. 
+ item.series().name().name().len() + // Metric namespace, with an additional 1 to account for the namespace separator. + + item.series().name().namespace().map(|s| s.len() + 1).unwrap_or(0) + // Metric tags, with an additional 1 per tag to account for the tag key/value separator. + + item.series().tags().map(|t| { + t.iter_all().map(|(k, v)| { + k.len() + 1 + v.map(|v| v.len()).unwrap_or(0) + }) + .sum() + }) + .unwrap_or(0) + // Estimated overhead (separators, metric value, etc) + + EST_OVERHEAD_LEN + } +} diff --git a/src/sinks/statsd/config.rs b/src/sinks/statsd/config.rs new file mode 100644 index 0000000000000..5e1052d59a8bd --- /dev/null +++ b/src/sinks/statsd/config.rs @@ -0,0 +1,163 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use async_trait::async_trait; +use vector_common::internal_event::Protocol; +use vector_config::{component::GenerateConfig, configurable_component}; +use vector_core::{ + config::{AcknowledgementsConfig, Input}, + sink::VectorSink, +}; + +use crate::{ + config::{SinkConfig, SinkContext}, + internal_events::SocketMode, + sinks::{ + util::{ + service::net::{NetworkConnector, TcpConnectorConfig, UdpConnectorConfig}, + BatchConfig, SinkBatchSettings, + }, + Healthcheck, + }, +}; + +#[cfg(unix)] +use crate::sinks::util::service::net::UnixConnectorConfig; + +use super::{request_builder::StatsdRequestBuilder, service::StatsdService, sink::StatsdSink}; + +#[derive(Clone, Copy, Debug, Default)] +pub struct StatsdDefaultBatchSettings; + +impl SinkBatchSettings for StatsdDefaultBatchSettings { + const MAX_EVENTS: Option = Some(1000); + const MAX_BYTES: Option = Some(1300); + const TIMEOUT_SECS: f64 = 1.0; +} + +/// Configuration for the `statsd` sink. +#[configurable_component(sink("statsd"))] +#[derive(Clone, Debug)] +pub struct StatsdSinkConfig { + /// Sets the default namespace for any metrics sent. + /// + /// This namespace is only used if a metric has no existing namespace. When a namespace is + /// present, it is used as a prefix to the metric name, and separated with a period (`.`). + #[serde(alias = "namespace")] + #[configurable(metadata(docs::examples = "service"))] + pub default_namespace: Option, + + #[serde(flatten)] + pub mode: Mode, + + #[configurable(derived)] + #[serde(default)] + pub batch: BatchConfig, + + #[configurable(derived)] + #[serde( + default, + deserialize_with = "crate::serde::bool_or_struct", + skip_serializing_if = "crate::serde::skip_serializing_if_default" + )] + pub acknowledgements: AcknowledgementsConfig, +} + +/// Socket mode. +#[configurable_component] +#[derive(Clone, Debug)] +#[serde(tag = "mode", rename_all = "snake_case")] +#[configurable(metadata(docs::enum_tag_description = "The type of socket to use."))] +pub enum Mode { + /// Send over TCP. + Tcp(TcpConnectorConfig), + + /// Send over UDP. + Udp(UdpConnectorConfig), + + /// Send over a Unix domain socket (UDS). 
+ #[cfg(unix)] + Unix(UnixConnectorConfig), +} + +impl Mode { + const fn as_socket_mode(&self) -> SocketMode { + match self { + Self::Tcp(_) => SocketMode::Tcp, + Self::Udp(_) => SocketMode::Udp, + #[cfg(unix)] + Self::Unix(_) => SocketMode::Unix, + } + } + + fn as_connector(&self) -> NetworkConnector { + match self { + Self::Tcp(config) => config.as_connector(), + Self::Udp(config) => config.as_connector(), + #[cfg(unix)] + Self::Unix(config) => config.as_connector(), + } + } +} + +fn default_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8125) +} + +impl GenerateConfig for StatsdSinkConfig { + fn generate_config() -> toml::Value { + let address = default_address(); + + toml::Value::try_from(Self { + default_namespace: None, + mode: Mode::Udp(UdpConnectorConfig::from_address( + address.ip().to_string(), + address.port(), + )), + batch: Default::default(), + acknowledgements: Default::default(), + }) + .unwrap() + } +} + +#[async_trait] +impl SinkConfig for StatsdSinkConfig { + async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { + let batcher_settings = self.batch.into_batcher_settings()?; + + let socket_mode = self.mode.as_socket_mode(); + let request_builder = + StatsdRequestBuilder::new(self.default_namespace.clone(), socket_mode)?; + let protocol = Protocol::from(socket_mode.as_str()); + + let connector = self.mode.as_connector(); + let service = connector.service(); + let healthcheck = connector.healthcheck(); + + let sink = StatsdSink::new( + StatsdService::from_transport(service), + batcher_settings, + request_builder, + protocol, + ); + Ok((VectorSink::from_event_streamsink(sink), healthcheck)) + } + + fn input(&self) -> Input { + Input::metric() + } + + fn acknowledgements(&self) -> &AcknowledgementsConfig { + &self.acknowledgements + } +} + +#[cfg(test)] +mod test { + use super::StatsdSinkConfig; + + #[test] + fn generate_config() { + crate::test_util::test_generate_config::(); + } +} diff --git a/src/sinks/statsd/encoder.rs b/src/sinks/statsd/encoder.rs new file mode 100644 index 0000000000000..c75661cb97a30 --- /dev/null +++ b/src/sinks/statsd/encoder.rs @@ -0,0 +1,374 @@ +use std::{ + fmt::Display, + io::{self, Write}, +}; + +use bytes::{BufMut, BytesMut}; +use tokio_util::codec::Encoder; +use vector_core::event::{Metric, MetricKind, MetricTags, MetricValue, StatisticKind}; + +use crate::{ + internal_events::StatsdInvalidMetricError, + sinks::util::{buffer::metrics::compress_distribution, encode_namespace}, +}; + +/// Error type for errors that can never happen, but for use with `Encoder`. +/// +/// For the StatsD encoder, the encoding operation is infallible. However, as `Encoder` requires +/// that the associated error type can be created by `From`, we can't simply use +/// `Infallible`. This type exists to bridge that gap, acting as a marker type for "we emit no +/// errors" while supporting the trait bounds on `Encoder::Error`. +#[derive(Debug)] +pub struct InfallibleIo; + +impl From for InfallibleIo { + fn from(_: io::Error) -> Self { + Self + } +} + +#[derive(Debug, Clone)] +pub(super) struct StatsdEncoder { + default_namespace: Option, +} + +impl StatsdEncoder { + /// Creates a new `StatsdEncoder` with the given default namespace, if any. 
+ pub const fn new(default_namespace: Option) -> Self { + Self { default_namespace } + } +} + +impl<'a> Encoder<&'a Metric> for StatsdEncoder { + type Error = InfallibleIo; + + fn encode(&mut self, metric: &'a Metric, buf: &mut BytesMut) -> Result<(), Self::Error> { + let namespace = metric.namespace().or(self.default_namespace.as_deref()); + let name = encode_namespace(namespace, '.', metric.name()); + let tags = metric.tags().map(encode_tags); + + match metric.value() { + MetricValue::Counter { value } => { + encode_and_write_single_event(buf, &name, tags.as_deref(), value, "c", None); + } + MetricValue::Gauge { value } => { + match metric.kind() { + MetricKind::Incremental => encode_and_write_single_event( + buf, + &name, + tags.as_deref(), + format!("{:+}", value), + "g", + None, + ), + MetricKind::Absolute => { + encode_and_write_single_event(buf, &name, tags.as_deref(), value, "g", None) + } + }; + } + MetricValue::Distribution { samples, statistic } => { + let metric_type = match statistic { + StatisticKind::Histogram => "h", + StatisticKind::Summary => "d", + }; + + // TODO: This would actually be good to potentially add a helper combinator for, in the same vein as + // `SinkBuilderExt::normalized`, that provides a metric "optimizer" for doing these sorts of things. We + // don't actually compress distributions as-is in other metrics sinks unless they use the old-style + // approach coupled with `MetricBuffer`. While not every sink would benefit from this -- the + // `datadog_metrics` sink always converts distributions to sketches anyways, for example -- a lot of + // them could. + let mut samples = samples.clone(); + let compressed_samples = compress_distribution(&mut samples); + for sample in compressed_samples { + encode_and_write_single_event( + buf, + &name, + tags.as_deref(), + sample.value, + metric_type, + Some(sample.rate), + ); + } + } + MetricValue::Set { values } => { + for val in values { + encode_and_write_single_event(buf, &name, tags.as_deref(), val, "s", None); + } + } + _ => { + emit!(StatsdInvalidMetricError { + value: metric.value(), + kind: metric.kind(), + }); + + return Ok(()); + } + }; + + Ok(()) + } +} + +// Note that if multi-valued tags are present, this encoding may change the order from the input +// event, since the tags with multiple values may not have been grouped together. +// This is not an issue, but noting as it may be an observed behavior. 
+fn encode_tags(tags: &MetricTags) -> String { + let parts: Vec<_> = tags + .iter_all() + .map(|(name, tag_value)| match tag_value { + Some(value) => format!("{}:{}", name, value), + None => name.to_owned(), + }) + .collect(); + + // `parts` is already sorted by key because of BTreeMap + parts.join(",") +} + +fn encode_and_write_single_event( + buf: &mut BytesMut, + metric_name: &str, + metric_tags: Option<&str>, + val: V, + metric_type: &str, + sample_rate: Option, +) { + let mut writer = buf.writer(); + + write!(&mut writer, "{}:{}|{}", metric_name, val, metric_type).unwrap(); + + if let Some(sample_rate) = sample_rate { + if sample_rate != 1 { + write!(&mut writer, "|@{}", 1.0 / f64::from(sample_rate)).unwrap(); + } + }; + + if let Some(t) = metric_tags { + write!(&mut writer, "|#{}", t).unwrap(); + }; + + writeln!(&mut writer).unwrap(); +} + +#[cfg(test)] +mod tests { + use vector_core::{ + event::{metric::TagValue, MetricTags}, + metric_tags, + }; + + use super::encode_tags; + + #[cfg(feature = "sources-statsd")] + use vector_core::event::{Metric, MetricKind, MetricValue, StatisticKind}; + + #[cfg(feature = "sources-statsd")] + fn encode_metric(metric: &Metric) -> bytes::BytesMut { + use tokio_util::codec::Encoder; + + let mut encoder = super::StatsdEncoder { + default_namespace: None, + }; + let mut frame = bytes::BytesMut::new(); + encoder.encode(metric, &mut frame).unwrap(); + frame + } + + #[cfg(feature = "sources-statsd")] + fn parse_encoded_metrics(metric: &[u8]) -> Vec { + use crate::sources::statsd::parser::parse as statsd_parse; + + let s = std::str::from_utf8(metric).unwrap().trim(); + s.split('\n') + .map(|packet| statsd_parse(packet).expect("should not fail to parse statsd packet")) + .collect() + } + + fn tags() -> MetricTags { + metric_tags!( + "normal_tag" => "value", + "multi_value" => "true", + "multi_value" => "false", + "multi_value" => TagValue::Bare, + "bare_tag" => TagValue::Bare, + ) + } + + #[test] + fn test_encode_tags() { + let actual = encode_tags(&tags()); + let mut actual = actual.split(',').collect::>(); + actual.sort(); + + let mut expected = + "bare_tag,normal_tag:value,multi_value:true,multi_value:false,multi_value" + .split(',') + .collect::>(); + expected.sort(); + + assert_eq!(actual, expected); + } + + #[test] + fn tags_order() { + assert_eq!( + &encode_tags( + &vec![ + ("a", "value"), + ("b", "value"), + ("c", "value"), + ("d", "value"), + ("e", "value"), + ] + .into_iter() + .map(|(k, v)| (k.to_owned(), v.to_owned())) + .collect() + ), + "a:value,b:value,c:value,d:value,e:value" + ); + } + + #[cfg(feature = "sources-statsd")] + #[test] + fn test_encode_counter() { + let input = Metric::new( + "counter", + MetricKind::Incremental, + MetricValue::Counter { value: 1.5 }, + ) + .with_tags(Some(tags())); + + let frame = encode_metric(&input); + let mut output = parse_encoded_metrics(&frame); + vector_common::assert_event_data_eq!(input, output.remove(0)); + } + + #[cfg(feature = "sources-statsd")] + #[test] + fn test_encode_absolute_counter() { + let input = Metric::new( + "counter", + MetricKind::Absolute, + MetricValue::Counter { value: 1.5 }, + ); + + let frame = encode_metric(&input); + // The statsd parser will parse the counter as Incremental, + // so we can't compare it with the parsed value. 
+ assert_eq!("counter:1.5|c\n", std::str::from_utf8(&frame).unwrap()); + } + + #[cfg(feature = "sources-statsd")] + #[test] + fn test_encode_gauge() { + let input = Metric::new( + "gauge", + MetricKind::Incremental, + MetricValue::Gauge { value: -1.5 }, + ) + .with_tags(Some(tags())); + + let frame = encode_metric(&input); + let mut output = parse_encoded_metrics(&frame); + vector_common::assert_event_data_eq!(input, output.remove(0)); + } + + #[cfg(feature = "sources-statsd")] + #[test] + fn test_encode_absolute_gauge() { + let input = Metric::new( + "gauge", + MetricKind::Absolute, + MetricValue::Gauge { value: 1.5 }, + ) + .with_tags(Some(tags())); + + let frame = encode_metric(&input); + let mut output = parse_encoded_metrics(&frame); + vector_common::assert_event_data_eq!(input, output.remove(0)); + } + + #[cfg(feature = "sources-statsd")] + #[test] + fn test_encode_distribution() { + let input = Metric::new( + "distribution", + MetricKind::Incremental, + MetricValue::Distribution { + samples: vector_core::samples![1.5 => 1, 1.5 => 1], + statistic: StatisticKind::Histogram, + }, + ) + .with_tags(Some(tags())); + + let expected = Metric::new( + "distribution", + MetricKind::Incremental, + MetricValue::Distribution { + samples: vector_core::samples![1.5 => 2], + statistic: StatisticKind::Histogram, + }, + ) + .with_tags(Some(tags())); + + let frame = encode_metric(&input); + let mut output = parse_encoded_metrics(&frame); + vector_common::assert_event_data_eq!(expected, output.remove(0)); + } + + #[cfg(feature = "sources-statsd")] + #[test] + fn test_encode_distribution_aggregated() { + let input = Metric::new( + "distribution", + MetricKind::Incremental, + MetricValue::Distribution { + samples: vector_core::samples![2.5 => 1, 1.5 => 1, 1.5 => 1], + statistic: StatisticKind::Histogram, + }, + ) + .with_tags(Some(tags())); + + let expected1 = Metric::new( + "distribution", + MetricKind::Incremental, + MetricValue::Distribution { + samples: vector_core::samples![1.5 => 2], + statistic: StatisticKind::Histogram, + }, + ) + .with_tags(Some(tags())); + let expected2 = Metric::new( + "distribution", + MetricKind::Incremental, + MetricValue::Distribution { + samples: vector_core::samples![2.5 => 1], + statistic: StatisticKind::Histogram, + }, + ) + .with_tags(Some(tags())); + + let frame = encode_metric(&input); + let mut output = parse_encoded_metrics(&frame); + vector_common::assert_event_data_eq!(expected1, output.remove(0)); + vector_common::assert_event_data_eq!(expected2, output.remove(0)); + } + + #[cfg(feature = "sources-statsd")] + #[test] + fn test_encode_set() { + let input = Metric::new( + "set", + MetricKind::Incremental, + MetricValue::Set { + values: vec!["abc".to_owned()].into_iter().collect(), + }, + ) + .with_tags(Some(tags())); + + let frame = encode_metric(&input); + let mut output = parse_encoded_metrics(&frame); + vector_common::assert_event_data_eq!(input, output.remove(0)); + } +} diff --git a/src/sinks/statsd/mod.rs b/src/sinks/statsd/mod.rs new file mode 100644 index 0000000000000..73d43ff3cc163 --- /dev/null +++ b/src/sinks/statsd/mod.rs @@ -0,0 +1,12 @@ +mod batch; +mod config; +mod encoder; +mod normalizer; +mod request_builder; +mod service; +mod sink; + +#[cfg(test)] +mod tests; + +pub use self::config::StatsdSinkConfig; diff --git a/src/sinks/statsd/normalizer.rs b/src/sinks/statsd/normalizer.rs new file mode 100644 index 0000000000000..fc8a9656a636d --- /dev/null +++ b/src/sinks/statsd/normalizer.rs @@ -0,0 +1,454 @@ +use vector_core::event::{Metric, 
MetricValue}; + +use crate::sinks::util::buffer::metrics::{MetricNormalize, MetricSet}; + +#[derive(Default)] +pub(crate) struct StatsdNormalizer; + +impl MetricNormalize for StatsdNormalizer { + fn normalize(&mut self, state: &mut MetricSet, metric: Metric) -> Option { + // We primarily care about making sure that metrics are incremental, but for gauges, we can + // handle both incremental and absolute versions during encoding. + match metric.value() { + // Pass through gauges as-is. + MetricValue::Gauge { .. } => Some(metric), + // Otherwise, ensure that it's incremental. + _ => state.make_incremental(metric), + } + } +} + +#[cfg(test)] +mod tests { + use std::fmt; + + use vector_core::event::{ + metric::{Bucket, Sample}, + Metric, MetricKind, MetricValue, StatisticKind, + }; + + use super::StatsdNormalizer; + use crate::sinks::util::buffer::metrics::{MetricNormalize, MetricSet}; + + fn buckets_from_samples(values: &[f64]) -> (Vec, f64, u64) { + // Generate buckets, and general statistics, for an input set of data. We only use this in + // tests, and so we have some semi-realistic buckets here, but mainly we use them for testing, + // not for most accurately/efficiently representing the input samples. + let bounds = &[ + 1.0, + 2.0, + 4.0, + 8.0, + 16.0, + 32.0, + 64.0, + 128.0, + 256.0, + 512.0, + 1024.0, + f64::INFINITY, + ]; + let mut buckets = bounds + .iter() + .map(|b| Bucket { + upper_limit: *b, + count: 0, + }) + .collect::>(); + + let mut sum = 0.0; + let mut count = 0; + for value in values { + for bucket in buckets.iter_mut() { + if *value <= bucket.upper_limit { + bucket.count += 1; + } + } + + sum += *value; + count += 1; + } + + (buckets, sum, count) + } + + fn generate_f64s(start: u16, end: u16) -> Vec { + assert!(start <= end); + let mut samples = Vec::new(); + for n in start..=end { + samples.push(f64::from(n)); + } + samples + } + + fn get_counter(value: f64, kind: MetricKind) -> Metric { + Metric::new("counter", kind, MetricValue::Counter { value }) + } + + fn get_gauge(value: f64, kind: MetricKind) -> Metric { + Metric::new("gauge", kind, MetricValue::Gauge { value }) + } + + fn get_set(values: S, kind: MetricKind) -> Metric + where + S: IntoIterator, + V: fmt::Display, + { + Metric::new( + "set", + kind, + MetricValue::Set { + values: values.into_iter().map(|i| i.to_string()).collect(), + }, + ) + } + + fn get_distribution(samples: S, kind: MetricKind) -> Metric + where + S: IntoIterator, + V: Into, + { + Metric::new( + "distribution", + kind, + MetricValue::Distribution { + samples: samples + .into_iter() + .map(|n| Sample { + value: n.into(), + rate: 1, + }) + .collect(), + statistic: StatisticKind::Histogram, + }, + ) + } + + fn get_aggregated_histogram(samples: S, kind: MetricKind) -> Metric + where + S: IntoIterator, + V: Into, + { + let samples = samples.into_iter().map(Into::into).collect::>(); + let (buckets, sum, count) = buckets_from_samples(&samples); + + Metric::new( + "agg_histogram", + kind, + MetricValue::AggregatedHistogram { + buckets, + count, + sum, + }, + ) + } + + fn run_comparisons(inputs: Vec, expected_outputs: Vec>) { + let mut metric_set = MetricSet::default(); + let mut normalizer = StatsdNormalizer::default(); + + for (input, expected) in inputs.into_iter().zip(expected_outputs) { + let result = normalizer.normalize(&mut metric_set, input); + assert_eq!(result, expected); + } + } + + #[test] + fn absolute_counter() { + let first_value = 3.14; + let second_value = 8.675309; + + let counters = vec![ + get_counter(first_value, 
MetricKind::Absolute), + get_counter(second_value, MetricKind::Absolute), + ]; + + let expected_counters = vec![ + None, + Some(get_counter( + second_value - first_value, + MetricKind::Incremental, + )), + ]; + + run_comparisons(counters, expected_counters); + } + + #[test] + fn incremental_counter() { + let first_value = 3.14; + let second_value = 8.675309; + + let counters = vec![ + get_counter(first_value, MetricKind::Incremental), + get_counter(second_value, MetricKind::Incremental), + ]; + + let expected_counters = counters + .clone() + .into_iter() + .map(Option::Some) + .collect::>(); + + run_comparisons(counters, expected_counters); + } + + #[test] + fn mixed_counter() { + let first_value = 3.14; + let second_value = 8.675309; + let third_value = 16.19; + + let counters = vec![ + get_counter(first_value, MetricKind::Incremental), + get_counter(second_value, MetricKind::Absolute), + get_counter(third_value, MetricKind::Absolute), + get_counter(first_value, MetricKind::Absolute), + get_counter(second_value, MetricKind::Incremental), + get_counter(third_value, MetricKind::Incremental), + ]; + + let expected_counters = vec![ + Some(get_counter(first_value, MetricKind::Incremental)), + None, + Some(get_counter( + third_value - second_value, + MetricKind::Incremental, + )), + None, + Some(get_counter(second_value, MetricKind::Incremental)), + Some(get_counter(third_value, MetricKind::Incremental)), + ]; + + run_comparisons(counters, expected_counters); + } + + #[test] + fn absolute_gauge() { + let first_value = 3.14; + let second_value = 8.675309; + + let gauges = vec![ + get_gauge(first_value, MetricKind::Absolute), + get_gauge(second_value, MetricKind::Absolute), + ]; + + let expected_gauges = gauges + .clone() + .into_iter() + .map(Option::Some) + .collect::>(); + + run_comparisons(gauges, expected_gauges); + } + + #[test] + fn incremental_gauge() { + let first_value = 3.14; + let second_value = 8.675309; + + let gauges = vec![ + get_gauge(first_value, MetricKind::Incremental), + get_gauge(second_value, MetricKind::Incremental), + ]; + + let expected_gauges = gauges + .clone() + .into_iter() + .map(Option::Some) + .collect::>(); + + run_comparisons(gauges, expected_gauges); + } + + #[test] + fn mixed_gauge() { + let first_value = 3.14; + let second_value = 8.675309; + let third_value = 16.19; + + let gauges = vec![ + get_gauge(first_value, MetricKind::Incremental), + get_gauge(second_value, MetricKind::Absolute), + get_gauge(third_value, MetricKind::Absolute), + get_gauge(first_value, MetricKind::Absolute), + get_gauge(second_value, MetricKind::Incremental), + get_gauge(third_value, MetricKind::Incremental), + ]; + + let expected_gauges = gauges + .clone() + .into_iter() + .map(Option::Some) + .collect::>(); + + run_comparisons(gauges, expected_gauges); + } + + #[test] + fn absolute_set() { + let sets = vec![ + get_set(1..=20, MetricKind::Absolute), + get_set(15..=25, MetricKind::Absolute), + ]; + + let expected_sets = vec![None, Some(get_set(21..=25, MetricKind::Incremental))]; + + run_comparisons(sets, expected_sets); + } + + #[test] + fn incremental_set() { + let sets = vec![ + get_set(1..=20, MetricKind::Incremental), + get_set(15..=25, MetricKind::Incremental), + ]; + + let expected_sets = vec![ + Some(get_set(1..=20, MetricKind::Incremental)), + Some(get_set(15..=25, MetricKind::Incremental)), + ]; + + run_comparisons(sets, expected_sets); + } + + #[test] + fn mixed_set() { + let sets = vec![ + get_set(1..=20, MetricKind::Incremental), + get_set(10..=16, MetricKind::Absolute), + 
get_set(15..=25, MetricKind::Absolute), + get_set(1..5, MetricKind::Incremental), + get_set(3..=42, MetricKind::Incremental), + ]; + + let expected_sets = vec![ + Some(get_set(1..=20, MetricKind::Incremental)), + None, + Some(get_set(17..=25, MetricKind::Incremental)), + Some(get_set(1..5, MetricKind::Incremental)), + Some(get_set(3..=42, MetricKind::Incremental)), + ]; + + run_comparisons(sets, expected_sets); + } + + #[test] + fn absolute_distribution() { + let samples1 = generate_f64s(1, 100); + let samples2 = generate_f64s(1, 125); + let expected_samples = generate_f64s(101, 125); + + let distributions = vec![ + get_distribution(samples1, MetricKind::Absolute), + get_distribution(samples2, MetricKind::Absolute), + ]; + + let expected_distributions = vec![ + None, + Some(get_distribution(expected_samples, MetricKind::Incremental)), + ]; + + run_comparisons(distributions, expected_distributions); + } + + #[test] + fn incremental_distribution() { + let samples1 = generate_f64s(1, 100); + let samples2 = generate_f64s(75, 125); + + let distributions = vec![ + get_distribution(samples1, MetricKind::Incremental), + get_distribution(samples2, MetricKind::Incremental), + ]; + + let expected_distributions = distributions.iter().cloned().map(Some).collect(); + + run_comparisons(distributions, expected_distributions); + } + + #[test] + fn mixed_distribution() { + let samples1 = generate_f64s(1, 100); + let samples2 = generate_f64s(75, 125); + let samples3 = generate_f64s(75, 187); + let samples4 = generate_f64s(22, 45); + let samples5 = generate_f64s(1, 100); + + let distributions = vec![ + get_distribution(samples1, MetricKind::Incremental), + get_distribution(samples2, MetricKind::Absolute), + get_distribution(samples3, MetricKind::Absolute), + get_distribution(samples4, MetricKind::Incremental), + get_distribution(samples5, MetricKind::Incremental), + ]; + + let expected_distributions = vec![ + Some(distributions[0].clone()), + None, + Some(get_distribution( + generate_f64s(126, 187), + MetricKind::Incremental, + )), + Some(distributions[3].clone()), + Some(distributions[4].clone()), + ]; + + run_comparisons(distributions, expected_distributions); + } + + #[test] + fn absolute_aggregated_histogram() { + let samples1 = generate_f64s(1, 100); + let samples2 = generate_f64s(1, 125); + + let agg_histograms = vec![ + get_aggregated_histogram(samples1, MetricKind::Absolute), + get_aggregated_histogram(samples2, MetricKind::Absolute), + ]; + + let expected_agg_histograms = vec![]; + + run_comparisons(agg_histograms, expected_agg_histograms); + } + + #[test] + fn incremental_aggregated_histogram() { + let samples1 = generate_f64s(1, 100); + let samples2 = generate_f64s(1, 125); + + let agg_histograms = vec![ + get_aggregated_histogram(samples1, MetricKind::Incremental), + get_aggregated_histogram(samples2, MetricKind::Incremental), + ]; + + let expected_agg_histograms = agg_histograms + .clone() + .into_iter() + .map(Option::Some) + .collect::>(); + + run_comparisons(agg_histograms, expected_agg_histograms); + } + + #[test] + fn mixed_aggregated_histogram() { + let samples1 = generate_f64s(1, 100); + let samples2 = generate_f64s(75, 125); + let samples3 = generate_f64s(75, 187); + let samples4 = generate_f64s(22, 45); + let samples5 = generate_f64s(1, 100); + + let agg_histograms = vec![ + get_aggregated_histogram(samples1, MetricKind::Incremental), + get_aggregated_histogram(samples2, MetricKind::Absolute), + get_aggregated_histogram(samples3, MetricKind::Absolute), + 
get_aggregated_histogram(samples4, MetricKind::Incremental), + get_aggregated_histogram(samples5, MetricKind::Incremental), + ]; + + let expected_agg_histograms = vec![]; + + run_comparisons(agg_histograms, expected_agg_histograms); + } +} diff --git a/src/sinks/statsd/request_builder.rs b/src/sinks/statsd/request_builder.rs new file mode 100644 index 0000000000000..9cfdf119a08d3 --- /dev/null +++ b/src/sinks/statsd/request_builder.rs @@ -0,0 +1,155 @@ +use std::convert::Infallible; + +use bytes::BytesMut; +use snafu::Snafu; +use tokio_util::codec::Encoder; +use vector_common::request_metadata::RequestMetadata; +use vector_core::event::{EventFinalizers, Finalizable, Metric}; + +use super::{encoder::StatsdEncoder, service::StatsdRequest}; +use crate::{ + internal_events::SocketMode, + sinks::util::{ + metadata::RequestMetadataBuilder, request_builder::EncodeResult, IncrementalRequestBuilder, + }, +}; + +#[derive(Debug, Snafu)] +pub enum RequestBuilderError { + #[snafu(display("Failed to build the request builder: {}", reason))] + FailedToBuild { reason: &'static str }, +} + +/// Incremental request builder specific to StatsD. +pub struct StatsdRequestBuilder { + encoder: StatsdEncoder, + request_max_size: usize, + encode_buf: BytesMut, +} + +impl StatsdRequestBuilder { + pub fn new( + default_namespace: Option, + socket_mode: SocketMode, + ) -> Result { + let encoder = StatsdEncoder::new(default_namespace); + let request_max_size = match socket_mode { + // Following the recommended advice [1], we use a datagram size that should reasonably + // fit within the MTU of the common places that Vector will run: virtual cloud networks, + // regular ol' Ethernet networks, and so on. + // + // [1]: https://github.com/statsd/statsd/blob/0de340f864/docs/metric_types.md?plain=1#L121 + SocketMode::Udp => 1432, + + // Since messages can be much bigger with TCP and Unix domain sockets, we'll give + // ourselves the chance to build bigger requests which should increase I/O efficiency. + SocketMode::Tcp | SocketMode::Unix => 8192, + }; + + Ok(Self::from_encoder_and_max_size(encoder, request_max_size)) + } + + fn from_encoder_and_max_size(encoder: StatsdEncoder, request_max_size: usize) -> Self { + Self { + encoder, + request_max_size, + encode_buf: BytesMut::with_capacity(8192), + } + } +} + +impl Clone for StatsdRequestBuilder { + fn clone(&self) -> Self { + Self::from_encoder_and_max_size(self.encoder.clone(), self.request_max_size) + } +} + +impl IncrementalRequestBuilder> for StatsdRequestBuilder { + type Metadata = (EventFinalizers, RequestMetadata); + type Payload = Vec; + type Request = StatsdRequest; + type Error = Infallible; + + fn encode_events_incremental( + &mut self, + mut input: Vec, + ) -> Vec> { + let mut results = Vec::new(); + let mut pending = None; + + let mut metrics = input.drain(..); + while metrics.len() != 0 || pending.is_some() { + let mut n = 0; + + let mut request_buf = Vec::new(); + let mut finalizers = EventFinalizers::default(); + let mut request_metadata_builder = RequestMetadataBuilder::default(); + + loop { + // Grab the previously pending metric, or the next metric from the drain. + let (mut metric, was_encoded) = match pending.take() { + Some(metric) => (metric, true), + None => match metrics.next() { + Some(metric) => (metric, false), + None => break, + }, + }; + + // Encode the metric. Once we've done that, see if it can fit into the request + // buffer without exceeding the maximum request size limit. 
+ // + // If it doesn't fit, we'll store this metric off to the side and break out of this + // loop, which will finalize the current request payload and store it in the vector of + // all generated requests. Otherwise, we'll merge it in and continue encoding. + // + // Crucially, we only break out if the current request payload already has data in + // it, as we need to be able to stick at least one encoded metric into each request. + if !was_encoded { + self.encode_buf.clear(); + self.encoder + .encode(&metric, &mut self.encode_buf) + .expect("encoding is infallible"); + } + + let request_buf_len = request_buf.len(); + if request_buf_len != 0 + && (request_buf_len + self.encode_buf.len() > self.request_max_size) + { + // The metric, as encoded, would cause us to exceed our maximum request size, so + // store it off to the side and finalize the current request. + pending = Some(metric); + break; + } + + // Merge the encoded metric into the request buffer and take over its event + // finalizers, etc. + request_buf.extend(&self.encode_buf[..]); + finalizers.merge(metric.take_finalizers()); + request_metadata_builder.track_event(metric); + n += 1; + } + + // If we encoded one or more metrics this pass, finalize the request. + if n > 0 { + let encode_result = EncodeResult::uncompressed(request_buf); + let request_metadata = request_metadata_builder.build(&encode_result); + + results.push(Ok(( + (finalizers, request_metadata), + encode_result.into_payload(), + ))); + } + } + + results + } + + fn build_request(&mut self, metadata: Self::Metadata, payload: Self::Payload) -> Self::Request { + let (finalizers, metadata) = metadata; + StatsdRequest { + payload, + finalizers, + metadata, + } + } +} diff --git a/src/sinks/statsd/service.rs b/src/sinks/statsd/service.rs new file mode 100644 index 0000000000000..4d3c3bc78dd09 --- /dev/null +++ b/src/sinks/statsd/service.rs @@ -0,0 +1,106 @@ +use std::task::{Context, Poll}; + +use futures_util::future::BoxFuture; +use tower::Service; +use vector_common::{ + finalization::{EventFinalizers, EventStatus, Finalizable}, + internal_event::CountByteSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; +use vector_core::stream::DriverResponse; + +/// Generalized request for sending metrics to a StatsD endpoint. +#[derive(Clone, Debug)] +pub struct StatsdRequest { + pub payload: Vec, + pub finalizers: EventFinalizers, + pub metadata: RequestMetadata, +} + +impl Finalizable for StatsdRequest { + fn take_finalizers(&mut self) -> EventFinalizers { + std::mem::take(&mut self.finalizers) + } +} + +impl MetaDescriptive for StatsdRequest { + fn get_metadata(&self) -> RequestMetadata { + self.metadata + } +} + +// Placeholder response to shuttle request metadata for StatsD requests. +// +// As StatsD sends no response back to a caller, there's no success/failure to report except for raw +// I/O errors when sending the request. Primarily, this type shuttles the metadata around the +// request -- events sent, bytes sent, etc -- that is required by `Driver`. +#[derive(Debug)] +pub struct StatsdResponse { + metadata: RequestMetadata, +} + +impl DriverResponse for StatsdResponse { + fn event_status(&self) -> EventStatus { + // If we generated a response, that implies our send concluded without any I/O errors, so we + // assume things were delivered. 
+ EventStatus::Delivered + } + + fn events_sent(&self) -> CountByteSize { + CountByteSize( + self.metadata.event_count(), + self.metadata.events_byte_size(), + ) + } + + fn bytes_sent(&self) -> Option { + Some(self.metadata.request_wire_size()) + } +} + +#[derive(Clone)] +pub struct StatsdService { + transport: T, +} + +impl StatsdService { + /// Creates a new `StatsdService` with the given `transport` service. + /// + /// The `transport` service is responsible for sending the actual encoded requests to the downstream + /// endpoint. + pub const fn from_transport(transport: T) -> Self { + Self { transport } + } +} + +impl Service for StatsdService +where + T: Service>, + T::Error: Into>, + T::Future: Send + 'static, +{ + type Response = StatsdResponse; + type Error = Box; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context) -> Poll> { + self.transport.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, request: StatsdRequest) -> Self::Future { + let StatsdRequest { + payload, + finalizers: _, + metadata, + } = request; + + let send_future = self.transport.call(payload); + + Box::pin(async move { + send_future + .await + .map(|_| StatsdResponse { metadata }) + .map_err(Into::into) + }) + } +} diff --git a/src/sinks/statsd/sink.rs b/src/sinks/statsd/sink.rs new file mode 100644 index 0000000000000..b17097147bed2 --- /dev/null +++ b/src/sinks/statsd/sink.rs @@ -0,0 +1,98 @@ +use std::{fmt, future::ready}; + +use async_trait::async_trait; +use futures_util::{ + stream::{self, BoxStream}, + StreamExt, +}; +use tower::Service; +use vector_common::internal_event::Protocol; +use vector_core::{ + event::Event, + sink::StreamSink, + stream::{BatcherSettings, DriverResponse}, +}; + +use crate::sinks::util::SinkBuilderExt; + +use super::{ + batch::StatsdBatchSizer, normalizer::StatsdNormalizer, request_builder::StatsdRequestBuilder, + service::StatsdRequest, +}; + +pub(crate) struct StatsdSink { + service: S, + batch_settings: BatcherSettings, + request_builder: StatsdRequestBuilder, + protocol: Protocol, +} + +impl StatsdSink +where + S: Service + Send, + S::Error: fmt::Debug + Send + 'static, + S::Future: Send + 'static, + S::Response: DriverResponse, +{ + /// Creates a new `StatsdSink`. + pub const fn new( + service: S, + batch_settings: BatcherSettings, + request_builder: StatsdRequestBuilder, + protocol: Protocol, + ) -> Self { + Self { + service, + batch_settings, + request_builder, + protocol, + } + } + + async fn run_inner(self: Box, input: BoxStream<'_, Event>) -> Result<(), ()> { + input + // Convert `Event` to `Metric` so we don't have to deal with constant conversions. + .filter_map(|event| ready(event.try_into_metric())) + // Converts absolute counters into incremental counters, but otherwise leaves everything + // else alone. The encoder will handle the difference in absolute vs incremental for + // other metric types in type-specific ways i.e. incremental gauge updates use a + // different syntax, etc. + .normalized_with_default::() + .batched( + self.batch_settings + .into_item_size_config(StatsdBatchSizer::default()), + ) + // We build our requests "incrementally", which means that for a single batch of + // metrics, we might generate N requests to represent all of the metrics in the batch. + // + // We do this as for different socket modes, there are optimal request sizes to use to + // ensure the highest rate of delivery, such as staying within the MTU for UDP, etc. 
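+        // For reference, `StatsdRequestBuilder::new` caps UDP request payloads at 1432 bytes so
+        // that a single datagram fits comfortably within a typical 1500-byte Ethernet MTU, while
+        // TCP and Unix domain socket requests may grow up to 8192 bytes.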
+ .incremental_request_builder(self.request_builder) + // This unrolls the vector of request results that our request builder generates. + .flat_map(stream::iter) + // Generating requests _cannot_ fail, so we just unwrap our built requests. + .unwrap_infallible() + // Finally, we generate the driver which will take our requests, send them off, and appropriately handle + // finalization of the events, and logging/metrics, as the requests are responded to. + .into_driver(self.service) + .protocol(self.protocol) + .run() + .await + } +} + +#[async_trait] +impl StreamSink for StatsdSink +where + S: Service + Send, + S::Error: fmt::Debug + Send + 'static, + S::Future: Send + 'static, + S::Response: DriverResponse, +{ + async fn run(self: Box, input: BoxStream<'_, Event>) -> Result<(), ()> { + // Rust has issues with lifetimes and generics, which `async_trait` exacerbates, so we write + // a normal async fn in `StatsdSink` itself, and then call out to it from this trait + // implementation, which makes the compiler happy. + self.run_inner(input).await + } +} diff --git a/src/sinks/statsd/tests.rs b/src/sinks/statsd/tests.rs new file mode 100644 index 0000000000000..4c30a46b78551 --- /dev/null +++ b/src/sinks/statsd/tests.rs @@ -0,0 +1,100 @@ +use bytes::Bytes; +use futures::{StreamExt, TryStreamExt}; +use futures_util::stream; +use tokio::{net::UdpSocket, sync::mpsc}; +use tokio_stream::wrappers::ReceiverStream; +use tokio_util::{codec::BytesCodec, udp::UdpFramed}; +use vector_core::{ + event::{metric::TagValue, Event, Metric, MetricKind, MetricTags, MetricValue, StatisticKind}, + metric_tags, +}; + +use crate::{ + config::{SinkConfig, SinkContext}, + sinks::{statsd::config::Mode, util::service::net::UdpConnectorConfig}, + test_util::{ + collect_n, + components::{assert_sink_compliance, SINK_TAGS}, + next_addr, trace_init, + }, +}; + +use super::StatsdSinkConfig; + +fn tags() -> MetricTags { + metric_tags!( + "normal_tag" => "value", + "multi_value" => "true", + "multi_value" => "false", + "multi_value" => TagValue::Bare, + "bare_tag" => TagValue::Bare, + ) +} + +#[tokio::test] +async fn test_send_to_statsd() { + trace_init(); + + let addr = next_addr(); + + let config = StatsdSinkConfig { + default_namespace: Some("ns".into()), + mode: Mode::Udp(UdpConnectorConfig::from_address( + addr.ip().to_string(), + addr.port(), + )), + batch: Default::default(), + acknowledgements: Default::default(), + }; + + let events = vec![ + Event::Metric( + Metric::new( + "counter", + MetricKind::Incremental, + MetricValue::Counter { value: 1.5 }, + ) + .with_namespace(Some("vector")) + .with_tags(Some(tags())), + ), + Event::Metric( + Metric::new( + "histogram", + MetricKind::Incremental, + MetricValue::Distribution { + samples: vector_core::samples![2.0 => 100], + statistic: StatisticKind::Histogram, + }, + ) + .with_namespace(Some("vector")), + ), + ]; + let (tx, rx) = mpsc::channel(1); + + let context = SinkContext::new_test(); + assert_sink_compliance(&SINK_TAGS, async move { + let (sink, _healthcheck) = config.build(context).await.unwrap(); + + let socket = UdpSocket::bind(addr).await.unwrap(); + tokio::spawn(async move { + let mut stream = UdpFramed::new(socket, BytesCodec::new()) + .map_err(|error| error!(message = "Error reading line.", %error)) + .map_ok(|(bytes, _addr)| bytes.freeze()); + + while let Some(Ok(item)) = stream.next().await { + tx.send(item).await.unwrap(); + } + }); + + sink.run(stream::iter(events).map(Into::into)) + .await + .expect("Running sink failed") + }) + .await; + + let messages 
= collect_n(ReceiverStream::new(rx), 1).await; + assert_eq!( + messages[0], + Bytes::from("vector.counter:1.5|c|#bare_tag,multi_value:true,multi_value:false,multi_value,normal_tag:value\nvector.histogram:2|h|@0.01\n"), + ); +} diff --git a/src/sinks/util/builder.rs b/src/sinks/util/builder.rs index 4cbb05a6e5793..0d7af1635a4b7 100644 --- a/src/sinks/util/builder.rs +++ b/src/sinks/util/builder.rs @@ -1,6 +1,16 @@ -use std::{fmt, future::Future, hash::Hash, num::NonZeroUsize, pin::Pin, sync::Arc}; +use std::{ + convert::Infallible, + fmt, + future::Future, + hash::Hash, + num::NonZeroUsize, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; use futures_util::{stream::Map, Stream, StreamExt}; +use pin_project::pin_project; use tower::Service; use vector_core::{ event::{Finalizable, Metric}, @@ -20,6 +30,16 @@ use super::{ impl SinkBuilderExt for T where T: Stream {} pub trait SinkBuilderExt: Stream { + /// Converts a stream of infallible results by unwrapping them. + /// + /// For a stream of `Result` items, this turns it into a stream of `T` items. + fn unwrap_infallible(self) -> UnwrapInfallible + where + Self: Stream> + Sized, + { + UnwrapInfallible { st: self } + } + /// Batches the stream based on the given partitioner and batch settings. /// /// The stream will yield batches of events, with their partition key, when either a batch fills @@ -210,3 +230,23 @@ pub trait SinkBuilderExt: Stream { Driver::new(self, service) } } + +#[pin_project] +pub struct UnwrapInfallible { + #[pin] + st: St, +} + +impl Stream for UnwrapInfallible +where + St: Stream>, +{ + type Item = T; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.project(); + this.st + .poll_next(cx) + .map(|maybe| maybe.map(|result| result.unwrap())) + } +} diff --git a/src/sinks/util/metadata.rs b/src/sinks/util/metadata.rs index 54c9346bb7895..975959e56aa15 100644 --- a/src/sinks/util/metadata.rs +++ b/src/sinks/util/metadata.rs @@ -7,7 +7,7 @@ use vector_common::request_metadata::RequestMetadata; use super::request_builder::EncodeResult; -#[derive(Default, Clone)] +#[derive(Clone, Default)] pub struct RequestMetadataBuilder { event_count: usize, events_byte_size: usize, @@ -38,9 +38,13 @@ impl RequestMetadataBuilder { } } - pub fn increment(&mut self, event_count: usize, events_byte_size: usize) { - self.event_count += event_count; - self.events_byte_size += events_byte_size; + pub fn track_event(&mut self, event: E) + where + E: ByteSizeOf + EstimatedJsonEncodedSizeOf, + { + self.event_count += 1; + self.events_byte_size += event.size_of(); + self.events_estimated_json_encoded_byte_size += event.estimated_json_encoded_size_of(); } pub fn with_request_size(&self, size: NonZeroUsize) -> RequestMetadata { diff --git a/src/sinks/util/service.rs b/src/sinks/util/service.rs index db7b602d2df89..2872c4eb39f6d 100644 --- a/src/sinks/util/service.rs +++ b/src/sinks/util/service.rs @@ -35,6 +35,7 @@ use crate::{ mod concurrency; mod health; mod map; +pub mod net; pub type Svc = RateLimit, Timeout>, L>>; pub type TowerBatchedSink = BatchSink, B>; diff --git a/src/sinks/util/service/net/mod.rs b/src/sinks/util/service/net/mod.rs new file mode 100644 index 0000000000000..bf571f46100f0 --- /dev/null +++ b/src/sinks/util/service/net/mod.rs @@ -0,0 +1,367 @@ +mod tcp; +mod udp; + +#[cfg(unix)] +mod unix; + +use std::{ + io, + net::SocketAddr, + task::{ready, Context, Poll}, + time::Duration, +}; + +#[cfg(unix)] +use std::path::PathBuf; + +use crate::{ + internal_events::{ + 
SocketOutgoingConnectionError, TcpSocketConnectionEstablished, UdpSendIncompleteError, + }, + sinks::{util::retries::ExponentialBackoff, Healthcheck}, +}; + +#[cfg(unix)] +use crate::internal_events::{UnixSendIncompleteError, UnixSocketConnectionEstablished}; + +pub use self::tcp::TcpConnectorConfig; +pub use self::udp::UdpConnectorConfig; + +#[cfg(unix)] +pub use self::unix::{UnixConnectorConfig, UnixMode}; + +use self::tcp::TcpConnector; +use self::udp::UdpConnector; +#[cfg(unix)] +use self::unix::{UnixConnector, UnixEither}; + +use futures_util::{future::BoxFuture, FutureExt}; +use snafu::{ResultExt, Snafu}; +use tokio::{ + io::AsyncWriteExt, + net::{TcpStream, UdpSocket}, + sync::oneshot, + time::sleep, +}; +use tower::Service; +use vector_config::configurable_component; +use vector_core::tls::{MaybeTlsStream, TlsError}; + +/// Hostname and port tuple. +/// +/// Both IP addresses and hostnames/fully qualified domain names (FQDNs) are accepted formats. +/// +/// The address _must_ include a port. +#[configurable_component] +#[derive(Clone, Debug)] +#[serde(try_from = "String", into = "String")] +#[configurable(title = "The address to connect to.")] +#[configurable(metadata(docs::examples = "92.12.333.224:5000"))] +#[configurable(metadata(docs::examples = "somehost:5000"))] +struct HostAndPort { + /// Hostname. + host: String, + + /// Port. + port: u16, +} + +impl TryFrom for HostAndPort { + type Error = String; + + fn try_from(value: String) -> Result { + let uri = value.parse::().map_err(|e| e.to_string())?; + let host = uri + .host() + .ok_or_else(|| "missing host".to_string())? + .to_string(); + let port = uri.port_u16().ok_or_else(|| "missing port".to_string())?; + + Ok(Self { host, port }) + } +} + +impl From for String { + fn from(value: HostAndPort) -> Self { + format!("{}:{}", value.host, value.port) + } +} + +#[derive(Debug, Snafu)] +#[snafu(module, context(suffix(false)), visibility(pub))] +pub enum NetError { + #[snafu(display("Address is invalid: {}", reason))] + InvalidAddress { reason: String }, + + #[snafu(display("Failed to resolve address: {}", source))] + FailedToResolve { source: crate::dns::DnsError }, + + #[snafu(display("No addresses returned."))] + NoAddresses, + + #[snafu(display("Failed to configure socket: {}.", source))] + FailedToConfigure { source: std::io::Error }, + + #[snafu(display("Failed to configure TLS: {}.", source))] + FailedToConfigureTLS { source: TlsError }, + + #[snafu(display("Failed to bind socket: {}.", source))] + FailedToBind { source: std::io::Error }, + + #[snafu(display("Failed to send message: {}", source))] + FailedToSend { source: std::io::Error }, + + #[snafu(display("Failed to connect to endpoint: {}", source))] + FailedToConnect { source: std::io::Error }, + + #[snafu(display("Failed to connect to TLS endpoint: {}", source))] + FailedToConnectTLS { source: TlsError }, + + #[snafu(display("Failed to get socket back after send as channel closed unexpectedly."))] + ServiceSocketChannelClosed, +} + +enum NetworkServiceState { + /// The service is currently disconnected. + Disconnected, + + /// The service is currently attempting to connect to the endpoint. + Connecting(BoxFuture<'static, NetworkConnection>), + + /// The service is connected and idle. + Connected(NetworkConnection), + + /// The service has an in-flight send to the socket. 
+ /// + /// If the socket experiences an unrecoverable error during the send, `None` will be returned + /// over the channel to signal the need to establish a new connection rather than reusing the + /// existing connection. + Sending(oneshot::Receiver>), +} + +enum NetworkConnection { + Tcp(MaybeTlsStream), + Udp(UdpSocket), + #[cfg(unix)] + Unix(UnixEither), +} + +impl NetworkConnection { + fn on_partial_send(&self, data_size: usize, sent: usize) { + match self { + // Can't "successfully" partially send with TCP: it either all eventually sends or the + // socket has an I/O error that kills the connection entirely. + Self::Tcp(_) => {} + Self::Udp(_) => { + emit!(UdpSendIncompleteError { data_size, sent }); + } + #[cfg(unix)] + Self::Unix(_) => { + emit!(UnixSendIncompleteError { data_size, sent }); + } + } + } + + async fn send(&mut self, buf: &[u8]) -> io::Result { + match self { + Self::Tcp(stream) => stream.write_all(buf).await.map(|()| buf.len()), + Self::Udp(socket) => socket.send(buf).await, + #[cfg(unix)] + Self::Unix(socket) => socket.send(buf).await, + } + } +} + +enum ConnectionMetadata { + Tcp { + peer_addr: SocketAddr, + }, + #[cfg(unix)] + Unix { + path: PathBuf, + }, +} + +#[derive(Clone)] +enum ConnectorType { + Tcp(TcpConnector), + Udp(UdpConnector), + #[cfg(unix)] + Unix(UnixConnector), +} + +/// A connector for generically connecting to a remote network endpoint. +/// +/// The connection can be based on TCP, UDP, or Unix Domain Sockets. +#[derive(Clone)] +pub struct NetworkConnector { + inner: ConnectorType, +} + +impl NetworkConnector { + fn on_connected(&self, metadata: ConnectionMetadata) { + match metadata { + ConnectionMetadata::Tcp { peer_addr } => { + emit!(TcpSocketConnectionEstablished { + peer_addr: Some(peer_addr) + }); + } + #[cfg(unix)] + ConnectionMetadata::Unix { path } => { + emit!(UnixSocketConnectionEstablished { path: &path }); + } + } + } + + fn on_connection_error(&self, error: E) { + emit!(SocketOutgoingConnectionError { error }); + } + + async fn connect(&self) -> Result<(NetworkConnection, Option), NetError> { + match &self.inner { + ConnectorType::Tcp(connector) => { + let (peer_addr, stream) = connector.connect().await?; + + Ok(( + NetworkConnection::Tcp(stream), + Some(ConnectionMetadata::Tcp { peer_addr }), + )) + } + ConnectorType::Udp(connector) => { + let socket = connector.connect().await?; + + Ok((NetworkConnection::Udp(socket), None)) + } + #[cfg(unix)] + ConnectorType::Unix(connector) => { + let (path, socket) = connector.connect().await?; + + Ok(( + NetworkConnection::Unix(socket), + Some(ConnectionMetadata::Unix { path }), + )) + } + } + } + + async fn connect_backoff(&self) -> NetworkConnection { + // TODO: Make this configurable. + let mut backoff = ExponentialBackoff::from_millis(2) + .factor(250) + .max_delay(Duration::from_secs(60)); + + loop { + match self.connect().await { + Ok((connection, maybe_metadata)) => { + if let Some(metadata) = maybe_metadata { + self.on_connected(metadata); + } + + return connection; + } + Err(error) => { + self.on_connection_error(error); + sleep(backoff.next().unwrap()).await; + } + } + } + } + + /// Gets a `Healthcheck` based on the configured destination of this connector. + pub fn healthcheck(&self) -> Healthcheck { + let connector = self.clone(); + Box::pin(async move { connector.connect().await.map(|_| ()).map_err(Into::into) }) + } + + /// Gets a `Service` suitable for sending data to the configured destination of this connector. 
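+    ///
+    /// The returned service connects lazily, retries failed connection attempts with
+    /// exponential backoff, and reuses the established socket across sends until an
+    /// unrecoverable send error forces a reconnect.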
+ pub fn service(&self) -> NetworkService { + NetworkService::new(self.clone()) + } +} + +/// A `Service` implementation for generically sending bytes to a remote peer over a network connection. +/// +/// The connection can be based on TCP, UDP, or Unix Domain Sockets. +pub struct NetworkService { + connector: NetworkConnector, + state: NetworkServiceState, +} + +impl NetworkService { + const fn new(connector: NetworkConnector) -> Self { + Self { + connector, + state: NetworkServiceState::Disconnected, + } + } +} + +impl Service> for NetworkService { + type Response = usize; + type Error = NetError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + loop { + self.state = match &mut self.state { + NetworkServiceState::Disconnected => { + let connector = self.connector.clone(); + NetworkServiceState::Connecting(Box::pin(async move { + connector.connect_backoff().await + })) + } + NetworkServiceState::Connecting(fut) => { + let socket = ready!(fut.poll_unpin(cx)); + NetworkServiceState::Connected(socket) + } + NetworkServiceState::Connected(_) => break, + NetworkServiceState::Sending(fut) => { + match ready!(fut.poll_unpin(cx)) { + // When a send concludes, and there's an error, the request future sends + // back `None`. Otherwise, it'll send back `Some(...)` with the socket. + Ok(maybe_socket) => match maybe_socket { + Some(socket) => NetworkServiceState::Connected(socket), + None => NetworkServiceState::Disconnected, + }, + Err(_) => return Poll::Ready(Err(NetError::ServiceSocketChannelClosed)), + } + } + }; + } + Poll::Ready(Ok(())) + } + + fn call(&mut self, buf: Vec) -> Self::Future { + let (tx, rx) = oneshot::channel(); + + let mut socket = match std::mem::replace(&mut self.state, NetworkServiceState::Sending(rx)) + { + NetworkServiceState::Connected(socket) => socket, + _ => panic!("poll_ready must be called first"), + }; + + Box::pin(async move { + match socket.send(&buf).await.context(net_error::FailedToSend) { + Ok(sent) => { + // Emit an error if we weren't able to send the entire buffer. + if sent != buf.len() { + socket.on_partial_send(buf.len(), sent); + } + + // Send the socket back to the service, since theoretically it's still valid to + // reuse given that we may have simply overrun the OS socket buffers, etc. + let _ = tx.send(Some(socket)); + + Ok(sent) + } + Err(e) => { + // We need to signal back to the service that it needs to create a fresh socket + // since this one could be tainted. + let _ = tx.send(None); + + Err(e) + } + } + }) + } +} diff --git a/src/sinks/util/service/net/tcp.rs b/src/sinks/util/service/net/tcp.rs new file mode 100644 index 0000000000000..5cf97cb2ca1e5 --- /dev/null +++ b/src/sinks/util/service/net/tcp.rs @@ -0,0 +1,101 @@ +use std::net::SocketAddr; + +use snafu::ResultExt; +use tokio::net::TcpStream; + +use vector_config::configurable_component; +use vector_core::{ + tcp::TcpKeepaliveConfig, + tls::{MaybeTlsSettings, MaybeTlsStream, TlsEnableableConfig}, +}; + +use crate::dns; + +use super::{net_error::*, ConnectorType, HostAndPort, NetError, NetworkConnector}; + +/// TCP configuration. +#[configurable_component] +#[derive(Clone, Debug)] +pub struct TcpConnectorConfig { + #[configurable(derived)] + address: HostAndPort, + + #[configurable(derived)] + keepalive: Option, + + /// The size of the socket's send buffer. + /// + /// If set, the value of the setting is passed via the `SO_SNDBUF` option. 
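+    ///
+    /// If the operating system rejects the requested size, a warning is logged and the
+    /// connection proceeds with the default send buffer size.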
+ #[configurable(metadata(docs::type_unit = "bytes"))] + #[configurable(metadata(docs::examples = 65536))] + send_buffer_size: Option, + + #[configurable(derived)] + tls: Option, +} + +impl TcpConnectorConfig { + pub const fn from_address(host: String, port: u16) -> Self { + Self { + address: HostAndPort { host, port }, + keepalive: None, + send_buffer_size: None, + tls: None, + } + } + + /// Creates a [`NetworkConnector`] from this TCP connector configuration. + pub fn as_connector(&self) -> NetworkConnector { + NetworkConnector { + inner: ConnectorType::Tcp(TcpConnector { + address: self.address.clone(), + keepalive: self.keepalive, + send_buffer_size: self.send_buffer_size, + tls: self.tls.clone(), + }), + } + } +} + +#[derive(Clone)] +pub(super) struct TcpConnector { + address: HostAndPort, + keepalive: Option, + send_buffer_size: Option, + tls: Option, +} + +impl TcpConnector { + pub(super) async fn connect( + &self, + ) -> Result<(SocketAddr, MaybeTlsStream), NetError> { + let ip = dns::Resolver + .lookup_ip(self.address.host.clone()) + .await + .context(FailedToResolve)? + .next() + .ok_or(NetError::NoAddresses)?; + + let addr = SocketAddr::new(ip, self.address.port); + + let tls = MaybeTlsSettings::from_config(&self.tls, false).context(FailedToConfigureTLS)?; + let mut stream = tls + .connect(self.address.host.as_str(), &addr) + .await + .context(FailedToConnectTLS)?; + + if let Some(send_buffer_size) = self.send_buffer_size { + if let Err(error) = stream.set_send_buffer_bytes(send_buffer_size) { + warn!(%error, "Failed configuring send buffer size on TCP socket."); + } + } + + if let Some(keepalive) = self.keepalive { + if let Err(error) = stream.set_keepalive(keepalive) { + warn!(%error, "Failed configuring keepalive on TCP socket."); + } + } + + Ok((addr, stream)) + } +} diff --git a/src/sinks/util/service/net/udp.rs b/src/sinks/util/service/net/udp.rs new file mode 100644 index 0000000000000..d2655a409b008 --- /dev/null +++ b/src/sinks/util/service/net/udp.rs @@ -0,0 +1,83 @@ +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + +use snafu::ResultExt; +use tokio::net::UdpSocket; + +use vector_config::configurable_component; + +use crate::{dns, net}; + +use super::{net_error::*, ConnectorType, HostAndPort, NetError, NetworkConnector}; + +/// UDP configuration. +#[configurable_component] +#[derive(Clone, Debug)] +pub struct UdpConnectorConfig { + #[configurable(derived)] + address: HostAndPort, + + /// The size of the socket's send buffer. + /// + /// If set, the value of the setting is passed via the `SO_SNDBUF` option. + #[configurable(metadata(docs::type_unit = "bytes"))] + #[configurable(metadata(docs::examples = 65536))] + send_buffer_size: Option, +} + +impl UdpConnectorConfig { + pub const fn from_address(host: String, port: u16) -> Self { + Self { + address: HostAndPort { host, port }, + send_buffer_size: None, + } + } + + /// Creates a [`NetworkConnector`] from this UDP connector configuration. + pub fn as_connector(&self) -> NetworkConnector { + NetworkConnector { + inner: ConnectorType::Udp(UdpConnector { + address: self.address.clone(), + send_buffer_size: self.send_buffer_size, + }), + } + } +} + +#[derive(Clone)] +pub(super) struct UdpConnector { + address: HostAndPort, + send_buffer_size: Option, +} + +impl UdpConnector { + pub(super) async fn connect(&self) -> Result { + let ip = dns::Resolver + .lookup_ip(self.address.host.clone()) + .await + .context(FailedToResolve)? 
+ .next() + .ok_or(NetError::NoAddresses)?; + + let addr = SocketAddr::new(ip, self.address.port); + let bind_address = find_bind_address(&addr); + + let socket = UdpSocket::bind(bind_address).await.context(FailedToBind)?; + + if let Some(send_buffer_size) = self.send_buffer_size { + if let Err(error) = net::set_send_buffer_size(&socket, send_buffer_size) { + warn!(%error, "Failed configuring send buffer size on UDP socket."); + } + } + + socket.connect(addr).await.context(FailedToConnect)?; + + Ok(socket) + } +} + +fn find_bind_address(remote_addr: &SocketAddr) -> SocketAddr { + match remote_addr { + SocketAddr::V4(_) => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), + SocketAddr::V6(_) => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), + } +} diff --git a/src/sinks/util/service/net/unix.rs b/src/sinks/util/service/net/unix.rs new file mode 100644 index 0000000000000..5bd1e4219d7a8 --- /dev/null +++ b/src/sinks/util/service/net/unix.rs @@ -0,0 +1,134 @@ +use std::{ + io, + os::fd::{AsFd, BorrowedFd}, + path::{Path, PathBuf}, +}; + +use snafu::ResultExt; +use tokio::{ + io::AsyncWriteExt, + net::{UnixDatagram, UnixStream}, +}; + +use vector_config::configurable_component; + +use crate::net; + +use super::{net_error::*, ConnectorType, NetError, NetworkConnector}; + +/// Unix socket modes. +#[configurable_component] +#[derive(Clone, Copy, Debug)] +pub enum UnixMode { + /// Datagram-oriented (`SOCK_DGRAM`). + Datagram, + + /// Stream-oriented (`SOCK_STREAM`). + Stream, +} + +/// Unix Domain Socket configuration. +#[configurable_component] +#[derive(Clone, Debug)] +pub struct UnixConnectorConfig { + /// The Unix socket path. + /// + /// This should be an absolute path. + #[configurable(metadata(docs::examples = "/path/to/socket"))] + path: PathBuf, + + /// The Unix socket mode to use. + #[serde(default = "default_unix_mode")] + unix_mode: UnixMode, + + /// The size of the socket's send buffer. + /// + /// If set, the value of the setting is passed via the `SO_SNDBUF` option. + #[configurable(metadata(docs::type_unit = "bytes"))] + #[configurable(metadata(docs::examples = 65536))] + send_buffer_size: Option, +} + +const fn default_unix_mode() -> UnixMode { + UnixMode::Stream +} + +impl UnixConnectorConfig { + pub fn from_path>(path: P) -> Self { + Self { + path: path.as_ref().to_path_buf(), + unix_mode: UnixMode::Stream, + send_buffer_size: None, + } + } + + /// Creates a [`NetworkConnector`] from this Unix Domain Socket connector configuration. 
+ pub fn as_connector(&self) -> NetworkConnector { + NetworkConnector { + inner: ConnectorType::Unix(UnixConnector { + path: self.path.clone(), + mode: self.unix_mode, + send_buffer_size: self.send_buffer_size, + }), + } + } +} + +pub(super) enum UnixEither { + Datagram(UnixDatagram), + Stream(UnixStream), +} + +impl UnixEither { + pub(super) async fn send(&mut self, buf: &[u8]) -> io::Result { + match self { + Self::Datagram(datagram) => datagram.send(buf).await, + Self::Stream(stream) => stream.write_all(buf).await.map(|_| buf.len()), + } + } +} + +impl AsFd for UnixEither { + fn as_fd(&self) -> BorrowedFd<'_> { + match self { + Self::Datagram(datagram) => datagram.as_fd(), + Self::Stream(stream) => stream.as_fd(), + } + } +} + +#[derive(Clone)] +pub(super) struct UnixConnector { + path: PathBuf, + mode: UnixMode, + send_buffer_size: Option, +} + +impl UnixConnector { + pub(super) async fn connect(&self) -> Result<(PathBuf, UnixEither), NetError> { + let either_socket = match self.mode { + UnixMode::Datagram => { + UnixDatagram::unbound() + .context(FailedToBind) + .and_then(|datagram| { + datagram + .connect(&self.path) + .context(FailedToConnect) + .map(|_| UnixEither::Datagram(datagram)) + })? + } + UnixMode::Stream => UnixStream::connect(&self.path) + .await + .context(FailedToConnect) + .map(UnixEither::Stream)?, + }; + + if let Some(send_buffer_size) = self.send_buffer_size { + if let Err(error) = net::set_send_buffer_size(&either_socket, send_buffer_size) { + warn!(%error, "Failed configuring send buffer size on Unix socket."); + } + } + + Ok((self.path.clone(), either_socket)) + } +} diff --git a/src/sinks/util/udp.rs b/src/sinks/util/udp.rs index 4899a66b84959..890f2f10d0154 100644 --- a/src/sinks/util/udp.rs +++ b/src/sinks/util/udp.rs @@ -1,17 +1,15 @@ use std::{ net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, pin::Pin, - task::{ready, Context, Poll}, time::Duration, }; use async_trait::async_trait; use bytes::BytesMut; -use futures::{future::BoxFuture, stream::BoxStream, FutureExt, StreamExt}; +use futures::{stream::BoxStream, FutureExt, StreamExt}; use snafu::{ResultExt, Snafu}; -use tokio::{net::UdpSocket, sync::oneshot, time::sleep}; +use tokio::{net::UdpSocket, time::sleep}; use tokio_util::codec::Encoder; -use tower::Service; use vector_common::internal_event::{ ByteSize, BytesSent, InternalEventHandle, Protocol, Registered, }; @@ -27,27 +25,23 @@ use crate::{ SocketEventsSent, SocketMode, SocketSendError, UdpSendIncompleteError, UdpSocketConnectionEstablished, UdpSocketOutgoingConnectionError, }, + net, sinks::{ util::{retries::ExponentialBackoff, StreamSink}, Healthcheck, VectorSink, }, - udp, }; #[derive(Debug, Snafu)] pub enum UdpError { #[snafu(display("Failed to create UDP listener socket, error = {:?}.", source))] BindError { source: std::io::Error }, - #[snafu(display("Send error: {}", source))] - SendError { source: std::io::Error }, #[snafu(display("Connect error: {}", source))] ConnectError { source: std::io::Error }, #[snafu(display("No addresses returned."))] NoAddresses, #[snafu(display("Unable to resolve DNS: {}", source))] DnsError { source: crate::dns::DnsError }, - #[snafu(display("Failed to get UdpSocket back: {}", source))] - ServiceChannelRecvError { source: oneshot::error::RecvError }, } /// A UDP sink. 
@@ -86,14 +80,6 @@ impl UdpSinkConfig { Ok(UdpConnector::new(host, port, self.send_buffer_bytes)) } - pub fn build_service(&self) -> crate::Result<(UdpService, Healthcheck)> { - let connector = self.build_connector()?; - Ok(( - UdpService::new(connector.clone()), - async move { connector.healthcheck().await }.boxed(), - )) - } - pub fn build( &self, transformer: Transformer, @@ -145,7 +131,7 @@ impl UdpConnector { let socket = UdpSocket::bind(bind_address).await.context(BindSnafu)?; if let Some(send_buffer_bytes) = self.send_buffer_bytes { - if let Err(error) = udp::set_send_buffer_size(&socket, send_buffer_bytes) { + if let Err(error) = net::set_send_buffer_size(&socket, send_buffer_bytes) { warn!(message = "Failed configuring send buffer size on UDP socket.", %error); } } @@ -176,92 +162,6 @@ impl UdpConnector { } } -enum UdpServiceState { - Disconnected, - Connecting(BoxFuture<'static, UdpSocket>), - Connected(UdpSocket), - Sending(oneshot::Receiver), -} - -pub struct UdpService { - connector: UdpConnector, - state: UdpServiceState, - bytes_sent: Registered, -} - -impl UdpService { - fn new(connector: UdpConnector) -> Self { - Self { - connector, - state: UdpServiceState::Disconnected, - bytes_sent: register!(BytesSent::from(Protocol::UDP)), - } - } -} - -impl Service for UdpService { - type Response = (); - type Error = UdpError; - type Future = BoxFuture<'static, Result<(), Self::Error>>; - - // Emission of an internal event in case of errors is handled upstream by the caller. - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - loop { - self.state = match &mut self.state { - UdpServiceState::Disconnected => { - let connector = self.connector.clone(); - UdpServiceState::Connecting(Box::pin(async move { - connector.connect_backoff().await - })) - } - UdpServiceState::Connecting(fut) => { - let socket = ready!(fut.poll_unpin(cx)); - UdpServiceState::Connected(socket) - } - UdpServiceState::Connected(_) => break, - UdpServiceState::Sending(fut) => { - let socket = match ready!(fut.poll_unpin(cx)).context(ServiceChannelRecvSnafu) { - Ok(socket) => socket, - Err(error) => return Poll::Ready(Err(error)), - }; - UdpServiceState::Connected(socket) - } - }; - } - Poll::Ready(Ok(())) - } - - // Emission of internal events for errors and dropped events is handled upstream by the caller. - fn call(&mut self, msg: BytesMut) -> Self::Future { - let (sender, receiver) = oneshot::channel(); - let byte_size = msg.len(); - let bytes_sent = self.bytes_sent.clone(); - - let mut socket = - match std::mem::replace(&mut self.state, UdpServiceState::Sending(receiver)) { - UdpServiceState::Connected(socket) => socket, - _ => panic!("UdpService::poll_ready should be called first"), - }; - - Box::pin(async move { - // TODO: Add reconnect support as TCP/Unix? - let result = udp_send(&mut socket, &msg).await.context(SendSnafu); - _ = sender.send(socket); - - if result.is_ok() { - // NOTE: This is obviously not happening before things like compression, etc, so it's currently a - // stopgap for the `socket` and `statsd` sinks, and potentially others, to ensure that we're at least - // emitting the `BytesSent` event, and related metrics... and practically, those sinks don't compress - // anyways, so the metrics are correct as-is... they just may not be correct in the future if - // compression support was added, etc. 
- bytes_sent.emit(ByteSize(byte_size)); - } - - result - }) - } -} - struct UdpSink where E: Encoder + Clone + Send + Sync, diff --git a/src/sources/socket/udp.rs b/src/sources/socket/udp.rs index 50fd6a7f6f408..1671be995a299 100644 --- a/src/sources/socket/udp.rs +++ b/src/sources/socket/udp.rs @@ -22,6 +22,7 @@ use crate::{ internal_events::{ SocketBindError, SocketEventsReceived, SocketMode, SocketReceiveError, StreamClosedError, }, + net, serde::{default_decoding, default_framing_message_based}, shutdown::ShutdownSignal, sources::{ @@ -29,7 +30,7 @@ use crate::{ util::net::{try_bind_udp_socket, SocketListenAddr}, Source, }, - udp, SourceSender, + SourceSender, }; /// UDP configuration for the `socket` source. @@ -158,7 +159,7 @@ pub(super) fn udp( })?; if let Some(receive_buffer_bytes) = config.receive_buffer_bytes { - if let Err(error) = udp::set_receive_buffer_size(&socket, receive_buffer_bytes) { + if let Err(error) = net::set_receive_buffer_size(&socket, receive_buffer_bytes) { warn!(message = "Failed configuring receive buffer size on UDP socket.", %error); } } diff --git a/src/sources/statsd/mod.rs b/src/sources/statsd/mod.rs index 8fcb93e176e4b..4f1a09503492c 100644 --- a/src/sources/statsd/mod.rs +++ b/src/sources/statsd/mod.rs @@ -27,10 +27,11 @@ use crate::{ EventsReceived, SocketBindError, SocketBytesReceived, SocketMode, SocketReceiveError, StreamClosedError, }, + net, shutdown::ShutdownSignal, tcp::TcpKeepaliveConfig, tls::{MaybeTlsSettings, TlsSourceConfig}, - udp, SourceSender, + SourceSender, }; pub mod parser; @@ -273,7 +274,7 @@ async fn statsd_udp( .await?; if let Some(receive_buffer_bytes) = config.receive_buffer_bytes { - if let Err(error) = udp::set_receive_buffer_size(&socket, receive_buffer_bytes) { + if let Err(error) = net::set_receive_buffer_size(&socket, receive_buffer_bytes) { warn!(message = "Failed configuring receive buffer size on UDP socket.", %error); } } diff --git a/src/sources/syslog.rs b/src/sources/syslog.rs index 63671c5595249..12c8a318901a2 100644 --- a/src/sources/syslog.rs +++ b/src/sources/syslog.rs @@ -29,11 +29,12 @@ use crate::{ event::Event, internal_events::StreamClosedError, internal_events::{SocketBindError, SocketMode, SocketReceiveError}, + net, shutdown::ShutdownSignal, sources::util::net::{try_bind_udp_socket, SocketListenAddr, TcpNullAcker, TcpSource}, tcp::TcpKeepaliveConfig, tls::{MaybeTlsSettings, TlsSourceConfig}, - udp, SourceSender, + SourceSender, }; /// Configuration for the `syslog` source. @@ -318,7 +319,7 @@ pub fn udp( })?; if let Some(receive_buffer_bytes) = receive_buffer_bytes { - if let Err(error) = udp::set_receive_buffer_size(&socket, receive_buffer_bytes) { + if let Err(error) = net::set_receive_buffer_size(&socket, receive_buffer_bytes) { warn!(message = "Failed configuring receive buffer size on UDP socket.", %error); } } diff --git a/src/udp.rs b/src/udp.rs deleted file mode 100644 index 522a7f2cf1169..0000000000000 --- a/src/udp.rs +++ /dev/null @@ -1,15 +0,0 @@ -#![allow(missing_docs)] -use socket2::SockRef; -use tokio::net::UdpSocket; - -// This function will be obsolete after tokio/mio internally use `socket2` and expose the methods to -// apply options to a socket. -pub fn set_receive_buffer_size(socket: &UdpSocket, size: usize) -> std::io::Result<()> { - SockRef::from(socket).set_recv_buffer_size(size) -} - -// This function will be obsolete after tokio/mio internally use `socket2` and expose the methods to -// apply options to a socket. 
-pub fn set_send_buffer_size(socket: &UdpSocket, size: usize) -> std::io::Result<()> { - SockRef::from(socket).set_send_buffer_size(size) -} diff --git a/website/cue/reference/components/sinks/base/statsd.cue b/website/cue/reference/components/sinks/base/statsd.cue index a05e917bcd892..3a9273c8b5132 100644 --- a/website/cue/reference/components/sinks/base/statsd.cue +++ b/website/cue/reference/components/sinks/base/statsd.cue @@ -31,18 +31,17 @@ base: components: sinks: statsd: configuration: { description: """ The address to connect to. - Both IP address and hostname are accepted formats. + Both IP addresses and hostnames/fully qualified domain names (FQDNs) are accepted formats. The address _must_ include a port. """ relevant_when: "mode = \"tcp\" or mode = \"udp\"" required: true - type: string: examples: ["92.12.333.224:5000", "https://somehost:5000"] + type: string: examples: ["92.12.333.224:5000", "somehost:5000"] } batch: { - description: "Event batching behavior." - relevant_when: "mode = \"udp\"" - required: false + description: "Event batching behavior." + required: false type: object: options: { max_bytes: { description: """ @@ -114,14 +113,13 @@ base: components: sinks: statsd: configuration: { required: true type: string: examples: ["/path/to/socket"] } - send_buffer_bytes: { + send_buffer_size: { description: """ The size of the socket's send buffer. If set, the value of the setting is passed via the `SO_SNDBUF` option. """ - relevant_when: "mode = \"tcp\" or mode = \"udp\"" - required: false + required: false type: uint: { examples: [ 65536, @@ -225,4 +223,16 @@ base: components: sinks: statsd: configuration: { } } } + unix_mode: { + description: "The Unix socket mode to use." + relevant_when: "mode = \"unix\"" + required: false + type: string: { + default: "Stream" + enum: { + Datagram: "Datagram-oriented (`SOCK_DGRAM`)." + Stream: "Stream-oriented (`SOCK_STREAM`)." + } + } + } } From 5d90cff55c04701692dfe2b92416c3cf4ded5a4d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 20:01:46 +0000 Subject: [PATCH 049/236] chore(deps): bump regex from 1.8.2 to 1.8.3 (#17494) Bumps [regex](https://github.com/rust-lang/regex) from 1.8.2 to 1.8.3.
Changelog (sourced from regex's changelog):

1.8.3 (2023-05-25)

This is a patch release that fixes a bug where the regex would report a match at every position even when it shouldn't. This could occur in a very small subset of regexes, usually an alternation of simple literals that have particular properties. (See the issue linked below for a more precise description.)
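As a rough, hypothetical illustration (not taken from this changelog), the affected class of patterns is an alternation of simple literals; with the crate's public `Regex` and `find_iter` API, such a pattern should only report matches where one of the literals actually occurs:

```rust
// Hypothetical sanity check, not part of this patch: an alternation of simple
// literals should match only at positions where a literal occurs, never at
// every position in the haystack.
use regex::Regex;

fn main() {
    let re = Regex::new("foo|foobar|baz").unwrap();
    let matches: Vec<_> = re
        .find_iter("xxfoobarxxbaz")
        .map(|m| (m.start(), m.as_str()))
        .collect();
    // Leftmost-first semantics: `foo` wins over `foobar` at offset 2.
    assert_eq!(matches, vec![(2, "foo"), (10, "baz")]);
}
```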
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- lib/codecs/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- vdev/Cargo.toml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d67aefb341520..dba9f2c85b63a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6715,9 +6715,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1a59b5d8e97dee33696bf13c5ba8ab85341c002922fba050069326b9c498974" +checksum = "81ca098a9821bd52d6b24fd8b10bd081f47d39c22778cafaa75a2857a62c6390" dependencies = [ "aho-corasick 1.0.1", "memchr", diff --git a/Cargo.toml b/Cargo.toml index 8af29a1b3ad7b..47f28012e4213 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,7 +293,7 @@ rand = { version = "0.8.5", default-features = false, features = ["small_rng"] } rand_distr = { version = "0.4.3", default-features = false } rdkafka = { version = "0.31.0", default-features = false, features = ["tokio", "libz", "ssl", "zstd"], optional = true } redis = { version = "0.23.0", default-features = false, features = ["connection-manager", "tokio-comp", "tokio-native-tls-comp"], optional = true } -regex = { version = "1.8.2", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.3", default-features = false, features = ["std", "perf"] } roaring = { version = "0.10.1", default-features = false, optional = true } seahash = { version = "4.1.0", default-features = false } semver = { version = "1.0.17", default-features = false, features = ["serde", "std"], optional = true } diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 71cbe5dbdc7f7..16463c94e33a1 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -17,7 +17,7 @@ memchr = { version = "2", default-features = false } once_cell = { version = "1.17", default-features = false } ordered-float = { version = "3.7.0", default-features = false } prost = { version = "0.11.8", default-features = false, features = ["std"] } -regex = { version = "1.8.2", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.3", default-features = false, features = ["std", "perf"] } serde = { version = "1", default-features = false, features = ["derive"] } serde_json = { version = "1", default-features = false } smallvec = { version = "1", default-features = false, features = ["union"] } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 1187e0a415449..4df974f396b6e 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -38,7 +38,7 @@ proptest = { version = "1.2", optional = true } prost-types = { version = "0.11", default-features = false } prost = { version = "0.11", default-features = false, features = ["std"] } quanta = { version = "0.11.0", default-features = false } -regex = { version = "1.8.2", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.3", default-features = false, features = ["std", "perf"] } ryu = { version = "1", default-features = false } serde = { version = "1.0.163", default-features = false, features = ["derive", "rc"] } serde_json = { version = "1.0.96", default-features = false } diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 01952ef5adf9a..a71429cc5aecb 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -30,7 +30,7 @@ os_info = { version = "3.7.0", default-features = false } 
# watch https://github.com/epage/anstyle for official interop with Clap owo-colors = { version = "3.5.0", features = ["supports-colors"] } paste = "1.0.12" -regex = { version = "1.8.2", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.3", default-features = false, features = ["std", "perf"] } reqwest = { version = "0.11", features = ["json", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.96" From cc307460df2b45af6f33311d493c6bd7f9d44da5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 May 2023 09:02:43 -0400 Subject: [PATCH 050/236] chore(deps): Bump quote from 1.0.27 to 1.0.28 (#17496) Bumps [quote](https://github.com/dtolnay/quote) from 1.0.27 to 1.0.28.
Release notes (sourced from quote's releases):

1.0.28

  • Enable proc_macro support on wasm targets (#254)

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=quote&package-manager=cargo&previous-version=1.0.27&new-version=1.0.28)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 134 ++++++++++++++++++++++++++--------------------------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dba9f2c85b63a..671dc48fbc247 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -245,7 +245,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c6368f9ae5c6ec403ca910327ae0c9437b0a85255b6950c90d497e6177f6e5e" dependencies = [ "proc-macro-hack", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -447,7 +447,7 @@ dependencies = [ "darling 0.14.2", "proc-macro-crate 1.2.1", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", "thiserror", ] @@ -567,7 +567,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -589,7 +589,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -606,7 +606,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -1440,7 +1440,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd9e32d7420c85055e8107e5b2463c4eeefeaac18b52359fe9f9c08a18f342b2" dependencies = [ - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -1584,7 +1584,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61820b4c5693eafb998b1e67485423c923db4a75f72585c247bdee32bad81e7b" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -1595,7 +1595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c76cdbfa13def20d1f8af3ae7b3c6771f06352a74221d8851262ac384c122b8e" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -1666,7 +1666,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -1751,7 +1751,7 @@ dependencies = [ "cached_proc_macro_types", "darling 0.14.2", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -1976,7 +1976,7 @@ checksum = "81d7dc0031c3a59a04fc2ba395c8e2dd463cba1859275f065d225f6122221b45" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -2474,7 +2474,7 @@ dependencies = [ "codespan-reporting", "once_cell", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "scratch", "syn 1.0.109", ] @@ -2492,7 +2492,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -2525,7 +2525,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", ] @@ -2539,7 +2539,7 @@ dependencies = [ "fnv", "ident_case", 
"proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", ] @@ -2551,7 +2551,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -2562,7 +2562,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" dependencies = [ "darling_core 0.14.2", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -2638,7 +2638,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -2649,7 +2649,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -2661,7 +2661,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -2918,7 +2918,7 @@ checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -2930,7 +2930,7 @@ checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -2942,7 +2942,7 @@ checksum = "11f36e95862220b211a6e2aa5eca09b4fa391b13cd52ceb8035a24bf65a79de2" dependencies = [ "once_cell", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -2962,7 +2962,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -3168,7 +3168,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4c81935e123ab0741c4c4f0d9b8377e5fb21d3de7e062fa4b1263b1fbcba1ea" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -3349,7 +3349,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -3432,7 +3432,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb19fe8de3ea0920d282f7b77dd4227aea6b8b999b42cdf0ca41b2472b14443a" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -3532,7 +3532,7 @@ dependencies = [ "heck 0.4.0", "lazy_static", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "serde", "serde_json", "syn 1.0.109", @@ -4900,7 +4900,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -5417,7 +5417,7 @@ checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ 
"proc-macro-crate 1.2.1", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -5429,7 +5429,7 @@ checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.2.1", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -5603,7 +5603,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -5838,7 +5838,7 @@ dependencies = [ "pest", "pest_meta", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -5926,7 +5926,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -6186,7 +6186,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", "version_check", ] @@ -6198,7 +6198,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "version_check", ] @@ -6307,7 +6307,7 @@ dependencies = [ "anyhow", "itertools", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -6336,7 +6336,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -6435,7 +6435,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -6450,9 +6450,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2 1.0.58", ] @@ -6850,7 +6850,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -7335,7 +7335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -7346,7 +7346,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -7398,7 +7398,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -7457,7 +7457,7 @@ checksum = 
"e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -7469,7 +7469,7 @@ checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" dependencies = [ "darling 0.14.2", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -7731,7 +7731,7 @@ checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -7872,7 +7872,7 @@ dependencies = [ "heck 0.3.3", "proc-macro-error", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -7890,7 +7890,7 @@ checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "rustversion", "syn 1.0.109", ] @@ -7929,7 +7929,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "unicode-ident", ] @@ -7940,7 +7940,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "unicode-ident", ] @@ -7957,7 +7957,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -8110,7 +8110,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -8255,7 +8255,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -8476,7 +8476,7 @@ dependencies = [ "prettyplease", "proc-macro2 1.0.58", "prost-build", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -8580,7 +8580,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -8851,7 +8851,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -8881,7 +8881,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3e1c30cedd24fc597f7d37a721efdbdc2b1acae012c1ef1218f4c7c2c0f3e7" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", ] @@ -9421,7 +9421,7 @@ dependencies = [ "indexmap", "once_cell", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "serde", "serde_json", "syn 1.0.109", @@ -9434,7 +9434,7 @@ version = "0.1.0" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "serde", "serde_derive_internals", "syn 1.0.109", @@ -9700,7 +9700,7 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", ] [[package]] @@ -9801,7 +9801,7 @@ dependencies = [ "log", "once_cell", "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", "wasm-bindgen-shared", ] @@ -9824,7 +9824,7 @@ version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" dependencies = [ - "quote 1.0.27", + "quote 1.0.28", "wasm-bindgen-macro-support", ] @@ -9835,7 +9835,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 2.0.10", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -10240,7 +10240,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6505e6815af7de1746a08f69c69606bb45695a17149517680f3b2149713b19a3" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", ] @@ -10260,7 +10260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ "proc-macro2 1.0.58", - "quote 1.0.27", + "quote 1.0.28", "syn 1.0.109", "synstructure", ] From f261781b5ce4389fb23017a2d4892c7f16753ad9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 May 2023 13:03:25 +0000 Subject: [PATCH 051/236] chore(deps): Bump base64 from 0.21.1 to 0.21.2 (#17488) Bumps [base64](https://github.com/marshallpierce/rust-base64) from 0.21.1 to 0.21.2.
Changelog (sourced from base64's changelog):

0.21.2

  • Rollback MSRV to 1.57.0 -- only dev dependencies need 1.60, not the main code

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=base64&package-manager=cargo&previous-version=0.21.1&new-version=0.21.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 4 ++-- lib/vector-core/Cargo.toml | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 671dc48fbc247..70f13a98996a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1378,9 +1378,9 @@ checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" [[package]] name = "base64" -version = "0.21.1" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f1e31e207a6b8fb791a38ea3105e6cb541f55e4d029902d3039a4ad07cc4105" +checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64-simd" @@ -1512,7 +1512,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af254ed2da4936ef73309e9597180558821cb16ae9bba4cb24ce6b612d8d80ed" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", "bollard-stubs", "bytes 1.4.0", "chrono", @@ -4361,7 +4361,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd990069640f9db34b3b0f7a1afc62a05ffaa3be9b66aa3c313f58346df7f788" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", "bytes 1.4.0", "chrono", "http", @@ -5536,7 +5536,7 @@ dependencies = [ "async-compat", "async-trait", "backon", - "base64 0.21.1", + "base64 0.21.2", "bytes 1.4.0", "chrono", "flagset", @@ -6760,7 +6760,7 @@ version = "0.11.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.1", + "base64 0.21.2", "bytes 1.4.0", "encoding_rs", "futures-core", @@ -8443,7 +8443,7 @@ dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.1", + "base64 0.21.2", "bytes 1.4.0", "flate2", "futures-core", @@ -9131,7 +9131,7 @@ dependencies = [ "azure_identity", "azure_storage", "azure_storage_blobs", - "base64 0.21.1", + "base64 0.21.2", "bloom", "bollard", "bytes 1.4.0", @@ -9448,7 +9448,7 @@ version = "0.1.0" dependencies = [ "async-graphql", "async-trait", - "base64 0.21.1", + "base64 0.21.2", "bitmask-enum", "bytes 1.4.0", "chrono", @@ -9607,7 +9607,7 @@ dependencies = [ "anymap", "arbitrary", "base16", - "base64 0.21.1", + "base64 0.21.2", "bytes 1.4.0", "cbc", "cfb-mode", @@ -10169,7 +10169,7 @@ checksum = "bd7b0b5b253ebc0240d6aac6dd671c495c467420577bf634d3064ae7e6fa2b4c" dependencies = [ "assert-json-diff", "async-trait", - "base64 0.21.1", + "base64 0.21.2", "deadpool", "futures 0.3.28", "futures-timer", diff --git a/Cargo.toml b/Cargo.toml index 47f28012e4213..23819aa9b0831 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -233,7 +233,7 @@ arc-swap = { version = "1.6", default-features = false, optional = true } async-compression = { version = "0.4.0", default-features = false, features = ["tokio", "gzip", "zstd"], optional = true } apache-avro = { version = "0.14.0", default-features = false, optional = true } axum = { version = "0.6.18", default-features = false } -base64 = { version = "0.21.1", default-features = false, optional = true } +base64 = { version = "0.21.2", default-features = false, optional = true } bloom = { version = "0.3.2", default-features = false, optional = true } bollard = { version = "0.14.0", default-features = false, features = ["ssl", "chrono"], optional = true } bytes = { version = "1.4.0", default-features = false, features = ["serde"] } @@ 
-343,7 +343,7 @@ azure_core = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b azure_identity = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["enable_reqwest"] } azure_storage_blobs = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["azurite_workaround"] } azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["azurite_workaround"] } -base64 = "0.21.1" +base64 = "0.21.2" criterion = { version = "0.5.0", features = ["html_reports", "async_tokio"] } itertools = { version = "0.10.5", default-features = false } libc = "0.2.144" diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 4df974f396b6e..c62ebed0d56d3 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -77,7 +77,7 @@ schannel = "0.1.21" prost-build = "0.11" [dev-dependencies] -base64 = "0.21.1" +base64 = "0.21.2" chrono-tz = { version = "0.8.2", default-features = false } criterion = { version = "0.5.0", features = ["html_reports"] } env-test-util = "1.0.1" From 2ad5b478f8948d0c3d92197f90100148cebda237 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 May 2023 13:03:51 +0000 Subject: [PATCH 052/236] chore(deps): bump aws-sigv4 from 0.55.1 to 0.55.3 (#17481) Bumps [aws-sigv4](https://github.com/awslabs/smithy-rs) from 0.55.1 to 0.55.3.

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=aws-sigv4&package-manager=cargo&previous-version=0.55.1&new-version=0.55.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 18 +++++++++--------- Cargo.toml | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 70f13a98996a0..220f5b8eb45c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -940,11 +940,11 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "0.55.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab4eebc8ec484fb9eab04b15a5d1e71f3dc13bee8fdd2d9ed78bcd6ecbd7192" +checksum = "9d2ce6f507be68e968a33485ced670111d1cbad161ddbbab1e313c03d37d8f4c" dependencies = [ - "aws-smithy-http 0.55.1", + "aws-smithy-http 0.55.3", "form_urlencoded", "hex", "hmac", @@ -1066,11 +1066,11 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.55.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03bcc02d7ed9649d855c8ce4a735e9848d7b8f7568aad0504c158e3baa955df8" +checksum = "2b3b693869133551f135e1f2c77cb0b8277d9e3e17feaf2213f735857c4f0d28" dependencies = [ - "aws-smithy-types 0.55.1", + "aws-smithy-types 0.55.3", "bytes 1.4.0", "bytes-utils", "futures-core", @@ -1161,9 +1161,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "0.55.1" +version = "0.55.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0afc731fd1417d791f9145a1e0c30e23ae0beaab9b4814017708ead2fc20f1" +checksum = "16a3d0bf4f324f4ef9793b86a1701d9700fbcdbd12a846da45eed104c634c6e8" dependencies = [ "base64-simd", "itoa", @@ -9119,7 +9119,7 @@ dependencies = [ "aws-sdk-kinesis", "aws-sdk-s3", "aws-sdk-sqs", - "aws-sigv4 0.55.1", + "aws-sigv4 0.55.3", "aws-smithy-async", "aws-smithy-client", "aws-smithy-http 0.51.0", diff --git a/Cargo.toml b/Cargo.toml index 23819aa9b0831..095abe2743a30 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -166,7 +166,7 @@ aws-sdk-elasticsearch = {version = "0.21.0", default-features = false, features aws-sdk-firehose = { version = "0.21.0", default-features = false, features = ["native-tls"], optional = true } aws-sdk-kinesis = { version = "0.21.0", default-features = false, features = ["native-tls"], optional = true } aws-types = { version = "0.51.0", default-features = false, features = ["hardcoded-credentials"], optional = true } -aws-sigv4 = { version = "0.55.1", default-features = false, features = ["sign-http"], optional = true } +aws-sigv4 = { version = "0.55.3", default-features = false, features = ["sign-http"], optional = true } aws-config = { version = "0.51.0", default-features = false, features = ["native-tls"], optional = true } aws-smithy-async = { version = "0.51.0", default-features = false, optional = true } aws-smithy-client = { version = "0.51.0", default-features = false, features = ["client-hyper"], optional = true} From 4ce3278ba5c2b92391818ff85c410a01f6b71cbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 May 2023 14:47:28 +0000 Subject: [PATCH 053/236] chore(deps): Bump proc-macro2 from 1.0.58 to 1.0.59 (#17495) Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.58 to 1.0.59.
Release notes (sourced from proc-macro2's releases):

1.0.59

  • Enable proc_macro support on wasm targets (#388)
Commits
  • 42f4f23 Release 1.0.59
  • 529e7af Merge pull request #390 from dtolnay/procmacrofrom
  • 92070f3 Render rustdoc cfg banner on proc-macro-only From impls
  • 13e3756 Merge pull request #389 from dtolnay/useprocmacro
  • 2993201 Use_proc_macro is now equivalent to feature="proc-macro"
  • d62d078 Merge pull request #388 from dtolnay/wasm
  • e60c466 Extern crate proc_macro available on all wasm targets
  • d51f395 Temporarily disable honggfuzz CI
  • 22544fe Merge pull request #386 from rickwebiii/rweber/emscripten
  • b8e751a enable proc_macro on wasm32-unknown-emscripten
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=proc-macro2&package-manager=cargo&previous-version=1.0.58&new-version=1.0.59)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 132 ++++++++++++++++++++++++++--------------------------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 220f5b8eb45c4..a4c43c93cd19b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -446,7 +446,7 @@ dependencies = [ "async-graphql-parser", "darling 0.14.2", "proc-macro-crate 1.2.1", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", "thiserror", @@ -566,7 +566,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -588,7 +588,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -605,7 +605,7 @@ version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -1573,7 +1573,7 @@ dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", "proc-macro-crate 0.1.5", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "syn 1.0.109", ] @@ -1583,7 +1583,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61820b4c5693eafb998b1e67485423c923db4a75f72585c247bdee32bad81e7b" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -1594,7 +1594,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c76cdbfa13def20d1f8af3ae7b3c6771f06352a74221d8851262ac384c122b8e" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -1665,7 +1665,7 @@ version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -1750,7 +1750,7 @@ checksum = "e10ca87c81aaa3a949dbbe2b5e6c2c45dbc94ba4897e45ea31ff9ec5087be3dc" dependencies = [ "cached_proc_macro_types", "darling 0.14.2", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -1975,7 +1975,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81d7dc0031c3a59a04fc2ba395c8e2dd463cba1859275f065d225f6122221b45" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -2473,7 +2473,7 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "scratch", "syn 1.0.109", @@ -2491,7 +2491,7 @@ version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -2524,7 +2524,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.58", + 
"proc-macro2 1.0.59", "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", @@ -2538,7 +2538,7 @@ checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", @@ -2637,7 +2637,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -2648,7 +2648,7 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -2660,7 +2660,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "rustc_version 0.4.0", "syn 1.0.109", @@ -2917,7 +2917,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -2929,7 +2929,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -2941,7 +2941,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11f36e95862220b211a6e2aa5eca09b4fa391b13cd52ceb8035a24bf65a79de2" dependencies = [ "once_cell", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -2961,7 +2961,7 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -3167,7 +3167,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4c81935e123ab0741c4c4f0d9b8377e5fb21d3de7e062fa4b1263b1fbcba1ea" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -3348,7 +3348,7 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -3431,7 +3431,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb19fe8de3ea0920d282f7b77dd4227aea6b8b999b42cdf0ca41b2472b14443a" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -3531,7 +3531,7 @@ dependencies = [ "graphql-parser", "heck 0.4.0", "lazy_static", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "serde", "serde_json", @@ -3545,7 +3545,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52fc9cde811f44b15ec0692b31e56a3067f6f431c5ace712f286e47c1dacc98" dependencies = [ "graphql_client_codegen", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", 
"syn 1.0.109", ] @@ -4899,7 +4899,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -5416,7 +5416,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.2.1", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -5428,7 +5428,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.2.1", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -5602,7 +5602,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -5837,7 +5837,7 @@ checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -5925,7 +5925,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -6140,7 +6140,7 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c142c0e46b57171fe0c528bee8c5b7569e80f0c17e377cd0e30ea57dbc11bb51" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "syn 1.0.109", ] @@ -6185,7 +6185,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", "version_check", @@ -6197,7 +6197,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "version_check", ] @@ -6225,9 +6225,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" +checksum = "6aeca18b86b413c660b781aa319e4e2648a3e6f9eadc9b47e9038e6fe9f3451b" dependencies = [ "unicode-ident", ] @@ -6306,7 +6306,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -6335,7 +6335,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -6434,7 +6434,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -6454,7 +6454,7 @@ version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", ] [[package]] @@ -6849,7 +6849,7 @@ version = "0.7.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -7334,7 +7334,7 @@ version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -7345,7 +7345,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -7397,7 +7397,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -7456,7 +7456,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -7468,7 +7468,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" dependencies = [ "darling 0.14.2", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -7730,7 +7730,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -7871,7 +7871,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -7889,7 +7889,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "rustversion", "syn 1.0.109", @@ -7928,7 +7928,7 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "unicode-ident", ] @@ -7939,7 +7939,7 @@ version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "unicode-ident", ] @@ -7956,7 +7956,7 @@ version = "0.12.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", "unicode-xid 0.2.4", @@ -8109,7 +8109,7 @@ version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -8254,7 +8254,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -8474,7 +8474,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "prost-build", "quote 1.0.28", "syn 1.0.109", @@ -8579,7 +8579,7 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -8850,7 +8850,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -8880,7 +8880,7 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3e1c30cedd24fc597f7d37a721efdbdc2b1acae012c1ef1218f4c7c2c0f3e7" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", ] @@ -9420,7 +9420,7 @@ dependencies = [ "darling 0.13.4", "indexmap", "once_cell", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "serde", "serde_json", @@ -9433,7 +9433,7 @@ name = "vector-config-macros" version = "0.1.0" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "serde", "serde_derive_internals", @@ -9699,7 +9699,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", ] @@ -9800,7 +9800,7 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", "wasm-bindgen-shared", @@ -9834,7 +9834,7 @@ version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 2.0.10", "wasm-bindgen-backend", @@ -10239,7 +10239,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6505e6815af7de1746a08f69c69606bb45695a17149517680f3b2149713b19a3" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", ] @@ -10259,7 +10259,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ - 
"proc-macro2 1.0.58", + "proc-macro2 1.0.59", "quote 1.0.28", "syn 1.0.109", "synstructure", From a551f33da2b752229bd8139c72af80ce8b149638 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Fri, 26 May 2023 21:12:45 +0100 Subject: [PATCH 054/236] chore: RFC for Data Volume Insights (#17322) [Rendered](https://github.com/vectordotdev/vector/blob/stephen/data_volume_rfc/rfcs/2023-05-03-data-volume-metrics.md) --------- Signed-off-by: Stephen Wakely --- rfcs/2023-05-03-data-volume-metrics.md | 240 +++++++++++++++++++++++++ 1 file changed, 240 insertions(+) create mode 100644 rfcs/2023-05-03-data-volume-metrics.md diff --git a/rfcs/2023-05-03-data-volume-metrics.md b/rfcs/2023-05-03-data-volume-metrics.md new file mode 100644 index 0000000000000..368fe874cd420 --- /dev/null +++ b/rfcs/2023-05-03-data-volume-metrics.md @@ -0,0 +1,240 @@ +# RFC 2023-05-02 - Data Volume Insights metrics + +Vector needs to be able to emit accurate metrics that can be usefully queried +to give users insights into the volume of data moving through the system. + +## Scope + +### In scope + +- All volume event metrics within Vector need to emit the estimated JSON size of the + event. With a consistent method for determining the size it will be easier to accurately + compare data in vs data out. + - `component_received_event_bytes_total` + - `component_sent_event_bytes_total` + - `component_received_event_total` + - `component_sent_event_total` +- The metrics sent by each sink needs to be tagged with the source id of the + event so the route an event takes through Vector can be queried. +- Each event needs to be labelled with a `service`. This is a new concept + within Vector and represents the application that generated the log, + metric or trace. +- The service tag and source tag in the metrics needs to be opt in so customers + that don't need the increased cardinality are unaffected. + +### Out of scope + +- Separate metrics, `component_sent_bytes_total` and `component_received_bytes_total` + that indicate network bytes sent by Vector are not considered here. + +## Pain + +Currently it is difficult to accurately gauge the volume of data that is moving +through Vector. It is difficult to query where data being sent out has come +from. + +## Proposal + +### User Experience + +Global config options will be provided to indicate that the `service` tag and the +`source` tag should be sent. For example: + +```yaml +telemetry: + tags: + service: true + source_id: true +``` + +This will cause Vector to emit a metric like (note the last two tags): + +```prometheus +vector_component_sent_event_bytes_total{component_id="out",component_kind="sink",component_name="out",component_type="console" + ,host="machine",service="potato",source_id="stdin"} 123 +``` + +The default will be to not emit these tags. + +### Implementation + +#### Metric tags + +**service** - to attach the service, we need to add a new meaning to Vector - + `service`. Any sources that receive data that could potentially + be considered a service will need to indicate which field means + `service`. This work has largely already been done with the + LogNamespacing work, so it will be trivial to add this new field. + Not all sources will be able to specify a specific field to + indicate the `service`. In time it will be possible for this to + be accomplished through `VRL`. + +**source_id** - A new field will be added to the [Event metadata][event_metadata] - + `Arc` that will indicate the source of the event. 
+ `OutputId` will need to be serializable so it can be stored in + the disk buffer. Since this field is just an identifier, it can + still be used even if the source no longer exists when the event + is consumed by a sink. + +We will need to do an audit of all components to ensure the +bytes emitted for the `component_received_event_bytes_total` and +`component_sent_event_bytes_total` metrics are the estimated JSON size of the +event. + +These tags will be given the name that was configured in [User Experience] +(#user-experience). + +Transforms `reduce` and `aggregate` combine multiple events together. In this +case the `source` and `service` of the first event will be taken. + +If there is no `source` a source of `-` will be emitted. The only way this can +happen is if the event was created by the `lua` transform. + +If there is no `service` available, a service of `-` will be emitted. + +Emitting a `-` rather than not emitting anything at all makes it clear that +there was no value rather than it just having been forgotten and ensures it +is clear that the metric represents no `service` or `source` rather than the +aggregate value across all services. + +The [Component Spec][component_spec] will need updating to indicate these tags +will need including. + +**Performance** - There is going to be a performance hit when emitting these metrics. +Currently for each batch a simple event is emitted containing the count and size +of the entire batch. With this change it will be necessary to scan the entire +batch to obtain the count of source, service combinations of events before emitting +the counts. This will involve additional allocations to maintain the counts as well +as the O(1) scan. + +#### `component_received_event_bytes_total` + +This metric is emitted by the framework [here][source_sender], so it looks like +the only change needed is to add the service tag. + +#### `component_sent_event_bytes_total` + +For stream based sinks this will typically be the byte value returned by +`DriverResponse::events_sent`. + +Despite being in the [Component Spec][component_spec], not all sinks currently +conform to this. + +As an example, from a cursory glance over a couple of sinks: + +The Amqp sink currently emits this value as the length of the binary +data that is sent. By the time the data has reached the code where the +`component_sent_event_bytes_total` event is emitted, that event has been +encoded and the actual estimated JSON size has been lost. The sink will need +to be updated so that when the event is encoded, the encoded event together +with the pre-encoded JSON bytesize will be sent to the service where the event +is emitted. + +The Kafka sink also currently sends the binary size, but it looks like the +estimated JSON bytesize is easily accessible at the point of emitting, so would +not need too much of a change. + +To ensure that the correct metric is sent in a type-safe manner, we will wrap +the estimated JSON size in a newtype: + +```rust +pub struct JsonSize(usize); +``` + +The `EventsSent` metric will only accept this type. + +### Registered metrics + +It is currently not possible to have dynamic tags with preregistered metrics. + +Preregistering these metrics are essential to ensure that they don't expire. + +The current mechanism to expire metrics is to check if a handle to the given +metric is being held. If it isn't, and nothing has updated that metric in +the last cycle - the metric is dropped. 
If a metric is dropped, the next time
+that event is emitted with those tags, the count starts at zero again.
+
+We will need to introduce a registered event caching layer that will register
+and cache new events keyed on the tags that are sent to it.
+
+Currently a registered metric is stored in a `Registered<Event>`.
+
+We will need a new struct that can wrap this that will be generic over a tuple of
+the tags for each event and the event - e.g. `Cached<(String, String), EventsSent>`.
+This struct will maintain a `BTreeMap` of tags -> `Registered<Event>`. Since this will
+need to be shared across threads, the cache will need to be stored in an `RwLock`.
+
+In pseudo-Rust:
+
+```rust
+struct Cached<Tags, Event> {
+    // Shared across threads, hence the `Arc<RwLock<..>>`.
+    cache: Arc<RwLock<BTreeMap<Tags, Registered<Event>>>>,
+    register: fn(Tags) -> Registered<Event>,
+}
+
+impl<Tags: Ord + Clone, Event> Cached<Tags, Event> {
+    fn emit(&mut self, tags: Tags, value: Event) {
+        // Locking omitted for brevity.
+        if let Some(event) = self.cache.get(&tags) {
+            event.emit(value);
+        } else {
+            let event = (self.register)(tags.clone());
+            event.emit(value);
+            self.cache.insert(tags, event);
+        }
+    }
+}
+```
+
+A fuller runnable sketch of this caching layer is shown at the end of this document.
+
+## Rationale
+
+The ability to visualize data flowing through Vector will allow users to ascertain
+the effectiveness of their current use of Vector. This will enable users to
+optimise their configurations to make the best use of Vector's features.
+
+## Drawbacks
+
+The additional tags being added to the metrics will increase the cardinality of
+those metrics if they are enabled.
+
+## Prior Art
+
+## Alternatives
+
+We could use an alternative metric instead of the estimated JSON size.
+
+- *Network bytes* - This provides a more accurate picture of the actual data being received
+  and sent by Vector, but will regularly produce different sizes for an incoming event
+  than for the corresponding outgoing event.
+- *In-memory size* - The size of the event as held in memory. This may be more accurate in
+  determining the amount of memory Vector will be utilizing at any time, but will often be
+  less accurate compared to the data being sent and received, which is often JSON.
+
+## Outstanding Questions
+
+## Plan Of Attack
+
+Incremental steps to execute this change. These will be converted to issues after the RFC is approved:
+
+- [ ] Add the `source` field to the Event metadata to indicate the source the event has come from.
+- [ ] Update the Volume event metrics to take a `JsonSize` value. Use the compiler to ensure all metrics
+      emitted use this. The `EstimatedJsonEncodedSizeOf` trait will be updated to return a `JsonSize`.
+- [ ] Add the Service meaning. Update any sources that potentially create a service to point the meaning
+      to the relevant field.
+- [ ] Introduce an event caching layer that caches registered events based on the tags sent to it.
+- [ ] Update the emitted events to accept the new tags, taking the `telemetry` configuration options
+      into account.
+- [ ] There is going to be a hit on performance with these changes. Add benchmarking to help us understand
+      how large the impact will be.
+
+## Future Improvements
+
+- Logs emitted by Vector should also be tagged with `source_id` and `service`.
+- This RFC proposes storing the source and service as strings. This incurs a cost since scanning each
+  event to get the counts of events by source and service will involve multiple string comparisons. A
+  future optimization could be to hash the combination of these values at the source into a single
+  integer.
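To make the proposed caching layer concrete, here is a minimal, runnable sketch of the same idea. It is only an illustration under simplifying assumptions, not Vector's implementation: `Registered` is reduced to a toy handle that accumulates and prints a counter, the tag type is assumed to be a `(source_id, service)` pair of strings, and none of the type or metric names below are taken from Vector's code base.

```rust
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, RwLock};

// Toy stand-in for a registered metric handle: it owns its tag-qualified name
// and accumulates a running total, printing it on every emit.
struct Registered {
    name: String,
    total: AtomicU64,
}

impl Registered {
    fn emit(&self, value: u64) {
        let total = self.total.fetch_add(value, Ordering::Relaxed) + value;
        println!("{} => {}", self.name, total);
    }
}

// Cache of registered handles keyed by their tag combination. A handle is
// registered lazily the first time a tag combination is seen and reused for
// every later emit, so the underlying metric keeps a live handle and never
// expires between emits.
struct Cached<Tags> {
    cache: Arc<RwLock<BTreeMap<Tags, Arc<Registered>>>>,
    register: fn(&Tags) -> Registered,
}

impl<Tags: Ord> Cached<Tags> {
    fn new(register: fn(&Tags) -> Registered) -> Self {
        Self {
            cache: Arc::new(RwLock::new(BTreeMap::new())),
            register,
        }
    }

    fn emit(&self, tags: Tags, value: u64) {
        // Take the write lock and insert-or-reuse the handle for these tags.
        let mut cache = self.cache.write().unwrap();
        let event = cache
            .entry(tags)
            .or_insert_with_key(|tags| Arc::new((self.register)(tags)));
        event.emit(value);
    }
}

fn main() {
    // The tag tuple is (source_id, service); the values are invented for the example.
    let sent: Cached<(String, String)> = Cached::new(|(source_id, service)| Registered {
        name: format!(
            "component_sent_event_bytes_total{{source_id=\"{source_id}\",service=\"{service}\"}}"
        ),
        total: AtomicU64::new(0),
    });

    sent.emit(("stdin".to_string(), "potato".to_string()), 123);
    sent.emit(("stdin".to_string(), "potato".to_string()), 45);
    sent.emit(("file".to_string(), "carrot".to_string()), 67);
}
```

Taking the write lock on every emit keeps the sketch short; a real implementation would probably try a read lock first and only fall back to a write lock when a new tag combination has to be registered.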
+ +[component_spec]: https://github.com/vectordotdev/vector/blob/master/docs/specs/component.md#componenteventssent +[source_sender]: https://github.com/vectordotdev/vector/blob/master/src/source_sender/mod.rs#L265-L268 +[event_metadata]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/src/event/metadata.rs#L20-L38 From 98c54ad3a371ac710151367a953252f9eb293548 Mon Sep 17 00:00:00 2001 From: Toby Lawrence Date: Fri, 26 May 2023 16:51:49 -0400 Subject: [PATCH 055/236] chore(observability): remove deprecated internal metrics + massive cleanup to vector top and graphql API (#17516) ## Context This PR is primarily, and simply, focused on one thing: removing now deprecated internal metrics. Specifically: - `events_in_total` - `events_out_total` - `processed_bytes_total` - `processed_events_total` These metrics have been deprecated for a while, replaced by the metrics defined in the [Component Specification](). For example, `component_sent_events_total` replaced `events_out_total`, and `processed_bytes_total` is replaced by `component_sent_bytes_total`/`component_received_bytes_total`: no more having to remember what direction the metric refers to based on what type of component is emitting it. ## Solution The change to simply stop emitting these deprecated metrics is straightforward, but most of the work in this PR centered around updating the GraphQL API, and subsequently `vector top`, to switch from the `processed_*` metrics to their `component_*` counterparts. While in the area, I adjusted some of the `vector top` output to display separated bytes in/bytes out, similar to the existing events in/events out split. This also involved a small amount of work to adjust the layout constraints, and so on, to accommodate for that. Additionally, many updates were made to the component Cue files to remove references to these now-removed metrics. ## Reviewer Notes ### Dichotomy between `processed_bytes_total` and the `component`-prefixed replacements Prior to this PR, we were backfilling `processed_bytes_total` in the following way: - for sources, alias `component_received_bytes_total` to `processed_bytes_total` - for sinks, alias `component_sent_bytes_total` to `processed_bytes_total` As such, we never emitted this metric for transforms, and `processed_bytes_total` was an analogue for network bytes. A lot of the work has tried to reflect that, such as in the GraphQL API, where spots that had `processed_bytes` are now replaced with the component type-relevant value, being either `received_bytes` or `sent_bytes`. Closes #9314. Closes #7346. 
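To spell out the dichotomy above: the deprecated byte metric never carried its own information, it only mirrored a component-kind-specific metric, which is why it can be removed without loss. The helper below is a hypothetical illustration of that mapping, not code from this PR or from Vector.

```rust
/// Hypothetical helper (for illustration only): which component-prefixed metric
/// the deprecated `processed_bytes_total` used to mirror for each component kind.
fn legacy_byte_alias(component_kind: &str) -> Option<&'static str> {
    match component_kind {
        // Sources: network bytes coming into Vector.
        "source" => Some("component_received_bytes_total"),
        // Sinks: network bytes leaving Vector.
        "sink" => Some("component_sent_bytes_total"),
        // Transforms never emitted `processed_bytes_total` at all.
        _ => None,
    }
}

fn main() {
    assert_eq!(legacy_byte_alias("source"), Some("component_received_bytes_total"));
    assert_eq!(legacy_byte_alias("sink"), Some("component_sent_bytes_total"));
    assert_eq!(legacy_byte_alias("transform"), None);
}
```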
--- .../graphql/queries/components.graphql | 22 +- lib/vector-api-client/graphql/schema.json | 1515 +++++++---------- ...ponent_processed_bytes_throughputs.graphql | 6 - .../component_processed_bytes_totals.graphql | 8 - ...onent_processed_events_throughputs.graphql | 6 - .../component_processed_events_totals.graphql | 8 - ...mponent_received_bytes_throughputs.graphql | 6 + .../component_received_bytes_totals.graphql | 8 + .../component_sent_bytes_throughputs.graphql | 6 + .../component_sent_bytes_totals.graphql | 8 + .../processed_events_throughput.graphql | 3 - .../processed_events_total.graphql | 5 - lib/vector-api-client/src/gql/components.rs | 56 +- lib/vector-api-client/src/gql/metrics.rs | 259 +-- .../tests/queries/file_source_metrics.graphql | 3 - .../src/internal_event/events_received.rs | 2 - .../src/internal_event/events_sent.rs | 6 - lib/vector-core/src/event/metric/mod.rs | 2 +- lib/vector-core/src/metrics/mod.rs | 22 - src/api/schema/metrics/events_in.rs | 42 - src/api/schema/metrics/events_out.rs | 42 - src/api/schema/metrics/filter.rs | 76 +- src/api/schema/metrics/mod.rs | 148 +- .../{processed_bytes.rs => received_bytes.rs} | 49 +- .../{processed_events.rs => sent_bytes.rs} | 49 +- src/api/schema/metrics/sink/generic.rs | 23 +- src/api/schema/metrics/sink/mod.rs | 20 +- src/api/schema/metrics/source/file.rs | 108 +- src/api/schema/metrics/source/generic.rs | 23 +- src/api/schema/metrics/source/mod.rs | 20 +- src/api/schema/metrics/transform/generic.rs | 22 +- src/api/schema/metrics/transform/mod.rs | 19 +- src/config/mod.rs | 4 +- src/internal_events/apache_metrics.rs | 5 +- src/internal_events/aws_ecs_metrics.rs | 2 - src/internal_events/conditions.rs | 2 - src/internal_events/docker_logs.rs | 5 - src/internal_events/exec.rs | 27 - src/internal_events/file.rs | 4 - src/internal_events/fluent.rs | 1 - src/internal_events/http.rs | 1 - src/internal_events/http_client_source.rs | 5 - src/internal_events/kafka.rs | 2 - src/internal_events/kubernetes_logs.rs | 4 +- src/internal_events/log_to_metric.rs | 10 - src/internal_events/mongodb_metrics.rs | 6 +- src/internal_events/nginx_metrics.rs | 5 - src/internal_events/socket.rs | 2 - src/top/cmd.rs | 5 +- src/top/dashboard.rs | 77 +- src/top/metrics.rs | 196 ++- src/top/mod.rs | 8 +- src/top/state.rs | 105 +- website/content/en/blog/graphql-api.md | 6 +- website/cue/reference/components.cue | 8 - website/cue/reference/components/sinks.cue | 1 - .../components/sinks/aws_kinesis_streams.cue | 2 - .../reference/components/sinks/aws_sqs.cue | 2 - .../cue/reference/components/sinks/axiom.cue | 1 - .../reference/components/sinks/azure_blob.cue | 1 - .../components/sinks/azure_monitor_logs.cue | 1 - .../reference/components/sinks/blackhole.cue | 2 - .../reference/components/sinks/clickhouse.cue | 1 - .../reference/components/sinks/console.cue | 2 - .../reference/components/sinks/databend.cue | 1 - .../components/sinks/datadog_events.cue | 1 - .../components/sinks/elasticsearch.cue | 1 - .../reference/components/sinks/gcp_pubsub.cue | 1 - .../components/sinks/gcp_stackdriver_logs.cue | 1 - .../sinks/gcp_stackdriver_metrics.cue | 3 +- .../reference/components/sinks/honeycomb.cue | 1 - .../cue/reference/components/sinks/http.cue | 3 - .../cue/reference/components/sinks/humio.cue | 1 - .../components/sinks/influxdb_logs.cue | 1 - .../cue/reference/components/sinks/loki.cue | 2 - .../cue/reference/components/sinks/mezmo.cue | 1 - .../cue/reference/components/sinks/nats.cue | 2 - .../cue/reference/components/sinks/redis.cue | 4 - 
.../cue/reference/components/sinks/socket.cue | 2 - .../components/sinks/splunk_hec_logs.cue | 2 - .../cue/reference/components/sinks/vector.cue | 2 - .../reference/components/sinks/websocket.cue | 3 - website/cue/reference/components/sources.cue | 1 - .../cue/reference/components/sources/amqp.cue | 3 - .../components/sources/apache_metrics.cue | 3 - .../components/sources/aws_ecs_metrics.cue | 3 - .../sources/aws_kinesis_firehose.cue | 2 - .../reference/components/sources/aws_s3.cue | 2 - .../components/sources/datadog_agent.cue | 1 - .../components/sources/demo_logs.cue | 1 - .../reference/components/sources/dnstap.cue | 3 - .../components/sources/docker_logs.cue | 3 - .../sources/eventstoredb_metrics.cue | 2 - .../cue/reference/components/sources/exec.cue | 3 - .../cue/reference/components/sources/file.cue | 1 - .../components/sources/file_descriptor.cue | 3 - .../reference/components/sources/fluent.cue | 3 - .../components/sources/heroku_logs.cue | 2 - .../components/sources/host_metrics.cue | 1 - .../components/sources/http_client.cue | 3 - .../components/sources/http_server.cue | 1 - .../components/sources/internal_metrics.cue | 64 - .../reference/components/sources/journald.cue | 3 - .../reference/components/sources/kafka.cue | 3 - .../components/sources/kubernetes_logs.cue | 3 - .../reference/components/sources/logstash.cue | 3 - .../components/sources/mongodb_metrics.cue | 1 - .../cue/reference/components/sources/nats.cue | 3 - .../components/sources/nginx_metrics.cue | 1 - .../components/sources/opentelemetry.cue | 7 +- .../components/sources/postgresql_metrics.cue | 1 - .../sources/prometheus_remote_write.cue | 3 - .../components/sources/prometheus_scrape.cue | 3 - .../reference/components/sources/redis.cue | 4 - .../reference/components/sources/socket.cue | 1 - .../components/sources/splunk_hec.cue | 1 - .../reference/components/sources/statsd.cue | 3 - .../reference/components/sources/stdin.cue | 3 - .../reference/components/sources/syslog.cue | 3 - .../reference/components/sources/vector.cue | 1 - .../cue/reference/components/transforms.cue | 2 - 121 files changed, 1134 insertions(+), 2147 deletions(-) delete mode 100644 lib/vector-api-client/graphql/subscriptions/component_processed_bytes_throughputs.graphql delete mode 100644 lib/vector-api-client/graphql/subscriptions/component_processed_bytes_totals.graphql delete mode 100644 lib/vector-api-client/graphql/subscriptions/component_processed_events_throughputs.graphql delete mode 100644 lib/vector-api-client/graphql/subscriptions/component_processed_events_totals.graphql create mode 100644 lib/vector-api-client/graphql/subscriptions/component_received_bytes_throughputs.graphql create mode 100644 lib/vector-api-client/graphql/subscriptions/component_received_bytes_totals.graphql create mode 100644 lib/vector-api-client/graphql/subscriptions/component_sent_bytes_throughputs.graphql create mode 100644 lib/vector-api-client/graphql/subscriptions/component_sent_bytes_totals.graphql delete mode 100644 lib/vector-api-client/graphql/subscriptions/processed_events_throughput.graphql delete mode 100644 lib/vector-api-client/graphql/subscriptions/processed_events_total.graphql delete mode 100644 src/api/schema/metrics/events_in.rs delete mode 100644 src/api/schema/metrics/events_out.rs rename src/api/schema/metrics/{processed_bytes.rs => received_bytes.rs} (57%) rename src/api/schema/metrics/{processed_events.rs => sent_bytes.rs} (57%) diff --git a/lib/vector-api-client/graphql/queries/components.graphql 
b/lib/vector-api-client/graphql/queries/components.graphql index 5be42459f8ef0..3a6f78fc27989 100644 --- a/lib/vector-api-client/graphql/queries/components.graphql +++ b/lib/vector-api-client/graphql/queries/components.graphql @@ -14,11 +14,8 @@ query ComponentsQuery($first: Int!) { } metrics { __typename - processedEventsTotal { - processedEventsTotal - } - processedBytesTotal { - processedBytesTotal + receivedBytesTotal { + receivedBytesTotal } receivedEventsTotal { receivedEventsTotal @@ -37,12 +34,6 @@ query ComponentsQuery($first: Int!) { } metrics { __typename - processedEventsTotal { - processedEventsTotal - } - processedBytesTotal { - processedBytesTotal - } receivedEventsTotal { receivedEventsTotal } @@ -54,15 +45,12 @@ query ComponentsQuery($first: Int!) { ... on Sink { metrics { __typename - processedEventsTotal { - processedEventsTotal - } - processedBytesTotal { - processedBytesTotal - } receivedEventsTotal { receivedEventsTotal } + sentBytesTotal { + sentBytesTotal + } sentEventsTotal { sentEventsTotal } diff --git a/lib/vector-api-client/graphql/schema.json b/lib/vector-api-client/graphql/schema.json index fee86167638c6..034780a80d718 100644 --- a/lib/vector-api-client/graphql/schema.json +++ b/lib/vector-api-client/graphql/schema.json @@ -9,6 +9,45 @@ "name": "Subscription" }, "types": [ + { + "kind": "OBJECT", + "name": "AllocatedBytes", + "description": null, + "fields": [ + { + "name": "timestamp", + "description": "Metric timestamp", + "args": [], + "type": { + "kind": "SCALAR", + "name": "DateTime", + "ofType": null + }, + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "allocatedBytes", + "description": "Allocated bytes", + "args": [], + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Float", + "ofType": null + } + }, + "isDeprecated": false, + "deprecationReason": null + } + ], + "inputFields": null, + "interfaces": [], + "enumValues": null, + "possibleTypes": null + }, { "kind": "SCALAR", "name": "Boolean", @@ -80,35 +119,19 @@ }, { "kind": "OBJECT", - "name": "ComponentConnection", + "name": "ComponentAllocatedBytes", "description": null, "fields": [ { - "name": "pageInfo", - "description": "Information to aid in pagination.", + "name": "componentId", + "description": "Component id", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "PageInfo", - "ofType": null - } - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "edges", - "description": "A list of edges.", - "args": [], - "type": { - "kind": "LIST", - "name": null, - "ofType": { - "kind": "OBJECT", - "name": "ComponentEdge", + "kind": "SCALAR", + "name": "String", "ofType": null } }, @@ -116,15 +139,15 @@ "deprecationReason": null }, { - "name": "totalCount", - "description": "Total result set count", + "name": "metric", + "description": "Allocated bytes metric", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "SCALAR", - "name": "Int", + "kind": "OBJECT", + "name": "AllocatedBytes", "ofType": null } }, @@ -139,19 +162,19 @@ }, { "kind": "OBJECT", - "name": "ComponentEdge", - "description": "An edge in a connection.", + "name": "ComponentConnection", + "description": null, "fields": [ { - "name": "node", - "description": "The item at the end of the edge", + "name": "pageInfo", + "description": "Information to aid in pagination.", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "INTERFACE", - "name": "Component", + 
"kind": "OBJECT", + "name": "PageInfo", "ofType": null } }, @@ -159,15 +182,63 @@ "deprecationReason": null }, { - "name": "cursor", - "description": "A cursor for use in pagination", + "name": "edges", + "description": "A list of edges.", + "args": [], + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "ComponentEdge", + "ofType": null + } + } + } + }, + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "nodes", + "description": "A list of nodes.", + "args": [], + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "INTERFACE", + "name": "Component", + "ofType": null + } + } + } + }, + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "totalCount", + "description": "Total result set count", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { "kind": "SCALAR", - "name": "String", + "name": "Int", "ofType": null } }, @@ -182,12 +253,12 @@ }, { "kind": "OBJECT", - "name": "ComponentAllocatedBytes", - "description": null, + "name": "ComponentEdge", + "description": "An edge in a connection.", "fields": [ { - "name": "componentId", - "description": "Component id", + "name": "cursor", + "description": "A cursor for use in pagination", "args": [], "type": { "kind": "NON_NULL", @@ -202,15 +273,15 @@ "deprecationReason": null }, { - "name": "metric", - "description": "Allocated bytes metric", + "name": "node", + "description": "The item at the end of the edge", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "AllocatedBytes", + "kind": "INTERFACE", + "name": "Component", "ofType": null } }, @@ -328,12 +399,12 @@ }, { "kind": "OBJECT", - "name": "ComponentProcessedBytesThroughput", + "name": "ComponentReceivedBytesThroughput", "description": null, "fields": [ { "name": "componentId", - "description": "Component id", + "description": "Component ID.", "args": [], "type": { "kind": "NON_NULL", @@ -349,7 +420,7 @@ }, { "name": "throughput", - "description": "Bytes processed throughput", + "description": "Throughput of bytes sent.", "args": [], "type": { "kind": "NON_NULL", @@ -371,12 +442,12 @@ }, { "kind": "OBJECT", - "name": "ComponentProcessedBytesTotal", + "name": "ComponentReceivedBytesTotal", "description": null, "fields": [ { "name": "componentId", - "description": "Component id", + "description": "Component ID.", "args": [], "type": { "kind": "NON_NULL", @@ -392,14 +463,14 @@ }, { "name": "metric", - "description": "Bytes processed total metric", + "description": "Metric for total bytes received.", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { "kind": "OBJECT", - "name": "ProcessedBytesTotal", + "name": "ReceivedBytesTotal", "ofType": null } }, @@ -414,7 +485,7 @@ }, { "kind": "OBJECT", - "name": "ComponentProcessedEventsThroughput", + "name": "ComponentReceivedEventsThroughput", "description": null, "fields": [ { @@ -435,7 +506,7 @@ }, { "name": "throughput", - "description": "Events processed throughput", + "description": "Received events throughput", "args": [], "type": { "kind": "NON_NULL", @@ -457,7 +528,7 @@ }, { "kind": "OBJECT", - "name": "ComponentProcessedEventsTotal", + "name": "ComponentReceivedEventsTotal", "description": null, "fields": [ { @@ -478,14 +549,14 @@ }, { "name": "metric", - 
"description": "Events processed total metric", + "description": "Total received events metric", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { "kind": "OBJECT", - "name": "ProcessedEventsTotal", + "name": "ReceivedEventsTotal", "ofType": null } }, @@ -500,12 +571,12 @@ }, { "kind": "OBJECT", - "name": "ComponentReceivedEventsThroughput", + "name": "ComponentSentBytesThroughput", "description": null, "fields": [ { "name": "componentId", - "description": "Component id", + "description": "Component ID.", "args": [], "type": { "kind": "NON_NULL", @@ -521,7 +592,7 @@ }, { "name": "throughput", - "description": "Received events throughput", + "description": "Throughput of bytes sent.", "args": [], "type": { "kind": "NON_NULL", @@ -543,12 +614,12 @@ }, { "kind": "OBJECT", - "name": "ComponentReceivedEventsTotal", + "name": "ComponentSentBytesTotal", "description": null, "fields": [ { "name": "componentId", - "description": "Component id", + "description": "Component ID.", "args": [], "type": { "kind": "NON_NULL", @@ -564,14 +635,14 @@ }, { "name": "metric", - "description": "Total received events metric", + "description": "Metric for total bytes sent.", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { "kind": "OBJECT", - "name": "ReceivedEventsTotal", + "name": "SentBytesTotal", "ofType": null } }, @@ -980,45 +1051,6 @@ "enumValues": null, "possibleTypes": null }, - { - "kind": "OBJECT", - "name": "AllocatedBytes", - "description": null, - "fields": [ - { - "name": "timestamp", - "description": "Metric timestamp", - "args": [], - "type": { - "kind": "SCALAR", - "name": "DateTime", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "allocatedBytes", - "description": "Total allocated bytes count", - "args": [], - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Float", - "ofType": null - } - }, - "isDeprecated": false, - "deprecationReason": null - } - ], - "inputFields": null, - "interfaces": [], - "enumValues": null, - "possibleTypes": null - }, { "kind": "OBJECT", "name": "ErrorsTotal", @@ -1132,33 +1164,57 @@ }, { "kind": "OBJECT", - "name": "EventsInTotal", + "name": "FileSourceMetricFile", "description": null, "fields": [ { - "name": "timestamp", - "description": "Metric timestamp", + "name": "name", + "description": "File name", "args": [], "type": { - "kind": "SCALAR", - "name": "DateTime", + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + }, + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "receivedBytesTotal", + "description": "Metric indicating bytes received for the current file", + "args": [], + "type": { + "kind": "OBJECT", + "name": "ReceivedBytesTotal", "ofType": null }, "isDeprecated": false, "deprecationReason": null }, { - "name": "eventsInTotal", - "description": "Total incoming events", + "name": "receivedEventsTotal", + "description": "Metric indicating received events for the current file", "args": [], "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Float", - "ofType": null - } + "kind": "OBJECT", + "name": "ReceivedEventsTotal", + "ofType": null + }, + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "sentEventsTotal", + "description": "Metric indicating outgoing events for the current file", + "args": [], + "type": { + "kind": "OBJECT", + "name": "SentEventsTotal", + "ofType": null }, 
"isDeprecated": false, "deprecationReason": null @@ -1171,58 +1227,19 @@ }, { "kind": "OBJECT", - "name": "EventsOutTotal", + "name": "FileSourceMetricFileConnection", "description": null, "fields": [ { - "name": "timestamp", - "description": "Metric timestamp", - "args": [], - "type": { - "kind": "SCALAR", - "name": "DateTime", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "eventsOutTotal", - "description": "Total outgoing events", - "args": [], - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Float", - "ofType": null - } - }, - "isDeprecated": false, - "deprecationReason": null - } - ], - "inputFields": null, - "interfaces": [], - "enumValues": null, - "possibleTypes": null - }, - { - "kind": "OBJECT", - "name": "FileSourceMetricFile", - "description": null, - "fields": [ - { - "name": "name", - "description": "File name", + "name": "pageInfo", + "description": "Information to aid in pagination.", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "SCALAR", - "name": "String", + "kind": "OBJECT", + "name": "PageInfo", "ofType": null } }, @@ -1230,115 +1247,48 @@ "deprecationReason": null }, { - "name": "processedEventsTotal", - "description": "Metric indicating events processed for the current file", - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedBytesTotal", - "description": "Metric indicating bytes processed for the current file", - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedBytesTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "eventsInTotal", - "description": "Metric indicating incoming events for the current file", - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsInTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "receivedEventsTotal", - "description": "Metric indicating received events for the current file", - "args": [], - "type": { - "kind": "OBJECT", - "name": "ReceivedEventsTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "eventsOutTotal", - "description": "Metric indicating outgoing events for the current file", - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsOutTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "sentEventsTotal", - "description": "Metric indicating outgoing events for the current file", - "args": [], - "type": { - "kind": "OBJECT", - "name": "SentEventsTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - } - ], - "inputFields": null, - "interfaces": [], - "enumValues": null, - "possibleTypes": null - }, - { - "kind": "OBJECT", - "name": "FileSourceMetricFileConnection", - "description": null, - "fields": [ - { - "name": "pageInfo", - "description": "Information to aid in pagination.", + "name": "edges", + "description": "A list of edges.", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "PageInfo", - "ofType": null + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "FileSourceMetricFileEdge", + "ofType": null + } + } } }, "isDeprecated": false, "deprecationReason": null }, { - "name": "edges", 
- "description": "A list of edges.", + "name": "nodes", + "description": "A list of nodes.", "args": [], "type": { - "kind": "LIST", + "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "FileSourceMetricFileEdge", - "ofType": null + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "FileSourceMetricFile", + "ofType": null + } + } } }, "isDeprecated": false, @@ -1372,15 +1322,15 @@ "description": "An edge in a connection.", "fields": [ { - "name": "node", - "description": "The item at the end of the edge", + "name": "cursor", + "description": "A cursor for use in pagination", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "FileSourceMetricFile", + "kind": "SCALAR", + "name": "String", "ofType": null } }, @@ -1388,15 +1338,15 @@ "deprecationReason": null }, { - "name": "cursor", - "description": "A cursor for use in pagination", + "name": "node", + "description": "The item at the end of the edge", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "SCALAR", - "name": "String", + "kind": "OBJECT", + "name": "FileSourceMetricFile", "ofType": null } }, @@ -1463,13 +1413,7 @@ "deprecationReason": null }, { - "name": "PROCESSED_BYTES_TOTAL", - "description": null, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "PROCESSED_EVENTS_TOTAL", + "name": "RECEIVED_BYTES_TOTAL", "description": null, "isDeprecated": false, "deprecationReason": null @@ -1480,23 +1424,11 @@ "isDeprecated": false, "deprecationReason": null }, - { - "name": "EVENTS_IN_TOTAL", - "description": null, - "isDeprecated": false, - "deprecationReason": null - }, { "name": "SENT_EVENTS_TOTAL", "description": null, "isDeprecated": false, "deprecationReason": null - }, - { - "name": "EVENTS_OUT_TOTAL", - "description": null, - "isDeprecated": false, - "deprecationReason": null } ], "possibleTypes": null @@ -1592,36 +1524,12 @@ "deprecationReason": null }, { - "name": "processedEventsTotal", - "description": "Events processed for the current file source", + "name": "receivedBytesTotal", + "description": "Total received bytes for the current file source", "args": [], "type": { "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedBytesTotal", - "description": "Bytes processed for the current file source", - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedBytesTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "eventsInTotal", - "description": "Total incoming events for the current file source", - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsInTotal", + "name": "ReceivedBytesTotal", "ofType": null }, "isDeprecated": false, @@ -1639,21 +1547,9 @@ "isDeprecated": false, "deprecationReason": null }, - { - "name": "eventsOutTotal", - "description": "Total outgoing events for the current file source", - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsOutTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, { "name": "sentEventsTotal", - "description": "Total outgoing events for the current file source", + "description": "Total sent events for the current file source", "args": [], "type": { "kind": "OBJECT", @@ -1796,42 +1692,6 @@ "name": "GenericSinkMetrics", "description": null, "fields": [ - { - "name": 
"processedEventsTotal", - "description": "Events processed for the current sink", - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedBytesTotal", - "description": "Bytes processed for the current sink", - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedBytesTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "eventsInTotal", - "description": "Total incoming events for the current sink", - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsInTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, { "name": "receivedEventsTotal", "description": "Total received events for the current sink", @@ -1845,12 +1705,12 @@ "deprecationReason": null }, { - "name": "eventsOutTotal", - "description": "Total outgoing events for the current sink", + "name": "sentBytesTotal", + "description": "Total sent bytes for the current sink", "args": [], "type": { "kind": "OBJECT", - "name": "EventsOutTotal", + "name": "SentBytesTotal", "ofType": null }, "isDeprecated": false, @@ -1858,7 +1718,7 @@ }, { "name": "sentEventsTotal", - "description": "Total outgoing events for the current sink", + "description": "Total sent events for the current sink", "args": [], "type": { "kind": "OBJECT", @@ -1886,36 +1746,12 @@ "description": null, "fields": [ { - "name": "processedEventsTotal", - "description": "Events processed for the current source", - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedBytesTotal", - "description": "Bytes processed for the current source", + "name": "receivedBytesTotal", + "description": "Total received bytes for the current source", "args": [], "type": { "kind": "OBJECT", - "name": "ProcessedBytesTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "eventsInTotal", - "description": "Total incoming events for the current source", - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsInTotal", + "name": "ReceivedBytesTotal", "ofType": null }, "isDeprecated": false, @@ -1933,21 +1769,9 @@ "isDeprecated": false, "deprecationReason": null }, - { - "name": "eventsOutTotal", - "description": "Total outgoing events for the current source", - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsOutTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, { "name": "sentEventsTotal", - "description": "Total outgoing events for the current source", + "description": "Total sent events for the current source", "args": [], "type": { "kind": "OBJECT", @@ -1974,42 +1798,6 @@ "name": "GenericTransformMetrics", "description": null, "fields": [ - { - "name": "processedEventsTotal", - "description": "Events processed for the current transform", - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedBytesTotal", - "description": "Bytes processed for the current transform", - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedBytesTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "eventsInTotal", - "description": "Total incoming events for the current transform", - 
"args": [], - "type": { - "kind": "OBJECT", - "name": "EventsInTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, { "name": "receivedEventsTotal", "description": "Total received events for the current transform", @@ -2022,21 +1810,9 @@ "isDeprecated": false, "deprecationReason": null }, - { - "name": "eventsOutTotal", - "description": "Total outgoing events for the current transform", - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsOutTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, { "name": "sentEventsTotal", - "description": "Total outgoing events for the current transform", + "description": "Total sent events for the current transform", "args": [], "type": { "kind": "OBJECT", @@ -2275,16 +2051,6 @@ "enumValues": null, "possibleTypes": null }, - { - "kind": "SCALAR", - "name": "Json", - "description": "Raw JSON data`", - "fields": null, - "inputFields": null, - "interfaces": null, - "enumValues": null, - "possibleTypes": null - }, { "kind": "OBJECT", "name": "LoadAverageMetrics", @@ -2473,7 +2239,7 @@ ], "type": { "kind": "SCALAR", - "name": "Json", + "name": "String", "ofType": null }, "isDeprecated": false, @@ -2984,16 +2750,6 @@ "kind": "OBJECT", "name": "Uptime", "ofType": null - }, - { - "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "ofType": null - }, - { - "kind": "OBJECT", - "name": "ProcessedBytesTotal", - "ofType": null } ] }, @@ -3345,96 +3101,6 @@ "enumValues": null, "possibleTypes": null }, - { - "kind": "OBJECT", - "name": "ProcessedBytesTotal", - "description": null, - "fields": [ - { - "name": "timestamp", - "description": "Metric timestamp", - "args": [], - "type": { - "kind": "SCALAR", - "name": "DateTime", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedBytesTotal", - "description": "Total number of bytes processed", - "args": [], - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Float", - "ofType": null - } - }, - "isDeprecated": false, - "deprecationReason": null - } - ], - "inputFields": null, - "interfaces": [ - { - "kind": "INTERFACE", - "name": "MetricType", - "ofType": null - } - ], - "enumValues": null, - "possibleTypes": null - }, - { - "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "description": null, - "fields": [ - { - "name": "timestamp", - "description": "Metric timestamp", - "args": [], - "type": { - "kind": "SCALAR", - "name": "DateTime", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedEventsTotal", - "description": "Total number of events processed", - "args": [], - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Float", - "ofType": null - } - }, - "isDeprecated": false, - "deprecationReason": null - } - ], - "inputFields": null, - "interfaces": [ - { - "kind": "INTERFACE", - "name": "MetricType", - "ofType": null - } - ], - "enumValues": null, - "possibleTypes": null - }, { "kind": "OBJECT", "name": "Query", @@ -3861,6 +3527,45 @@ "enumValues": null, "possibleTypes": null }, + { + "kind": "OBJECT", + "name": "ReceivedBytesTotal", + "description": null, + "fields": [ + { + "name": "timestamp", + "description": "Metric timestamp.", + "args": [], + "type": { + "kind": "SCALAR", + "name": "DateTime", + "ofType": null + }, + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "receivedBytesTotal", + "description": "Total number of 
bytes received.", + "args": [], + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Float", + "ofType": null + } + }, + "isDeprecated": false, + "deprecationReason": null + } + ], + "inputFields": null, + "interfaces": [], + "enumValues": null, + "possibleTypes": null + }, { "kind": "OBJECT", "name": "ReceivedEventsTotal", @@ -3900,6 +3605,45 @@ "enumValues": null, "possibleTypes": null }, + { + "kind": "OBJECT", + "name": "SentBytesTotal", + "description": null, + "fields": [ + { + "name": "timestamp", + "description": "Metric timestamp.", + "args": [], + "type": { + "kind": "SCALAR", + "name": "DateTime", + "ofType": null + }, + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "sentBytesTotal", + "description": "Total number of bytes sent.", + "args": [], + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Float", + "ofType": null + } + }, + "isDeprecated": false, + "deprecationReason": null + } + ], + "inputFields": null, + "interfaces": [], + "enumValues": null, + "possibleTypes": null + }, { "kind": "OBJECT", "name": "SentEventsTotal", @@ -4078,12 +3822,44 @@ "description": "A list of edges.", "args": [], "type": { - "kind": "LIST", + "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "SinkEdge", - "ofType": null + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "SinkEdge", + "ofType": null + } + } + } + }, + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "nodes", + "description": "A list of nodes.", + "args": [], + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "Sink", + "ofType": null + } + } } }, "isDeprecated": false, @@ -4117,15 +3893,15 @@ "description": "An edge in a connection.", "fields": [ { - "name": "node", - "description": "The item at the end of the edge", + "name": "cursor", + "description": "A cursor for use in pagination", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "Sink", + "kind": "SCALAR", + "name": "String", "ofType": null } }, @@ -4133,15 +3909,15 @@ "deprecationReason": null }, { - "name": "cursor", - "description": "A cursor for use in pagination", + "name": "node", + "description": "The item at the end of the edge", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "SCALAR", - "name": "String", + "kind": "OBJECT", + "name": "Sink", "ofType": null } }, @@ -4159,30 +3935,6 @@ "name": "SinkMetrics", "description": null, "fields": [ - { - "name": "processedEventsTotal", - "description": null, - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedBytesTotal", - "description": null, - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedBytesTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, { "name": "receivedEventsTotal", "description": null, @@ -4196,16 +3948,16 @@ "deprecationReason": null }, { - "name": "eventsInTotal", + "name": "sentBytesTotal", "description": null, "args": [], "type": { "kind": "OBJECT", - "name": "EventsInTotal", + "name": "SentBytesTotal", "ofType": null }, - "isDeprecated": true, - "deprecationReason": "Use 
received_events_total instead" + "isDeprecated": false, + "deprecationReason": null }, { "name": "sentEventsTotal", @@ -4218,18 +3970,6 @@ }, "isDeprecated": false, "deprecationReason": null - }, - { - "name": "eventsOutTotal", - "description": null, - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsOutTotal", - "ofType": null - }, - "isDeprecated": true, - "deprecationReason": "Use sent_events_total instead" } ], "inputFields": null, @@ -4557,12 +4297,44 @@ "description": "A list of edges.", "args": [], "type": { - "kind": "LIST", + "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "SourceEdge", - "ofType": null + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "SourceEdge", + "ofType": null + } + } + } + }, + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "nodes", + "description": "A list of nodes.", + "args": [], + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "Source", + "ofType": null + } + } } }, "isDeprecated": false, @@ -4596,15 +4368,15 @@ "description": "An edge in a connection.", "fields": [ { - "name": "node", - "description": "The item at the end of the edge", + "name": "cursor", + "description": "A cursor for use in pagination", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "Source", + "kind": "SCALAR", + "name": "String", "ofType": null } }, @@ -4612,15 +4384,15 @@ "deprecationReason": null }, { - "name": "cursor", - "description": "A cursor for use in pagination", + "name": "node", + "description": "The item at the end of the edge", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "SCALAR", - "name": "String", + "kind": "OBJECT", + "name": "Source", "ofType": null } }, @@ -4639,24 +4411,12 @@ "description": null, "fields": [ { - "name": "processedEventsTotal", - "description": null, - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedBytesTotal", + "name": "receivedBytesTotal", "description": null, "args": [], "type": { "kind": "OBJECT", - "name": "ProcessedBytesTotal", + "name": "ReceivedBytesTotal", "ofType": null }, "isDeprecated": false, @@ -4674,18 +4434,6 @@ "isDeprecated": false, "deprecationReason": null }, - { - "name": "eventsInTotal", - "description": null, - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsInTotal", - "ofType": null - }, - "isDeprecated": true, - "deprecationReason": "Use received_events_total instead" - }, { "name": "sentEventsTotal", "description": null, @@ -4697,18 +4445,6 @@ }, "isDeprecated": false, "deprecationReason": null - }, - { - "name": "eventsOutTotal", - "description": null, - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsOutTotal", - "ofType": null - }, - "isDeprecated": true, - "deprecationReason": "Use sent_events_total instead" } ], "inputFields": null, @@ -4977,7 +4713,7 @@ { "kind": "SCALAR", "name": "String", - "description": "The `String` scalar type represents textual data, represented as UTF-8 character sequences. 
The String type is most often used by GraphQL to represent free-form human-readable text.", + "description": "The `String` scalar type represents textual data, represented as UTF-8\ncharacter sequences. The String type is most often used by GraphQL to\nrepresent free-form human-readable text.", "fields": null, "inputFields": null, "interfaces": null, @@ -5016,177 +4752,53 @@ "type": { "kind": "SCALAR", "name": "String", - "ofType": null - }, - "defaultValue": null - }, - { - "name": "notContains", - "description": null, - "type": { - "kind": "SCALAR", - "name": "String", - "ofType": null - }, - "defaultValue": null - }, - { - "name": "startsWith", - "description": null, - "type": { - "kind": "SCALAR", - "name": "String", - "ofType": null - }, - "defaultValue": null - }, - { - "name": "endsWith", - "description": null, - "type": { - "kind": "SCALAR", - "name": "String", - "ofType": null - }, - "defaultValue": null - } - ], - "interfaces": null, - "enumValues": null, - "possibleTypes": null - }, - { - "kind": "OBJECT", - "name": "Subscription", - "description": null, - "fields": [ - { - "name": "heartbeat", - "description": "Heartbeat, containing the UTC timestamp of the last server-sent payload", - "args": [ - { - "name": "interval", - "description": null, - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Int", - "ofType": null - } - }, - "defaultValue": "1000" - } - ], - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "OBJECT", - "name": "Heartbeat", - "ofType": null - } - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "uptime", - "description": "Metrics for how long the Vector instance has been running", - "args": [ - { - "name": "interval", - "description": null, - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Int", - "ofType": null - } - }, - "defaultValue": "1000" - } - ], - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "OBJECT", - "name": "Uptime", - "ofType": null - } - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedEventsTotal", - "description": "Event processing metrics.", - "args": [ - { - "name": "interval", - "description": null, - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Int", - "ofType": null - } - }, - "defaultValue": "1000" - } - ], + "ofType": null + }, + "defaultValue": null + }, + { + "name": "notContains", + "description": null, "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "ofType": null - } + "kind": "SCALAR", + "name": "String", + "ofType": null }, - "isDeprecated": false, - "deprecationReason": null + "defaultValue": null }, { - "name": "processedEventsThroughput", - "description": "Event processing throughput sampled over the provided millisecond `interval`.", - "args": [ - { - "name": "interval", - "description": null, - "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Int", - "ofType": null - } - }, - "defaultValue": "1000" - } - ], + "name": "startsWith", + "description": null, "type": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "SCALAR", - "name": "Int", - "ofType": null - } + "kind": "SCALAR", + "name": "String", + "ofType": null }, - "isDeprecated": false, - "deprecationReason": null + "defaultValue": null }, { - "name": "componentProcessedEventsThroughputs", - 
"description": "Component event processing throughput metrics over `interval`.", + "name": "endsWith", + "description": null, + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + }, + "defaultValue": null + } + ], + "interfaces": null, + "enumValues": null, + "possibleTypes": null + }, + { + "kind": "OBJECT", + "name": "Subscription", + "description": null, + "fields": [ + { + "name": "heartbeat", + "description": "Heartbeat, containing the UTC timestamp of the last server-sent payload", "args": [ { "name": "interval", @@ -5207,25 +4819,17 @@ "kind": "NON_NULL", "name": null, "ofType": { - "kind": "LIST", - "name": null, - "ofType": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "OBJECT", - "name": "ComponentProcessedEventsThroughput", - "ofType": null - } - } + "kind": "OBJECT", + "name": "Heartbeat", + "ofType": null } }, "isDeprecated": false, "deprecationReason": null }, { - "name": "componentProcessedEventsTotals", - "description": "Component event processing metrics over `interval`.", + "name": "uptime", + "description": "Metrics for how long the Vector instance has been running", "args": [ { "name": "interval", @@ -5246,17 +4850,9 @@ "kind": "NON_NULL", "name": null, "ofType": { - "kind": "LIST", - "name": null, - "ofType": { - "kind": "NON_NULL", - "name": null, - "ofType": { - "kind": "OBJECT", - "name": "ComponentProcessedEventsTotal", - "ofType": null - } - } + "kind": "OBJECT", + "name": "Uptime", + "ofType": null } }, "isDeprecated": false, @@ -5543,8 +5139,8 @@ "deprecationReason": null }, { - "name": "processedBytesTotal", - "description": "Byte processing metrics.", + "name": "componentReceivedBytesTotals", + "description": "Component bytes received metrics over `interval`.", "args": [ { "name": "interval", @@ -5565,17 +5161,25 @@ "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "ProcessedBytesTotal", - "ofType": null + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "ComponentReceivedBytesTotal", + "ofType": null + } + } } }, "isDeprecated": false, "deprecationReason": null }, { - "name": "processedBytesThroughput", - "description": "Byte processing throughput sampled over a provided millisecond `interval`.", + "name": "componentReceivedBytesThroughputs", + "description": "Component bytes received throughput over `interval`", "args": [ { "name": "interval", @@ -5596,17 +5200,25 @@ "kind": "NON_NULL", "name": null, "ofType": { - "kind": "SCALAR", - "name": "Int", - "ofType": null + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "ComponentReceivedBytesThroughput", + "ofType": null + } + } } }, "isDeprecated": false, "deprecationReason": null }, { - "name": "componentProcessedBytesTotals", - "description": "Component byte processing metrics over `interval`.", + "name": "componentSentBytesTotals", + "description": "Component bytes sent metrics over `interval`.", "args": [ { "name": "interval", @@ -5634,7 +5246,7 @@ "name": null, "ofType": { "kind": "OBJECT", - "name": "ComponentProcessedBytesTotal", + "name": "ComponentSentBytesTotal", "ofType": null } } @@ -5644,8 +5256,8 @@ "deprecationReason": null }, { - "name": "componentProcessedBytesThroughputs", - "description": "Component byte processing throughput over `interval`", + "name": "componentSentBytesThroughputs", + "description": "Component bytes sent throughput over `interval`", "args": [ { "name": 
"interval", @@ -5673,7 +5285,7 @@ "name": null, "ofType": { "kind": "OBJECT", - "name": "ComponentProcessedBytesThroughput", + "name": "ComponentSentBytesThroughput", "ofType": null } } @@ -5713,9 +5325,40 @@ "isDeprecated": false, "deprecationReason": null }, + { + "name": "allocatedBytes", + "description": "Allocated bytes metrics.", + "args": [ + { + "name": "interval", + "description": null, + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Int", + "ofType": null + } + }, + "defaultValue": "1000" + } + ], + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "AllocatedBytes", + "ofType": null + } + }, + "isDeprecated": false, + "deprecationReason": null + }, { "name": "componentAllocatedBytes", - "description": "Component allocated bytes metrics over `interval`.", + "description": "Component allocation metrics", "args": [ { "name": "interval", @@ -6141,7 +5784,7 @@ ], "type": { "kind": "SCALAR", - "name": "Json", + "name": "String", "ofType": null }, "isDeprecated": false, @@ -6340,12 +5983,44 @@ "description": "A list of edges.", "args": [], "type": { - "kind": "LIST", + "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "TransformEdge", - "ofType": null + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "TransformEdge", + "ofType": null + } + } + } + }, + "isDeprecated": false, + "deprecationReason": null + }, + { + "name": "nodes", + "description": "A list of nodes.", + "args": [], + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "Transform", + "ofType": null + } + } } }, "isDeprecated": false, @@ -6379,15 +6054,15 @@ "description": "An edge in a connection.", "fields": [ { - "name": "node", - "description": "The item at the end of the edge", + "name": "cursor", + "description": "A cursor for use in pagination", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "OBJECT", - "name": "Transform", + "kind": "SCALAR", + "name": "String", "ofType": null } }, @@ -6395,15 +6070,15 @@ "deprecationReason": null }, { - "name": "cursor", - "description": "A cursor for use in pagination", + "name": "node", + "description": "The item at the end of the edge", "args": [], "type": { "kind": "NON_NULL", "name": null, "ofType": { - "kind": "SCALAR", - "name": "String", + "kind": "OBJECT", + "name": "Transform", "ofType": null } }, @@ -6421,30 +6096,6 @@ "name": "TransformMetrics", "description": null, "fields": [ - { - "name": "processedEventsTotal", - "description": null, - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedEventsTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, - { - "name": "processedBytesTotal", - "description": null, - "args": [], - "type": { - "kind": "OBJECT", - "name": "ProcessedBytesTotal", - "ofType": null - }, - "isDeprecated": false, - "deprecationReason": null - }, { "name": "receivedEventsTotal", "description": null, @@ -6457,18 +6108,6 @@ "isDeprecated": false, "deprecationReason": null }, - { - "name": "eventsInTotal", - "description": null, - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsInTotal", - "ofType": null - }, - "isDeprecated": true, - "deprecationReason": "Use received_events_total instead" - }, { "name": "sentEventsTotal", "description": 
null, @@ -6480,18 +6119,6 @@ }, "isDeprecated": false, "deprecationReason": null - }, - { - "name": "eventsOutTotal", - "description": null, - "args": [], - "type": { - "kind": "OBJECT", - "name": "EventsOutTotal", - "ofType": null - }, - "isDeprecated": true, - "deprecationReason": "Use sent_events_total instead" } ], "inputFields": null, @@ -6680,7 +6307,7 @@ { "kind": "OBJECT", "name": "__Directive", - "description": "A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.\n\nIn some cases, you need to provide options to alter GraphQL's execution behavior in ways field arguments will not suffice, such as conditionally including or skipping a field. Directives provide this by describing additional information to the executor.", + "description": "A Directive provides a way to describe alternate runtime execution and type\nvalidation behavior in a GraphQL document.\n\nIn some cases, you need to provide options to alter GraphQL's execution\nbehavior in ways field arguments will not suffice, such as conditionally\nincluding or skipping a field. Directives provide this by describing\nadditional information to the executor.", "fields": [ { "name": "name", @@ -6783,7 +6410,7 @@ { "kind": "ENUM", "name": "__DirectiveLocation", - "description": "A Directive can be adjacent to many parts of the GraphQL language, a __DirectiveLocation describes one such possible adjacencies.", + "description": "A Directive can be adjacent to many parts of the GraphQL language, a\n__DirectiveLocation describes one such possible adjacencies.", "fields": null, "inputFields": null, "interfaces": null, @@ -6908,7 +6535,7 @@ { "kind": "OBJECT", "name": "__EnumValue", - "description": "One possible value for a given Enum. Enum values are unique values, not a placeholder for a string or numeric value. However an Enum value is returned in a JSON response as a string.", + "description": "One possible value for a given Enum. Enum values are unique values, not a\nplaceholder for a string or numeric value. However an Enum value is returned\nin a JSON response as a string.", "fields": [ { "name": "name", @@ -6975,7 +6602,7 @@ { "kind": "OBJECT", "name": "__Field", - "description": "Object and Interface types are described by a list of Fields, each of which has a name, potentially a list of arguments, and a return type.", + "description": "Object and Interface types are described by a list of Fields, each of which\nhas a name, potentially a list of arguments, and a return type.", "fields": [ { "name": "name", @@ -7082,7 +6709,7 @@ { "kind": "OBJECT", "name": "__InputValue", - "description": "Arguments provided to Fields or Directives and the input fields of an InputObject are represented as Input Values which describe their type and optionally a default value.", + "description": "Arguments provided to Fields or Directives and the input fields of an\nInputObject are represented as Input Values which describe their type and\noptionally a default value.", "fields": [ { "name": "name", @@ -7149,7 +6776,7 @@ { "kind": "OBJECT", "name": "__Schema", - "description": "A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations.", + "description": "A GraphQL Schema defines the capabilities of a GraphQL server. 
It exposes\nall available types and directives on the server, as well as the entry\npoints for query, mutation, and subscription operations.", "fields": [ { "name": "types", @@ -7193,7 +6820,7 @@ }, { "name": "mutationType", - "description": "If this server supports mutation, the type that mutation operations will be rooted at.", + "description": "If this server supports mutation, the type that mutation operations will\nbe rooted at.", "args": [], "type": { "kind": "OBJECT", @@ -7205,7 +6832,7 @@ }, { "name": "subscriptionType", - "description": "If this server support subscription, the type that subscription operations will be rooted at.", + "description": "If this server support subscription, the type that subscription\noperations will be rooted at.", "args": [], "type": { "kind": "OBJECT", @@ -7248,7 +6875,7 @@ { "kind": "OBJECT", "name": "__Type", - "description": "The fundamental unit of any GraphQL Schema is the type. There are many kinds of types in GraphQL as represented by the `__TypeKind` enum.\n\nDepending on the kind of a type, certain fields describe information about that type. Scalar types provide no information beyond a name and description, while Enum types provide their values. Object and Interface types provide the fields they describe. Abstract types, Union and Interface, provide the Object types possible at runtime. List and NonNull types compose other types.", + "description": "The fundamental unit of any GraphQL Schema is the type. There are many kinds\nof types in GraphQL as represented by the `__TypeKind` enum.\n\nDepending on the kind of a type, certain fields describe information about\nthat type. Scalar types provide no information beyond a name and\ndescription, while Enum types provide their values. Object and Interface\ntypes provide the fields they describe. Abstract types, Union and Interface,\nprovide the Object types possible at runtime. List and NonNull types compose\nother types.", "fields": [ { "name": "kind", @@ -7443,6 +7070,18 @@ }, "isDeprecated": false, "deprecationReason": null + }, + { + "name": "isOneOf", + "description": null, + "args": [], + "type": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + }, + "isDeprecated": false, + "deprecationReason": null } ], "inputFields": null, @@ -7466,13 +7105,13 @@ }, { "name": "OBJECT", - "description": "Indicates this type is an object. `fields` and `interfaces` are valid fields.", + "description": "Indicates this type is an object. `fields` and `interfaces` are valid\nfields.", "isDeprecated": false, "deprecationReason": null }, { "name": "INTERFACE", - "description": "Indicates this type is an interface. `fields` and `possibleTypes` are valid fields.", + "description": "Indicates this type is an interface. `fields` and `possibleTypes` are\nvalid fields.", "isDeprecated": false, "deprecationReason": null }, diff --git a/lib/vector-api-client/graphql/subscriptions/component_processed_bytes_throughputs.graphql b/lib/vector-api-client/graphql/subscriptions/component_processed_bytes_throughputs.graphql deleted file mode 100644 index 4c9229b173927..0000000000000 --- a/lib/vector-api-client/graphql/subscriptions/component_processed_bytes_throughputs.graphql +++ /dev/null @@ -1,6 +0,0 @@ -subscription ComponentProcessedBytesThroughputsSubscription($interval: Int!) 
{ - componentProcessedBytesThroughputs(interval: $interval) { - componentId - throughput - } -} diff --git a/lib/vector-api-client/graphql/subscriptions/component_processed_bytes_totals.graphql b/lib/vector-api-client/graphql/subscriptions/component_processed_bytes_totals.graphql deleted file mode 100644 index 9b0862c31b997..0000000000000 --- a/lib/vector-api-client/graphql/subscriptions/component_processed_bytes_totals.graphql +++ /dev/null @@ -1,8 +0,0 @@ -subscription ComponentProcessedBytesTotalsSubscription($interval: Int!) { - componentProcessedBytesTotals(interval: $interval) { - componentId - metric { - processedBytesTotal - } - } -} diff --git a/lib/vector-api-client/graphql/subscriptions/component_processed_events_throughputs.graphql b/lib/vector-api-client/graphql/subscriptions/component_processed_events_throughputs.graphql deleted file mode 100644 index 727c3440a9144..0000000000000 --- a/lib/vector-api-client/graphql/subscriptions/component_processed_events_throughputs.graphql +++ /dev/null @@ -1,6 +0,0 @@ -subscription ComponentProcessedEventsThroughputsSubscription($interval: Int!) { - componentProcessedEventsThroughputs(interval: $interval) { - componentId - throughput - } -} diff --git a/lib/vector-api-client/graphql/subscriptions/component_processed_events_totals.graphql b/lib/vector-api-client/graphql/subscriptions/component_processed_events_totals.graphql deleted file mode 100644 index bbfe1c20b573b..0000000000000 --- a/lib/vector-api-client/graphql/subscriptions/component_processed_events_totals.graphql +++ /dev/null @@ -1,8 +0,0 @@ -subscription ComponentProcessedEventsTotalsSubscription($interval: Int!) { - componentProcessedEventsTotals(interval: $interval) { - componentId - metric { - processedEventsTotal - } - } -} diff --git a/lib/vector-api-client/graphql/subscriptions/component_received_bytes_throughputs.graphql b/lib/vector-api-client/graphql/subscriptions/component_received_bytes_throughputs.graphql new file mode 100644 index 0000000000000..ec1b018d27107 --- /dev/null +++ b/lib/vector-api-client/graphql/subscriptions/component_received_bytes_throughputs.graphql @@ -0,0 +1,6 @@ +subscription ComponentReceivedBytesThroughputsSubscription($interval: Int!) { + componentReceivedBytesThroughputs(interval: $interval) { + componentId + throughput + } +} diff --git a/lib/vector-api-client/graphql/subscriptions/component_received_bytes_totals.graphql b/lib/vector-api-client/graphql/subscriptions/component_received_bytes_totals.graphql new file mode 100644 index 0000000000000..748b8930fb087 --- /dev/null +++ b/lib/vector-api-client/graphql/subscriptions/component_received_bytes_totals.graphql @@ -0,0 +1,8 @@ +subscription ComponentReceivedBytesTotalsSubscription($interval: Int!) { + componentReceivedBytesTotals(interval: $interval) { + componentId + metric { + receivedBytesTotal + } + } +} diff --git a/lib/vector-api-client/graphql/subscriptions/component_sent_bytes_throughputs.graphql b/lib/vector-api-client/graphql/subscriptions/component_sent_bytes_throughputs.graphql new file mode 100644 index 0000000000000..a1b4f54c1a300 --- /dev/null +++ b/lib/vector-api-client/graphql/subscriptions/component_sent_bytes_throughputs.graphql @@ -0,0 +1,6 @@ +subscription ComponentSentBytesThroughputsSubscription($interval: Int!) 
{ + componentSentBytesThroughputs(interval: $interval) { + componentId + throughput + } +} diff --git a/lib/vector-api-client/graphql/subscriptions/component_sent_bytes_totals.graphql b/lib/vector-api-client/graphql/subscriptions/component_sent_bytes_totals.graphql new file mode 100644 index 0000000000000..aa2c2f6dd0832 --- /dev/null +++ b/lib/vector-api-client/graphql/subscriptions/component_sent_bytes_totals.graphql @@ -0,0 +1,8 @@ +subscription ComponentSentBytesTotalsSubscription($interval: Int!) { + componentSentBytesTotals(interval: $interval) { + componentId + metric { + sentBytesTotal + } + } +} diff --git a/lib/vector-api-client/graphql/subscriptions/processed_events_throughput.graphql b/lib/vector-api-client/graphql/subscriptions/processed_events_throughput.graphql deleted file mode 100644 index f6b5b37a29d91..0000000000000 --- a/lib/vector-api-client/graphql/subscriptions/processed_events_throughput.graphql +++ /dev/null @@ -1,3 +0,0 @@ -subscription ProcessedEventsThroughputSubscription($interval: Int!) { - processedEventsThroughput(interval: $interval) -} diff --git a/lib/vector-api-client/graphql/subscriptions/processed_events_total.graphql b/lib/vector-api-client/graphql/subscriptions/processed_events_total.graphql deleted file mode 100644 index d10dde89a6451..0000000000000 --- a/lib/vector-api-client/graphql/subscriptions/processed_events_total.graphql +++ /dev/null @@ -1,5 +0,0 @@ -subscription ProcessedEventsTotalSubscription($interval: Int!) { - processedEventsTotal(interval: $interval) { - processedEventsTotal - } -} diff --git a/lib/vector-api-client/src/gql/components.rs b/lib/vector-api-client/src/gql/components.rs index 4efb95c47316f..00dcb2f8a72e5 100644 --- a/lib/vector-api-client/src/gql/components.rs +++ b/lib/vector-api-client/src/gql/components.rs @@ -70,71 +70,53 @@ impl ComponentsSubscriptionExt for crate::SubscriptionClient { } impl components_query::ComponentsQueryComponentsEdgesNodeOn { - pub fn processed_events_total(&self) -> i64 { + pub fn received_bytes_total(&self) -> i64 { + // This is network bytes received, and only sources can receive events. 
match self { components_query::ComponentsQueryComponentsEdgesNodeOn::Source(s) => s .metrics - .processed_events_total + .received_bytes_total .as_ref() - .map(|p| p.processed_events_total as i64) - .unwrap_or(0), - components_query::ComponentsQueryComponentsEdgesNodeOn::Transform(t) => t - .metrics - .processed_events_total - .as_ref() - .map(|p| p.processed_events_total as i64) - .unwrap_or(0), - components_query::ComponentsQueryComponentsEdgesNodeOn::Sink(s) => s - .metrics - .processed_events_total - .as_ref() - .map(|p| p.processed_events_total as i64) + .map(|p| p.received_bytes_total as i64) .unwrap_or(0), + components_query::ComponentsQueryComponentsEdgesNodeOn::Transform(_) => 0, + components_query::ComponentsQueryComponentsEdgesNodeOn::Sink(_) => 0, } } - pub fn processed_bytes_total(&self) -> i64 { + pub fn received_events_total(&self) -> i64 { match self { components_query::ComponentsQueryComponentsEdgesNodeOn::Source(s) => s .metrics - .processed_bytes_total + .received_events_total .as_ref() - .map(|p| p.processed_bytes_total as i64) + .map(|p| p.received_events_total as i64) .unwrap_or(0), components_query::ComponentsQueryComponentsEdgesNodeOn::Transform(t) => t .metrics - .processed_bytes_total + .received_events_total .as_ref() - .map(|p| p.processed_bytes_total as i64) + .map(|p| p.received_events_total as i64) .unwrap_or(0), components_query::ComponentsQueryComponentsEdgesNodeOn::Sink(s) => s .metrics - .processed_bytes_total + .received_events_total .as_ref() - .map(|p| p.processed_bytes_total as i64) + .map(|p| p.received_events_total as i64) .unwrap_or(0), } } - pub fn received_events_total(&self) -> i64 { + pub fn sent_bytes_total(&self) -> i64 { + // This is network bytes sent, and only sinks can send out events. match self { - components_query::ComponentsQueryComponentsEdgesNodeOn::Source(s) => s - .metrics - .received_events_total - .as_ref() - .map(|p| p.received_events_total as i64) - .unwrap_or(0), - components_query::ComponentsQueryComponentsEdgesNodeOn::Transform(t) => t - .metrics - .received_events_total - .as_ref() - .map(|p| p.received_events_total as i64) - .unwrap_or(0), + components_query::ComponentsQueryComponentsEdgesNodeOn::Source(_) => 0, + components_query::ComponentsQueryComponentsEdgesNodeOn::Transform(_) => 0, components_query::ComponentsQueryComponentsEdgesNodeOn::Sink(s) => s .metrics - .received_events_total + .sent_bytes_total .as_ref() - .map(|p| p.received_events_total as i64) + .map(|p| p.sent_bytes_total as i64) .unwrap_or(0), } } diff --git a/lib/vector-api-client/src/gql/metrics.rs b/lib/vector-api-client/src/gql/metrics.rs index 516de83a95313..a9df90f7c0bd7 100644 --- a/lib/vector-api-client/src/gql/metrics.rs +++ b/lib/vector-api-client/src/gql/metrics.rs @@ -14,56 +14,6 @@ use crate::BoxedSubscription; )] pub struct UptimeSubscription; -/// ProcessedEventsTotalSubscription contains metrics on the number of events -/// that have been processed by a Vector instance. -#[derive(GraphQLQuery, Debug, Copy, Clone)] -#[graphql( - schema_path = "graphql/schema.json", - query_path = "graphql/subscriptions/processed_events_total.graphql", - response_derives = "Debug" -)] -pub struct ProcessedEventsTotalSubscription; - -/// ProcessedEventsThroughputSubscription contains metrics on the number of events -/// that have been processed between `interval` samples. 
-#[derive(GraphQLQuery, Debug, Copy, Clone)] -#[graphql( - schema_path = "graphql/schema.json", - query_path = "graphql/subscriptions/processed_events_throughput.graphql", - response_derives = "Debug" -)] -pub struct ProcessedEventsThroughputSubscription; - -/// ProcessedBytesThroughputSubscription contains metrics on the number of bytes -/// that have been processed between `interval` samples. -#[derive(GraphQLQuery, Debug, Copy, Clone)] -#[graphql( - schema_path = "graphql/schema.json", - query_path = "graphql/subscriptions/processed_bytes_throughput.graphql", - response_derives = "Debug" -)] -pub struct ProcessedBytesThroughputSubscription; - -/// ComponentProcessedEventsThroughputsSubscription contains metrics on the number of events -/// that have been processed between `interval` samples, against specific components. -#[derive(GraphQLQuery, Debug, Copy, Clone)] -#[graphql( - schema_path = "graphql/schema.json", - query_path = "graphql/subscriptions/component_processed_events_throughputs.graphql", - response_derives = "Debug" -)] -pub struct ComponentProcessedEventsThroughputsSubscription; - -/// ComponentProcessedEventsTotalsSubscription contains metrics on the number of events -/// that have been processed by a Vector instance, against specific components. -#[derive(GraphQLQuery, Debug, Copy, Clone)] -#[graphql( - schema_path = "graphql/schema.json", - query_path = "graphql/subscriptions/component_processed_events_totals.graphql", - response_derives = "Debug" -)] -pub struct ComponentProcessedEventsTotalsSubscription; - /// ComponentAllocatedBytesSubscription contains metrics on the number of allocated bytes /// that have been processed by a Vector instance, against specific components. #[derive(GraphQLQuery, Debug, Copy, Clone)] @@ -74,25 +24,25 @@ pub struct ComponentProcessedEventsTotalsSubscription; )] pub struct ComponentAllocatedBytesSubscription; -/// ComponentProcessedBytesThroughputsSubscription contains metrics on the number of bytes -/// that have been processed between `interval` samples, against specific components. +/// ComponentReceivedBytesThroughputsSubscription contains metrics on the number of bytes +/// that have been received between `interval` samples, against specific components. #[derive(GraphQLQuery, Debug, Copy, Clone)] #[graphql( schema_path = "graphql/schema.json", - query_path = "graphql/subscriptions/component_processed_bytes_throughputs.graphql", + query_path = "graphql/subscriptions/component_received_bytes_throughputs.graphql", response_derives = "Debug" )] -pub struct ComponentProcessedBytesThroughputsSubscription; +pub struct ComponentReceivedBytesThroughputsSubscription; -/// ComponentProcessedBytesTotalsSubscription contains metrics on the number of bytes -/// that have been processed by a Vector instance, against a specific component. +/// ComponentReceivedBytesTotalsSubscription contains metrics on the number of bytes +/// that have been received by a Vector instance, against a specific component. 
#[derive(GraphQLQuery, Debug, Copy, Clone)] #[graphql( schema_path = "graphql/schema.json", - query_path = "graphql/subscriptions/component_processed_bytes_totals.graphql", + query_path = "graphql/subscriptions/component_received_bytes_totals.graphql", response_derives = "Debug" )] -pub struct ComponentProcessedBytesTotalsSubscription; +pub struct ComponentReceivedBytesTotalsSubscription; /// ComponentReceivedEventsThroughputsSubscription contains metrics on the number of events /// that have been accepted for processing between `interval` samples, against specific components. @@ -114,6 +64,26 @@ pub struct ComponentReceivedEventsThroughputsSubscription; )] pub struct ComponentReceivedEventsTotalsSubscription; +/// ComponentSentBytesThroughputsSubscription contains metrics on the number of bytes +/// that have been sent between `interval` samples, against specific components. +#[derive(GraphQLQuery, Debug, Copy, Clone)] +#[graphql( + schema_path = "graphql/schema.json", + query_path = "graphql/subscriptions/component_sent_bytes_throughputs.graphql", + response_derives = "Debug" +)] +pub struct ComponentSentBytesThroughputsSubscription; + +/// ComponentSentBytesTotalsSubscription contains metrics on the number of bytes +/// that have been sent by a Vector instance, against a specific component. +#[derive(GraphQLQuery, Debug, Copy, Clone)] +#[graphql( + schema_path = "graphql/schema.json", + query_path = "graphql/subscriptions/component_sent_bytes_totals.graphql", + response_derives = "Debug" )] +pub struct ComponentSentBytesTotalsSubscription; + /// ComponentSentEventsThroughputsSubscription contains metrics on the number of events /// that have been emitted between `interval` samples, against specific components. #[derive(GraphQLQuery, Debug, Copy, Clone)] #[graphql( schema_path = "graphql/schema.json", @@ -188,47 +158,17 @@ pub trait MetricsSubscriptionExt { interval: i64, ) -> BoxedSubscription; - /// Executes an events processed metrics subscription. - fn processed_events_total_subscription( + /// Executes a component bytes received totals subscription. + fn component_received_bytes_totals_subscription( &self, interval: i64, - ) -> crate::BoxedSubscription; + ) -> crate::BoxedSubscription; - /// Executes an events processed throughput subscription. - fn processed_events_throughput_subscription( + /// Executes a component bytes received throughput subscription. + fn component_received_bytes_throughputs_subscription( &self, interval: i64, - ) -> crate::BoxedSubscription; - - /// Executes a bytes processed throughput subscription. - fn processed_bytes_throughput_subscription( - &self, - interval: i64, - ) -> crate::BoxedSubscription; - - /// Executes a component events processed totals subscription - fn component_processed_events_totals_subscription( - &self, - interval: i64, - ) -> crate::BoxedSubscription; - - /// Executes a component events processed throughputs subscription. - fn component_processed_events_throughputs_subscription( - &self, - interval: i64, - ) -> crate::BoxedSubscription; - - /// Executes a component bytes processed totals subscription. - fn component_processed_bytes_totals_subscription( - &self, - interval: i64, - ) -> crate::BoxedSubscription; - - /// Executes a component bytes processed throughputs subscription. - fn component_processed_bytes_throughputs_subscription( - &self, - interval: i64, - ) -> crate::BoxedSubscription; + ) -> crate::BoxedSubscription; /// Executes a component received events totals subscription. 
fn component_received_events_totals_subscription( @@ -242,6 +182,18 @@ pub trait MetricsSubscriptionExt { interval: i64, ) -> crate::BoxedSubscription; + /// Executes a component bytes sent totals subscription. + fn component_sent_bytes_totals_subscription( + &self, + interval: i64, + ) -> crate::BoxedSubscription; + + /// Executes a component bytes sent throughput subscription. + fn component_sent_bytes_throughputs_subscription( + &self, + interval: i64, + ) -> crate::BoxedSubscription; + /// Executes a component events totals subscription. fn component_sent_events_totals_subscription( &self, @@ -268,101 +220,42 @@ impl MetricsSubscriptionExt for crate::SubscriptionClient { self.start::(&request_body) } - /// Executes an events processed metrics subscription. - fn processed_events_total_subscription( - &self, - interval: i64, - ) -> BoxedSubscription { - let request_body = ProcessedEventsTotalSubscription::build_query( - processed_events_total_subscription::Variables { interval }, - ); - - self.start::(&request_body) - } - - /// Executes an events processed throughput subscription. - fn processed_events_throughput_subscription( - &self, - interval: i64, - ) -> BoxedSubscription { - let request_body = ProcessedEventsThroughputSubscription::build_query( - processed_events_throughput_subscription::Variables { interval }, - ); - - self.start::(&request_body) - } - - /// Executes a bytes processed throughput subscription. - fn processed_bytes_throughput_subscription( - &self, - interval: i64, - ) -> BoxedSubscription { - let request_body = ProcessedBytesThroughputSubscription::build_query( - processed_bytes_throughput_subscription::Variables { interval }, - ); - - self.start::(&request_body) - } - - /// Executes an all component events processed totals subscription. - fn component_processed_events_totals_subscription( - &self, - interval: i64, - ) -> BoxedSubscription { - let request_body = ComponentProcessedEventsTotalsSubscription::build_query( - component_processed_events_totals_subscription::Variables { interval }, - ); - - self.start::(&request_body) - } - - /// Executes an all component events processed throughputs subscription. - fn component_processed_events_throughputs_subscription( + /// Executes an all component allocated bytes subscription. + fn component_allocated_bytes_subscription( &self, interval: i64, - ) -> BoxedSubscription { - let request_body = ComponentProcessedEventsThroughputsSubscription::build_query( - component_processed_events_throughputs_subscription::Variables { interval }, + ) -> BoxedSubscription { + let request_body = ComponentAllocatedBytesSubscription::build_query( + component_allocated_bytes_subscription::Variables { interval }, ); - self.start::(&request_body) + self.start::(&request_body) } - /// Executes an all component bytes processed totals subscription. - fn component_processed_bytes_totals_subscription( + /// Executes an all component bytes received totals subscription. + fn component_received_bytes_totals_subscription( &self, interval: i64, - ) -> BoxedSubscription { - let request_body = ComponentProcessedBytesTotalsSubscription::build_query( - component_processed_bytes_totals_subscription::Variables { interval }, + ) -> BoxedSubscription { + let request_body = ComponentReceivedBytesTotalsSubscription::build_query( + component_received_bytes_totals_subscription::Variables { interval }, ); - self.start::(&request_body) + self.start::(&request_body) } - /// Executes an all component bytes processed throughputs subscription. 
- fn component_processed_bytes_throughputs_subscription( + /// Executes a component bytes received throughput subscription. + fn component_received_bytes_throughputs_subscription( &self, interval: i64, - ) -> BoxedSubscription { - let request_body = ComponentProcessedBytesThroughputsSubscription::build_query( - component_processed_bytes_throughputs_subscription::Variables { interval }, + ) -> BoxedSubscription { + let request_body = ComponentReceivedBytesThroughputsSubscription::build_query( + component_received_bytes_throughputs_subscription::Variables { interval }, ); - self.start::(&request_body) + self.start::(&request_body) } - /// Executes an all component allocated bytes subscription. - fn component_allocated_bytes_subscription( - &self, - interval: i64, - ) -> BoxedSubscription { - let request_body = ComponentAllocatedBytesSubscription::build_query( - component_allocated_bytes_subscription::Variables { interval }, - ); - - self.start::(&request_body) - } /// Executes an all component received events totals subscription. fn component_received_events_totals_subscription( &self, @@ -387,6 +280,30 @@ impl MetricsSubscriptionExt for crate::SubscriptionClient { self.start::(&request_body) } + /// Executes an all component bytes sent totals subscription. + fn component_sent_bytes_totals_subscription( + &self, + interval: i64, + ) -> BoxedSubscription { + let request_body = ComponentSentBytesTotalsSubscription::build_query( + component_sent_bytes_totals_subscription::Variables { interval }, + ); + + self.start::(&request_body) + } + + /// Executes a component bytes sent throughput subscription. + fn component_sent_bytes_throughputs_subscription( + &self, + interval: i64, + ) -> BoxedSubscription { + let request_body = ComponentSentBytesThroughputsSubscription::build_query( + component_sent_bytes_throughputs_subscription::Variables { interval }, + ); + + self.start::(&request_body) + } + /// Executes a component sent events totals subscription. 
fn component_sent_events_totals_subscription( &self, diff --git a/lib/vector-api-client/tests/queries/file_source_metrics.graphql b/lib/vector-api-client/tests/queries/file_source_metrics.graphql index 2a4cb2aae2fe6..7b9d2afb0f10e 100644 --- a/lib/vector-api-client/tests/queries/file_source_metrics.graphql +++ b/lib/vector-api-client/tests/queries/file_source_metrics.graphql @@ -11,9 +11,6 @@ query FileSourceMetricsQuery($after: String, $before: String, $first: Int, $last edges { node { name - processedEventsTotal { - processedEventsTotal - } receivedEventsTotal { receivedEventsTotal } diff --git a/lib/vector-common/src/internal_event/events_received.rs b/lib/vector-common/src/internal_event/events_received.rs index ee9ffa3ef0ee8..4021b3c578143 100644 --- a/lib/vector-common/src/internal_event/events_received.rs +++ b/lib/vector-common/src/internal_event/events_received.rs @@ -7,7 +7,6 @@ crate::registered_event!( EventsReceived => { events_count: Histogram = register_histogram!("component_received_events_count"), events: Counter = register_counter!("component_received_events_total"), - events_in: Counter = register_counter!("events_in_total"), event_bytes: Counter = register_counter!("component_received_event_bytes_total"), } @@ -19,7 +18,6 @@ crate::registered_event!( #[allow(clippy::cast_precision_loss)] self.events_count.record(count as f64); self.events.increment(count as u64); - self.events_in.increment(count as u64); self.event_bytes.increment(byte_size as u64); } ); diff --git a/lib/vector-common/src/internal_event/events_sent.rs b/lib/vector-common/src/internal_event/events_sent.rs index 061d11b6527af..7d9986fdf63c4 100644 --- a/lib/vector-common/src/internal_event/events_sent.rs +++ b/lib/vector-common/src/internal_event/events_sent.rs @@ -14,11 +14,6 @@ crate::registered_event!( } else { register_counter!("component_sent_events_total") }, - events_out: Counter = if let Some(output) = &self.output { - register_counter!("events_out_total", "output" => output.clone()) - } else { - register_counter!("events_out_total") - }, event_bytes: Counter = if let Some(output) = &self.output { register_counter!("component_sent_event_bytes_total", "output" => output.clone()) } else { @@ -40,7 +35,6 @@ crate::registered_event!( } self.events.increment(count as u64); - self.events_out.increment(count as u64); self.event_bytes.increment(byte_size as u64); } ); diff --git a/lib/vector-core/src/event/metric/mod.rs b/lib/vector-core/src/event/metric/mod.rs index 849823e301035..392e43a6cda74 100644 --- a/lib/vector-core/src/event/metric/mod.rs +++ b/lib/vector-core/src/event/metric/mod.rs @@ -430,7 +430,7 @@ impl Display for Metric { /// /// example: /// ```text - /// 2020-08-12T20:23:37.248661343Z vector_processed_bytes_total{component_kind="sink",component_type="blackhole"} = 6391 + /// 2020-08-12T20:23:37.248661343Z vector_received_bytes_total{component_kind="sink",component_type="blackhole"} = 6391 /// ``` fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), fmt::Error> { if let Some(timestamp) = &self.data.time.timestamp { diff --git a/lib/vector-core/src/metrics/mod.rs b/lib/vector-core/src/metrics/mod.rs index b842f90812e28..375dba4af624b 100644 --- a/lib/vector-core/src/metrics/mod.rs +++ b/lib/vector-core/src/metrics/mod.rs @@ -161,28 +161,6 @@ impl Controller { let mut metrics = self.recorder.with_registry(Registry::visit_metrics); - // Add aliases for deprecated metrics - for i in 0..metrics.len() { - let metric = &metrics[i]; - match metric.name() { - "component_sent_events_total" => { - 
let alias = metric.clone().with_name("processed_events_total"); - metrics.push(alias); - } - "component_sent_bytes_total" if metric.tag_matches("component_kind", "sink") => { - let alias = metric.clone().with_name("processed_bytes_total"); - metrics.push(alias); - } - "component_received_bytes_total" - if metric.tag_matches("component_kind", "source") => - { - let alias = metric.clone().with_name("processed_bytes_total"); - metrics.push(alias); - } - _ => {} - } - } - #[allow(clippy::cast_precision_loss)] let value = (metrics.len() + 2) as f64; metrics.push(Metric::from_metric_kv( diff --git a/src/api/schema/metrics/events_in.rs b/src/api/schema/metrics/events_in.rs deleted file mode 100644 index b9550457d09e0..0000000000000 --- a/src/api/schema/metrics/events_in.rs +++ /dev/null @@ -1,42 +0,0 @@ -use async_graphql::Object; -use chrono::{DateTime, Utc}; - -use crate::event::{Metric, MetricValue}; - -pub struct EventsInTotal(Metric); - -impl EventsInTotal { - pub const fn new(m: Metric) -> Self { - Self(m) - } - - pub fn get_timestamp(&self) -> Option> { - self.0.timestamp() - } - - pub fn get_events_in_total(&self) -> f64 { - match self.0.value() { - MetricValue::Counter { value } => *value, - _ => 0.00, - } - } -} - -#[Object] -impl EventsInTotal { - /// Metric timestamp - pub async fn timestamp(&self) -> Option> { - self.get_timestamp() - } - - /// Total incoming events - pub async fn events_in_total(&self) -> f64 { - self.get_events_in_total() - } -} - -impl From for EventsInTotal { - fn from(m: Metric) -> Self { - Self(m) - } -} diff --git a/src/api/schema/metrics/events_out.rs b/src/api/schema/metrics/events_out.rs deleted file mode 100644 index de565d56bf56f..0000000000000 --- a/src/api/schema/metrics/events_out.rs +++ /dev/null @@ -1,42 +0,0 @@ -use async_graphql::Object; -use chrono::{DateTime, Utc}; - -use crate::event::{Metric, MetricValue}; - -pub struct EventsOutTotal(Metric); - -impl EventsOutTotal { - pub const fn new(m: Metric) -> Self { - Self(m) - } - - pub fn get_timestamp(&self) -> Option> { - self.0.timestamp() - } - - pub fn get_events_out_total(&self) -> f64 { - match self.0.value() { - MetricValue::Counter { value } => *value, - _ => 0.00, - } - } -} - -#[Object] -impl EventsOutTotal { - /// Metric timestamp - pub async fn timestamp(&self) -> Option> { - self.get_timestamp() - } - - /// Total outgoing events - pub async fn events_out_total(&self) -> f64 { - self.get_events_out_total() - } -} - -impl From for EventsOutTotal { - fn from(m: Metric) -> Self { - Self(m) - } -} diff --git a/src/api/schema/metrics/filter.rs b/src/api/schema/metrics/filter.rs index 0c61a5794fffe..bd094d64ce1c5 100644 --- a/src/api/schema/metrics/filter.rs +++ b/src/api/schema/metrics/filter.rs @@ -5,8 +5,8 @@ use tokio::time::Duration; use tokio_stream::{Stream, StreamExt}; use super::{ - filter_output_metric, EventsInTotal, EventsOutTotal, OutputThroughput, ProcessedBytesTotal, - ProcessedEventsTotal, ReceivedEventsTotal, SentEventsTotal, + filter_output_metric, OutputThroughput, ReceivedBytesTotal, ReceivedEventsTotal, + SentBytesTotal, SentEventsTotal, }; use crate::{ config::ComponentKey, @@ -46,31 +46,20 @@ fn sum_metrics_owned>(metrics: I) -> Option { - fn processed_events_total(&self) -> Option; - fn processed_bytes_total(&self) -> Option; + fn received_bytes_total(&self) -> Option; fn received_events_total(&self) -> Option; - fn events_in_total(&self) -> Option; - fn events_out_total(&self) -> Option; + fn sent_bytes_total(&self) -> Option; fn sent_events_total(&self) -> Option; } 
impl<'a> MetricsFilter<'a> for Vec { - fn processed_events_total(&self) -> Option { - let sum = sum_metrics(self.iter().filter(|m| m.name() == "processed_events_total"))?; - - Some(ProcessedEventsTotal::new(sum)) - } - - fn processed_bytes_total(&self) -> Option { - let sum = sum_metrics(self.iter().filter(|m| m.name() == "processed_bytes_total"))?; - - Some(ProcessedBytesTotal::new(sum)) - } - - fn events_in_total(&self) -> Option { - let sum = sum_metrics(self.iter().filter(|m| m.name() == "events_in_total"))?; + fn received_bytes_total(&self) -> Option { + let sum = sum_metrics( + self.iter() + .filter(|m| m.name() == "component_received_bytes_total"), + )?; - Some(EventsInTotal::new(sum)) + Some(ReceivedBytesTotal::new(sum)) } fn received_events_total(&self) -> Option { @@ -82,10 +71,13 @@ impl<'a> MetricsFilter<'a> for Vec { Some(ReceivedEventsTotal::new(sum)) } - fn events_out_total(&self) -> Option { - let sum = sum_metrics(self.iter().filter(|m| m.name() == "events_out_total"))?; + fn sent_bytes_total(&self) -> Option { + let sum = sum_metrics( + self.iter() + .filter(|m| m.name() == "component_sent_bytes_total"), + )?; - Some(EventsOutTotal::new(sum)) + Some(SentBytesTotal::new(sum)) } fn sent_events_total(&self) -> Option { @@ -99,24 +91,14 @@ impl<'a> MetricsFilter<'a> for Vec { } impl<'a> MetricsFilter<'a> for Vec<&'a Metric> { - fn processed_events_total(&self) -> Option { - let sum = sum_metrics( - self.iter() - .filter(|m| m.name() == "processed_events_total") - .copied(), - )?; - - Some(ProcessedEventsTotal::new(sum)) - } - - fn processed_bytes_total(&self) -> Option { + fn received_bytes_total(&self) -> Option { let sum = sum_metrics( self.iter() - .filter(|m| m.name() == "processed_bytes_total") + .filter(|m| m.name() == "component_received_bytes_total") .copied(), )?; - Some(ProcessedBytesTotal::new(sum)) + Some(ReceivedBytesTotal::new(sum)) } fn received_events_total(&self) -> Option { @@ -129,24 +111,14 @@ impl<'a> MetricsFilter<'a> for Vec<&'a Metric> { Some(ReceivedEventsTotal::new(sum)) } - fn events_in_total(&self) -> Option { - let sum = sum_metrics( - self.iter() - .filter(|m| m.name() == "events_in_total") - .copied(), - )?; - - Some(EventsInTotal::new(sum)) - } - - fn events_out_total(&self) -> Option { + fn sent_bytes_total(&self) -> Option { let sum = sum_metrics( self.iter() - .filter(|m| m.name() == "events_out_total") + .filter(|m| m.name() == "component_sent_bytes_total") .copied(), )?; - Some(EventsOutTotal::new(sum)) + Some(SentBytesTotal::new(sum)) } fn sent_events_total(&self) -> Option { @@ -202,7 +174,7 @@ pub fn by_component_key(component_key: &ComponentKey) -> Vec { type MetricFilterFn = dyn Fn(&Metric) -> bool + Send + Sync; /// Returns a stream of `Vec`, where `metric_name` matches the name of the metric -/// (e.g. "processed_events_total"), and the value is derived from `MetricValue::Counter`. Uses a +/// (e.g. "component_sent_events_total"), and the value is derived from `MetricValue::Counter`. Uses a /// local cache to match against the `component_id` of a metric, to return results only when /// the value of a current iteration is greater than the previous. This is useful for the client /// to be notified as metrics increase without returning 'empty' or identical results. @@ -230,7 +202,7 @@ pub fn component_counter_metrics( } /// Returns a stream of `Vec`, where `metric_name` matches the name of the metric -/// (e.g. "processed_events_total"), and the value is derived from `MetricValue::Gauge`. Uses a +/// (e.g. 
"component_sent_events_total"), and the value is derived from `MetricValue::Gauge`. Uses a /// local cache to match against the `component_id` of a metric, to return results only when /// the value of a current iteration is greater than the previous. This is useful for the client /// to be notified as metrics increase without returning 'empty' or identical results. diff --git a/src/api/schema/metrics/mod.rs b/src/api/schema/metrics/mod.rs index d883d560645e2..11b8abc51ddc5 100644 --- a/src/api/schema/metrics/mod.rs +++ b/src/api/schema/metrics/mod.rs @@ -1,12 +1,10 @@ mod allocated_bytes; mod errors; -mod events_in; -mod events_out; pub mod filter; mod output; -mod processed_bytes; -mod processed_events; +mod received_bytes; mod received_events; +mod sent_bytes; mod sent_events; mod sink; pub mod source; @@ -20,19 +18,15 @@ pub use allocated_bytes::{AllocatedBytes, ComponentAllocatedBytes}; use async_graphql::{Interface, Object, Subscription}; use chrono::{DateTime, Utc}; pub use errors::{ComponentErrorsTotal, ErrorsTotal}; -pub use events_in::EventsInTotal; -pub use events_out::EventsOutTotal; pub use filter::*; pub use output::*; -pub use processed_bytes::{ - ComponentProcessedBytesThroughput, ComponentProcessedBytesTotal, ProcessedBytesTotal, -}; -pub use processed_events::{ - ComponentProcessedEventsThroughput, ComponentProcessedEventsTotal, ProcessedEventsTotal, +pub use received_bytes::{ + ComponentReceivedBytesThroughput, ComponentReceivedBytesTotal, ReceivedBytesTotal, }; pub use received_events::{ ComponentReceivedEventsThroughput, ComponentReceivedEventsTotal, ReceivedEventsTotal, }; +pub use sent_bytes::{ComponentSentBytesThroughput, ComponentSentBytesTotal, SentBytesTotal}; pub use sent_events::{ComponentSentEventsThroughput, ComponentSentEventsTotal, SentEventsTotal}; pub use sink::{IntoSinkMetrics, SinkMetrics}; pub use source::{IntoSourceMetrics, SourceMetrics}; @@ -46,8 +40,6 @@ use crate::config::ComponentKey; #[graphql(field(name = "timestamp", type = "Option>"))] pub enum MetricType { Uptime(Uptime), - ProcessedEventsTotal(ProcessedEventsTotal), - ProcessedBytesTotal(ProcessedBytesTotal), } #[derive(Default)] @@ -78,57 +70,6 @@ impl MetricsSubscription { }) } - /// Event processing metrics. - async fn processed_events_total( - &self, - #[graphql(default = 1000, validator(minimum = 10, maximum = 60_000))] interval: i32, - ) -> impl Stream { - get_metrics(interval).filter_map(|m| match m.name() { - "processed_events_total" => Some(ProcessedEventsTotal::new(m)), - _ => None, - }) - } - - /// Event processing throughput sampled over the provided millisecond `interval`. - async fn processed_events_throughput( - &self, - #[graphql(default = 1000, validator(minimum = 10, maximum = 60_000))] interval: i32, - ) -> impl Stream { - counter_throughput(interval, &|m| m.name() == "processed_events_total") - .map(|(_, throughput)| throughput as i64) - } - - /// Component event processing throughput metrics over `interval`. - async fn component_processed_events_throughputs( - &self, - #[graphql(default = 1000, validator(minimum = 10, maximum = 60_000))] interval: i32, - ) -> impl Stream> { - component_counter_throughputs(interval, &|m| m.name() == "processed_events_total").map( - |m| { - m.into_iter() - .map(|(m, throughput)| { - ComponentProcessedEventsThroughput::new( - ComponentKey::from(m.tag_value("component_id").unwrap()), - throughput as i64, - ) - }) - .collect() - }, - ) - } - - /// Component event processing metrics over `interval`. 
- async fn component_processed_events_totals( - &self, - #[graphql(default = 1000, validator(minimum = 10, maximum = 60_000))] interval: i32, - ) -> impl Stream> { - component_counter_metrics(interval, &|m| m.name() == "processed_events_total").map(|m| { - m.into_iter() - .map(ComponentProcessedEventsTotal::new) - .collect() - }) - } - /// Total received events metrics #[graphql(deprecation = "Use component_received_events_totals instead")] async fn received_events_total( @@ -231,53 +172,64 @@ impl MetricsSubscription { }) } - /// Byte processing metrics. - async fn processed_bytes_total( + /// Component bytes received metrics over `interval`. + async fn component_received_bytes_totals( &self, #[graphql(default = 1000, validator(minimum = 10, maximum = 60_000))] interval: i32, - ) -> impl Stream { - get_metrics(interval).filter_map(|m| match m.name() { - "processed_bytes_total" => Some(ProcessedBytesTotal::new(m)), - _ => None, - }) + ) -> impl Stream> { + component_counter_metrics(interval, &|m| m.name() == "component_received_bytes_total").map( + |m| { + m.into_iter() + .map(ComponentReceivedBytesTotal::new) + .collect() + }, + ) } - /// Byte processing throughput sampled over a provided millisecond `interval`. - async fn processed_bytes_throughput( + /// Component bytes received throughput over `interval` + async fn component_received_bytes_throughputs( &self, #[graphql(default = 1000, validator(minimum = 10, maximum = 60_000))] interval: i32, - ) -> impl Stream { - counter_throughput(interval, &|m| m.name() == "processed_bytes_total") - .map(|(_, throughput)| throughput as i64) + ) -> impl Stream> { + component_counter_throughputs(interval, &|m| m.name() == "component_received_bytes_total") + .map(|m| { + m.into_iter() + .map(|(m, throughput)| { + ComponentReceivedBytesThroughput::new( + ComponentKey::from(m.tag_value("component_id").unwrap()), + throughput as i64, + ) + }) + .collect() + }) } - /// Component byte processing metrics over `interval`. - async fn component_processed_bytes_totals( + /// Component bytes sent metrics over `interval`. + async fn component_sent_bytes_totals( &self, #[graphql(default = 1000, validator(minimum = 10, maximum = 60_000))] interval: i32, - ) -> impl Stream> { - component_counter_metrics(interval, &|m| m.name() == "processed_bytes_total").map(|m| { - m.into_iter() - .map(ComponentProcessedBytesTotal::new) - .collect() - }) + ) -> impl Stream> { + component_counter_metrics(interval, &|m| m.name() == "component_sent_bytes_total") + .map(|m| m.into_iter().map(ComponentSentBytesTotal::new).collect()) } - /// Component byte processing throughput over `interval` - async fn component_processed_bytes_throughputs( + /// Component bytes sent throughput over `interval` + async fn component_sent_bytes_throughputs( &self, #[graphql(default = 1000, validator(minimum = 10, maximum = 60_000))] interval: i32, - ) -> impl Stream> { - component_counter_throughputs(interval, &|m| m.name() == "processed_bytes_total").map(|m| { - m.into_iter() - .map(|(m, throughput)| { - ComponentProcessedBytesThroughput::new( - ComponentKey::from(m.tag_value("component_id").unwrap()), - throughput as i64, - ) - }) - .collect() - }) + ) -> impl Stream> { + component_counter_throughputs(interval, &|m| m.name() == "component_sent_bytes_total").map( + |m| { + m.into_iter() + .map(|(m, throughput)| { + ComponentSentBytesThroughput::new( + ComponentKey::from(m.tag_value("component_id").unwrap()), + throughput as i64, + ) + }) + .collect() + }, + ) } /// Total error metrics. 
@@ -325,8 +277,6 @@ impl MetricsSubscription { ) -> impl Stream { get_metrics(interval).filter_map(|m| match m.name() { "uptime_seconds" => Some(MetricType::Uptime(m.into())), - "processed_events_total" => Some(MetricType::ProcessedEventsTotal(m.into())), - "processed_bytes_total" => Some(MetricType::ProcessedBytesTotal(m.into())), _ => None, }) } diff --git a/src/api/schema/metrics/processed_bytes.rs b/src/api/schema/metrics/received_bytes.rs similarity index 57% rename from src/api/schema/metrics/processed_bytes.rs rename to src/api/schema/metrics/received_bytes.rs index 9711272cb08e3..516631559cbae 100644 --- a/src/api/schema/metrics/processed_bytes.rs +++ b/src/api/schema/metrics/received_bytes.rs @@ -6,9 +6,9 @@ use crate::{ event::{Metric, MetricValue}, }; -pub struct ProcessedBytesTotal(Metric); +pub struct ReceivedBytesTotal(Metric); -impl ProcessedBytesTotal { +impl ReceivedBytesTotal { pub const fn new(m: Metric) -> Self { Self(m) } @@ -17,7 +17,7 @@ impl ProcessedBytesTotal { self.0.timestamp() } - pub fn get_processed_bytes_total(&self) -> f64 { + pub fn get_received_bytes_total(&self) -> f64 { match self.0.value() { MetricValue::Counter { value } => *value, _ => 0.00, @@ -26,32 +26,33 @@ impl ProcessedBytesTotal { } #[Object] -impl ProcessedBytesTotal { - /// Metric timestamp +impl ReceivedBytesTotal { + /// Metric timestamp. pub async fn timestamp(&self) -> Option> { self.get_timestamp() } - /// Total number of bytes processed - pub async fn processed_bytes_total(&self) -> f64 { - self.get_processed_bytes_total() + /// Total number of bytes received. + pub async fn received_bytes_total(&self) -> f64 { + self.get_received_bytes_total() } } -impl From for ProcessedBytesTotal { +impl From for ReceivedBytesTotal { fn from(m: Metric) -> Self { Self(m) } } -pub struct ComponentProcessedBytesTotal { +pub struct ComponentReceivedBytesTotal { component_key: ComponentKey, metric: Metric, } -impl ComponentProcessedBytesTotal { - /// Returns a new `ComponentProcessedBytesTotal` struct, which is a GraphQL type. The - /// component id is hoisted for clear field resolution in the resulting payload +impl ComponentReceivedBytesTotal { + /// Returns a new `ComponentReceivedBytesTotal`. + /// + /// Expects that the metric contains a tag for the component ID the metric is referenced to. pub fn new(metric: Metric) -> Self { let component_key = metric.tag_value("component_id").expect( "Returned a metric without a `component_id`, which shouldn't happen. Please report.", @@ -66,25 +67,25 @@ impl ComponentProcessedBytesTotal { } #[Object] -impl ComponentProcessedBytesTotal { - /// Component id +impl ComponentReceivedBytesTotal { + /// Component ID. async fn component_id(&self) -> &str { self.component_key.id() } - /// Bytes processed total metric - async fn metric(&self) -> ProcessedBytesTotal { - ProcessedBytesTotal::new(self.metric.clone()) + /// Metric for total bytes received. + async fn metric(&self) -> ReceivedBytesTotal { + ReceivedBytesTotal::new(self.metric.clone()) } } -pub struct ComponentProcessedBytesThroughput { +pub struct ComponentReceivedBytesThroughput { component_key: ComponentKey, throughput: i64, } -impl ComponentProcessedBytesThroughput { - /// Returns a new `ComponentProcessedBytesThroughput`, set to the provided id/throughput values +impl ComponentReceivedBytesThroughput { + /// Returns a new `ComponentReceivedBytesThroughput` for the given component. 
pub const fn new(component_key: ComponentKey, throughput: i64) -> Self { Self { component_key, @@ -94,13 +95,13 @@ impl ComponentProcessedBytesThroughput { } #[Object] -impl ComponentProcessedBytesThroughput { +impl ComponentReceivedBytesThroughput { - /// Component id + /// Component ID. async fn component_id(&self) -> &str { self.component_key.id() } - /// Bytes processed throughput + /// Throughput of bytes received. async fn throughput(&self) -> i64 { self.throughput } diff --git a/src/api/schema/metrics/processed_events.rs b/src/api/schema/metrics/sent_bytes.rs similarity index 57% rename from src/api/schema/metrics/processed_events.rs rename to src/api/schema/metrics/sent_bytes.rs index 9b3a5c974fc3e..02fb833a39adc 100644 --- a/src/api/schema/metrics/processed_events.rs +++ b/src/api/schema/metrics/sent_bytes.rs @@ -6,9 +6,9 @@ use crate::{ event::{Metric, MetricValue}, }; -pub struct ProcessedEventsTotal(Metric); +pub struct SentBytesTotal(Metric); -impl ProcessedEventsTotal { +impl SentBytesTotal { pub const fn new(m: Metric) -> Self { Self(m) } @@ -17,7 +17,7 @@ impl ProcessedEventsTotal { self.0.timestamp() } - pub fn get_processed_events_total(&self) -> f64 { + pub fn get_sent_bytes_total(&self) -> f64 { match self.0.value() { MetricValue::Counter { value } => *value, _ => 0.00, @@ -26,32 +26,33 @@ impl ProcessedEventsTotal { } #[Object] -impl ProcessedEventsTotal { - /// Metric timestamp +impl SentBytesTotal { + /// Metric timestamp. pub async fn timestamp(&self) -> Option> { self.get_timestamp() } - /// Total number of events processed - pub async fn processed_events_total(&self) -> f64 { - self.get_processed_events_total() + /// Total number of bytes sent. + pub async fn sent_bytes_total(&self) -> f64 { + self.get_sent_bytes_total() } } -impl From for ProcessedEventsTotal { +impl From for SentBytesTotal { fn from(m: Metric) -> Self { Self(m) } } -pub struct ComponentProcessedEventsTotal { +pub struct ComponentSentBytesTotal { component_key: ComponentKey, metric: Metric, } -impl ComponentProcessedEventsTotal { - /// Returns a new `ComponentProcessedEventsTotal` struct, which is a GraphQL type. The - /// component id is hoisted for clear field resolution in the resulting payload +impl ComponentSentBytesTotal { + /// Returns a new `ComponentSentBytesTotal` for the given metric. + /// + /// Expects that the metric contains a tag for the component ID the metric is referenced to. pub fn new(metric: Metric) -> Self { let component_key = metric.tag_value("component_id").expect( "Returned a metric without a `component_id`, which shouldn't happen. Please report.", @@ -66,25 +67,25 @@ impl ComponentProcessedEventsTotal { } #[Object] -impl ComponentProcessedEventsTotal { - /// Component id +impl ComponentSentBytesTotal { + /// Component ID. async fn component_id(&self) -> &str { self.component_key.id() } - /// Events processed total metric - async fn metric(&self) -> ProcessedEventsTotal { - ProcessedEventsTotal::new(self.metric.clone()) + /// Metric for total bytes sent. + async fn metric(&self) -> SentBytesTotal { + SentBytesTotal::new(self.metric.clone()) } } -pub struct ComponentProcessedEventsThroughput { +pub struct ComponentSentBytesThroughput { component_key: ComponentKey, throughput: i64, } -impl ComponentProcessedEventsThroughput { - /// Returns a new `ComponentProcessedEventsThroughput`, set to the provided id/throughput values +impl ComponentSentBytesThroughput { + /// Returns a new `ComponentSentBytesThroughput` for the given component. 
pub const fn new(component_key: ComponentKey, throughput: i64) -> Self { Self { component_key, @@ -94,13 +95,13 @@ impl ComponentProcessedEventsThroughput { } #[Object] -impl ComponentProcessedEventsThroughput { - /// Component id +impl ComponentSentBytesThroughput { + /// Component ID. async fn component_id(&self) -> &str { self.component_key.id() } - /// Events processed throughput + /// Throughput of bytes sent. async fn throughput(&self) -> i64 { self.throughput } diff --git a/src/api/schema/metrics/sink/generic.rs b/src/api/schema/metrics/sink/generic.rs index e2a01b55d5961..e0f96e38fbd6d 100644 --- a/src/api/schema/metrics/sink/generic.rs +++ b/src/api/schema/metrics/sink/generic.rs @@ -16,32 +16,17 @@ impl GenericSinkMetrics { #[Object] impl GenericSinkMetrics { - /// Events processed for the current sink - pub async fn processed_events_total(&self) -> Option { - self.0.processed_events_total() - } - - /// Bytes processed for the current sink - pub async fn processed_bytes_total(&self) -> Option { - self.0.processed_bytes_total() - } - - /// Total incoming events for the current sink - pub async fn events_in_total(&self) -> Option { - self.0.events_in_total() - } - /// Total received events for the current sink pub async fn received_events_total(&self) -> Option { self.0.received_events_total() } - /// Total outgoing events for the current sink - pub async fn events_out_total(&self) -> Option { - self.0.events_out_total() + /// Total sent bytes for the current sink + pub async fn sent_bytes_total(&self) -> Option { + self.0.sent_bytes_total() } - /// Total outgoing events for the current sink + /// Total sent events for the current sink pub async fn sent_events_total(&self) -> Option { self.0.sent_events_total() } diff --git a/src/api/schema/metrics/sink/mod.rs b/src/api/schema/metrics/sink/mod.rs index c6c25c3f8a726..97f943a7dbef7 100644 --- a/src/api/schema/metrics/sink/mod.rs +++ b/src/api/schema/metrics/sink/mod.rs @@ -2,28 +2,14 @@ mod generic; use async_graphql::Interface; -use super::{ - EventsInTotal, EventsOutTotal, ProcessedBytesTotal, ProcessedEventsTotal, ReceivedEventsTotal, - SentEventsTotal, -}; +use super::{ReceivedEventsTotal, SentBytesTotal, SentEventsTotal}; use crate::event::Metric; #[derive(Debug, Clone, Interface)] #[graphql( - field(name = "processed_events_total", type = "Option"), - field(name = "processed_bytes_total", type = "Option"), field(name = "received_events_total", type = "Option"), - field( - name = "events_in_total", - type = "Option", - deprecation = "Use received_events_total instead" - ), - field(name = "sent_events_total", type = "Option"), - field( - name = "events_out_total", - type = "Option", - deprecation = "Use sent_events_total instead" - ) + field(name = "sent_bytes_total", type = "Option"), + field(name = "sent_events_total", type = "Option") )] pub enum SinkMetrics { GenericSinkMetrics(generic::GenericSinkMetrics), diff --git a/src/api/schema/metrics/source/file.rs b/src/api/schema/metrics/source/file.rs index 4108f68d7210e..daff4648cc967 100644 --- a/src/api/schema/metrics/source/file.rs +++ b/src/api/schema/metrics/source/file.rs @@ -37,19 +37,9 @@ impl<'a> FileSourceMetricFile<'a> { &*self.name } - /// Metric indicating events processed for the current file - async fn processed_events_total(&self) -> Option { - self.metrics.processed_events_total() - } - - /// Metric indicating bytes processed for the current file - async fn processed_bytes_total(&self) -> Option { - self.metrics.processed_bytes_total() - } - - /// Metric 
indicating incoming events for the current file - async fn events_in_total(&self) -> Option { - self.metrics.events_in_total() + /// Metric indicating bytes received for the current file + async fn received_bytes_total(&self) -> Option { + self.metrics.received_bytes_total() } /// Metric indicating received events for the current file @@ -57,11 +47,6 @@ impl<'a> FileSourceMetricFile<'a> { self.metrics.received_events_total() } - /// Metric indicating outgoing events for the current file - async fn events_out_total(&self) -> Option { - self.metrics.events_out_total() - } - /// Metric indicating outgoing events for the current file async fn sent_events_total(&self) -> Option { self.metrics.sent_events_total() @@ -93,38 +78,24 @@ impl FileSourceMetrics { #[derive(Enum, Copy, Clone, Eq, PartialEq)] pub enum FileSourceMetricFilesSortFieldName { Name, - ProcessedBytesTotal, - ProcessedEventsTotal, + ReceivedBytesTotal, ReceivedEventsTotal, - EventsInTotal, SentEventsTotal, - EventsOutTotal, } impl sort::SortableByField for FileSourceMetricFile<'_> { fn sort(&self, rhs: &Self, field: &FileSourceMetricFilesSortFieldName) -> Ordering { match field { FileSourceMetricFilesSortFieldName::Name => Ord::cmp(&self.name, &rhs.name), - FileSourceMetricFilesSortFieldName::ProcessedBytesTotal => Ord::cmp( - &self - .metrics - .processed_bytes_total() - .map(|m| m.get_processed_bytes_total() as i64) - .unwrap_or(0), - &rhs.metrics - .processed_bytes_total() - .map(|m| m.get_processed_bytes_total() as i64) - .unwrap_or(0), - ), - FileSourceMetricFilesSortFieldName::ProcessedEventsTotal => Ord::cmp( + FileSourceMetricFilesSortFieldName::ReceivedBytesTotal => Ord::cmp( &self .metrics - .processed_events_total() - .map(|m| m.get_processed_events_total() as i64) + .received_bytes_total() + .map(|m| m.get_received_bytes_total() as i64) .unwrap_or(0), &rhs.metrics - .processed_events_total() - .map(|m| m.get_processed_events_total() as i64) + .received_bytes_total() + .map(|m| m.get_received_bytes_total() as i64) .unwrap_or(0), ), FileSourceMetricFilesSortFieldName::ReceivedEventsTotal => Ord::cmp( @@ -138,17 +109,6 @@ impl sort::SortableByField for FileSourceMet .map(|m| m.get_received_events_total() as i64) .unwrap_or(0), ), - FileSourceMetricFilesSortFieldName::EventsInTotal => Ord::cmp( - &self - .metrics - .events_in_total() - .map(|m| m.get_events_in_total() as i64) - .unwrap_or(0), - &rhs.metrics - .events_in_total() - .map(|m| m.get_events_in_total() as i64) - .unwrap_or(0), - ), FileSourceMetricFilesSortFieldName::SentEventsTotal => Ord::cmp( &self .metrics @@ -160,17 +120,6 @@ impl sort::SortableByField for FileSourceMet .map(|m| m.get_sent_events_total() as i64) .unwrap_or(0), ), - FileSourceMetricFilesSortFieldName::EventsOutTotal => Ord::cmp( - &self - .metrics - .events_out_total() - .map(|m| m.get_events_out_total() as i64) - .unwrap_or(0), - &rhs.metrics - .events_out_total() - .map(|m| m.get_events_out_total() as i64) - .unwrap_or(0), - ), } } } @@ -223,19 +172,9 @@ impl FileSourceMetrics { .await } - /// Events processed for the current file source - pub async fn processed_events_total(&self) -> Option { - self.0.processed_events_total() - } - - /// Bytes processed for the current file source - pub async fn processed_bytes_total(&self) -> Option { - self.0.processed_bytes_total() - } - - /// Total incoming events for the current file source - pub async fn events_in_total(&self) -> Option { - self.0.events_in_total() + /// Total received bytes for the current file source + pub async fn 
received_bytes_total(&self) -> Option { + self.0.received_bytes_total() } /// Total received events for the current file source @@ -243,12 +182,7 @@ impl FileSourceMetrics { self.0.received_events_total() } - /// Total outgoing events for the current file source - pub async fn events_out_total(&self) -> Option { - self.0.events_out_total() - } - - /// Total outgoing events for the current file source + /// Total sent events for the current file source pub async fn sent_events_total(&self) -> Option { self.0.sent_events_total() } @@ -272,8 +206,8 @@ mod tests { fn new(name: &'static str, events_processed: f64, bytes_processed: f64) -> Self { Self { name, - events_metric: metric("processed_events_total", events_processed), - bytes_metric: metric("processed_bytes_total", bytes_processed), + events_metric: metric("component_sent_events_total", events_processed), + bytes_metric: metric("component_received_bytes_total", bytes_processed), } } @@ -343,7 +277,7 @@ mod tests { let mut files = vec![t1.get_metric(), t2.get_metric(), t3.get_metric()]; let fields = vec![SortField:: { - field: FileSourceMetricFilesSortFieldName::ProcessedEventsTotal, + field: FileSourceMetricFilesSortFieldName::SentEventsTotal, direction: sort::Direction::Asc, }]; @@ -362,7 +296,7 @@ mod tests { let mut files = vec![t1.get_metric(), t2.get_metric(), t3.get_metric()]; let fields = vec![SortField:: { - field: FileSourceMetricFilesSortFieldName::ProcessedEventsTotal, + field: FileSourceMetricFilesSortFieldName::SentEventsTotal, direction: sort::Direction::Desc, }]; @@ -374,14 +308,14 @@ mod tests { } #[test] - fn processed_bytes_asc() { + fn received_bytes_asc() { let t1 = FileSourceMetricTest::new("a", 1000.00, 100.00); let t2 = FileSourceMetricTest::new("b", 500.00, 300.00); let t3 = FileSourceMetricTest::new("c", 250.00, 200.00); let mut files = vec![t1.get_metric(), t2.get_metric(), t3.get_metric()]; let fields = vec![SortField:: { - field: FileSourceMetricFilesSortFieldName::ProcessedBytesTotal, + field: FileSourceMetricFilesSortFieldName::ReceivedBytesTotal, direction: sort::Direction::Asc, }]; @@ -393,14 +327,14 @@ mod tests { } #[test] - fn processed_bytes_desc() { + fn received_bytes_desc() { let t1 = FileSourceMetricTest::new("a", 1000.00, 100.00); let t2 = FileSourceMetricTest::new("b", 500.00, 300.00); let t3 = FileSourceMetricTest::new("c", 250.00, 200.00); let mut files = vec![t1.get_metric(), t2.get_metric(), t3.get_metric()]; let fields = vec![SortField:: { - field: FileSourceMetricFilesSortFieldName::ProcessedBytesTotal, + field: FileSourceMetricFilesSortFieldName::ReceivedBytesTotal, direction: sort::Direction::Desc, }]; diff --git a/src/api/schema/metrics/source/generic.rs b/src/api/schema/metrics/source/generic.rs index f37cf8b13a716..c66d50a841c91 100644 --- a/src/api/schema/metrics/source/generic.rs +++ b/src/api/schema/metrics/source/generic.rs @@ -16,19 +16,9 @@ impl GenericSourceMetrics { #[Object] impl GenericSourceMetrics { - /// Events processed for the current source - pub async fn processed_events_total(&self) -> Option { - self.0.processed_events_total() - } - - /// Bytes processed for the current source - pub async fn processed_bytes_total(&self) -> Option { - self.0.processed_bytes_total() - } - - /// Total incoming events for the current source - pub async fn events_in_total(&self) -> Option { - self.0.events_in_total() + /// Total received bytes for the current source + pub async fn received_bytes_total(&self) -> Option { + self.0.received_bytes_total() } /// Total received events for the 
current source @@ -36,12 +26,7 @@ impl GenericSourceMetrics { self.0.received_events_total() } - /// Total outgoing events for the current source - pub async fn events_out_total(&self) -> Option { - self.0.events_out_total() - } - - /// Total outgoing events for the current source + /// Total sent events for the current source pub async fn sent_events_total(&self) -> Option { self.0.sent_events_total() } diff --git a/src/api/schema/metrics/source/mod.rs b/src/api/schema/metrics/source/mod.rs index e47cb48cf1333..5463ac0ecf545 100644 --- a/src/api/schema/metrics/source/mod.rs +++ b/src/api/schema/metrics/source/mod.rs @@ -3,28 +3,14 @@ mod generic; use async_graphql::Interface; -use super::{ - EventsInTotal, EventsOutTotal, ProcessedBytesTotal, ProcessedEventsTotal, ReceivedEventsTotal, - SentEventsTotal, -}; +use super::{ReceivedBytesTotal, ReceivedEventsTotal, SentEventsTotal}; use crate::event::Metric; #[derive(Debug, Clone, Interface)] #[graphql( - field(name = "processed_events_total", type = "Option"), - field(name = "processed_bytes_total", type = "Option"), + field(name = "received_bytes_total", type = "Option"), field(name = "received_events_total", type = "Option"), - field( - name = "events_in_total", - type = "Option", - deprecation = "Use received_events_total instead" - ), - field(name = "sent_events_total", type = "Option"), - field( - name = "events_out_total", - type = "Option", - deprecation = "Use sent_events_total instead" - ) + field(name = "sent_events_total", type = "Option") )] pub enum SourceMetrics { GenericSourceMetrics(generic::GenericSourceMetrics), diff --git a/src/api/schema/metrics/transform/generic.rs b/src/api/schema/metrics/transform/generic.rs index e6c5af7c90a0a..0fd2569551473 100644 --- a/src/api/schema/metrics/transform/generic.rs +++ b/src/api/schema/metrics/transform/generic.rs @@ -16,32 +16,12 @@ impl GenericTransformMetrics { #[Object] impl GenericTransformMetrics { - /// Events processed for the current transform - pub async fn processed_events_total(&self) -> Option { - self.0.processed_events_total() - } - - /// Bytes processed for the current transform - pub async fn processed_bytes_total(&self) -> Option { - self.0.processed_bytes_total() - } - - /// Total incoming events for the current transform - pub async fn events_in_total(&self) -> Option { - self.0.events_in_total() - } - /// Total received events for the current transform pub async fn received_events_total(&self) -> Option { self.0.received_events_total() } - /// Total outgoing events for the current transform - pub async fn events_out_total(&self) -> Option { - self.0.events_out_total() - } - - /// Total outgoing events for the current transform + /// Total sent events for the current transform pub async fn sent_events_total(&self) -> Option { self.0.sent_events_total() } diff --git a/src/api/schema/metrics/transform/mod.rs b/src/api/schema/metrics/transform/mod.rs index b408ac490a783..bfdc8a5a08b30 100644 --- a/src/api/schema/metrics/transform/mod.rs +++ b/src/api/schema/metrics/transform/mod.rs @@ -2,28 +2,13 @@ mod generic; use async_graphql::Interface; -use super::{ - EventsInTotal, EventsOutTotal, ProcessedBytesTotal, ProcessedEventsTotal, ReceivedEventsTotal, - SentEventsTotal, -}; +use super::{ReceivedEventsTotal, SentEventsTotal}; use crate::event::Metric; #[derive(Debug, Clone, Interface)] #[graphql( - field(name = "processed_events_total", type = "Option"), - field(name = "processed_bytes_total", type = "Option"), field(name = "received_events_total", type = "Option"), - 
field( - name = "events_in_total", - type = "Option", - deprecation = "Use received_events_total instead" - ), - field(name = "sent_events_total", type = "Option"), - field( - name = "events_out_total", - type = "Option", - deprecation = "Use sent_events_total instead" - ) + field(name = "sent_events_total", type = "Option") )] pub enum TransformMetrics { GenericTransformMetrics(generic::GenericTransformMetrics), diff --git a/src/config/mod.rs b/src/config/mod.rs index c1f949d3506fa..1de078ae873b9 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -1102,7 +1102,7 @@ mod tests { type = "filter" inputs = ["internal_metrics"] condition = """ - .name == "processed_bytes_total" + .name == "component_received_bytes_total" """ [sinks.out] @@ -1133,7 +1133,7 @@ mod tests { type = "filter" inputs = ["internal_metrics"] condition = """ - .name == "processed_bytes_total" + .name == "component_received_bytes_total" """ [sinks.out] diff --git a/src/internal_events/apache_metrics.rs b/src/internal_events/apache_metrics.rs index ac09f4f71ada5..86b5bfbdfd61c 100644 --- a/src/internal_events/apache_metrics.rs +++ b/src/internal_events/apache_metrics.rs @@ -14,6 +14,7 @@ pub struct ApacheMetricsEventsReceived<'a> { } impl<'a> InternalEvent for ApacheMetricsEventsReceived<'a> { + // ## skip check-duplicate-events ## fn emit(self) { trace!(message = "Events received.", count = %self.count, byte_size = %self.byte_size, endpoint = %self.endpoint); counter!( @@ -24,10 +25,6 @@ impl<'a> InternalEvent for ApacheMetricsEventsReceived<'a> { "component_received_event_bytes_total", self.byte_size as u64, "endpoint" => self.endpoint.to_owned(), ); - counter!( - "events_in_total", self.count as u64, - "uri" => self.endpoint.to_owned(), - ); } } diff --git a/src/internal_events/aws_ecs_metrics.rs b/src/internal_events/aws_ecs_metrics.rs index bb394b0f491db..92340ba484110 100644 --- a/src/internal_events/aws_ecs_metrics.rs +++ b/src/internal_events/aws_ecs_metrics.rs @@ -30,8 +30,6 @@ impl<'a> InternalEvent for AwsEcsMetricsEventsReceived<'a> { "component_received_event_bytes_total", self.byte_size as u64, "endpoint" => self.endpoint.to_string(), ); - // deprecated - counter!("events_in_total", self.count as u64); } } diff --git a/src/internal_events/conditions.rs b/src/internal_events/conditions.rs index 4b88685d32edf..31ddeecc60bae 100644 --- a/src/internal_events/conditions.rs +++ b/src/internal_events/conditions.rs @@ -22,7 +22,5 @@ impl<'a> InternalEvent for VrlConditionExecutionError<'a> { "error_type" => error_type::SCRIPT_FAILED, "stage" => error_stage::PROCESSING, ); - // deprecated - counter!("processing_errors_total", 1); } } diff --git a/src/internal_events/docker_logs.rs b/src/internal_events/docker_logs.rs index f02ce627a3310..d126b656a6773 100644 --- a/src/internal_events/docker_logs.rs +++ b/src/internal_events/docker_logs.rs @@ -27,11 +27,6 @@ impl InternalEvent for DockerLogsEventsReceived<'_> { "component_received_event_bytes_total", self.byte_size as u64, "container_name" => self.container_name.to_owned() ); - // deprecated - counter!( - "events_in_total", 1, - "container_name" => self.container_name.to_owned() - ); } } diff --git a/src/internal_events/exec.rs b/src/internal_events/exec.rs index 99f41f143d490..f171ca374fbb1 100644 --- a/src/internal_events/exec.rs +++ b/src/internal_events/exec.rs @@ -33,11 +33,6 @@ impl InternalEvent for ExecEventsReceived<'_> { "component_received_event_bytes_total", self.byte_size as u64, "command" => self.command.to_owned(), ); - // deprecated - counter!( - 
"events_in_total", self.count as u64, - "command" => self.command.to_owned(), - ); } } @@ -65,13 +60,6 @@ impl InternalEvent for ExecFailedError<'_> { "error_code" => io_error_code(&self.error), "stage" => error_stage::RECEIVING, ); - // deprecated - counter!( - "processing_errors_total", 1, - "command" => self.command.to_owned(), - "error_type" => error_type::COMMAND_FAILED, - "stage" => error_stage::RECEIVING, - ); } } @@ -99,13 +87,6 @@ impl InternalEvent for ExecTimeoutError<'_> { "error_type" => error_type::TIMED_OUT, "stage" => error_stage::RECEIVING, ); - // deprecated - counter!( - "processing_errors_total", 1, - "command" => self.command.to_owned(), - "error_type" => error_type::TIMED_OUT, - "stage" => error_stage::RECEIVING, - ); } } @@ -216,14 +197,6 @@ impl InternalEvent for ExecFailedToSignalChildError<'_> { "error_type" => error_type::COMMAND_FAILED, "stage" => error_stage::RECEIVING, ); - // deprecated - counter!( - "processing_errors_total", 1, - "command_code" => format!("{:?}", self.command.as_std()), - "error" => self.error.to_error_code(), - "error_type" => error_type::COMMAND_FAILED, - "stage" => error_stage::RECEIVING, - ); } } diff --git a/src/internal_events/file.rs b/src/internal_events/file.rs index 2e3cb17344fb9..6e76be2f5d398 100644 --- a/src/internal_events/file.rs +++ b/src/internal_events/file.rs @@ -125,10 +125,6 @@ mod source { byte_size = %self.byte_size, file = %self.file ); - counter!( - "events_in_total", self.count as u64, - "file" => self.file.to_owned(), - ); counter!( "component_received_events_total", self.count as u64, "file" => self.file.to_owned(), diff --git a/src/internal_events/fluent.rs b/src/internal_events/fluent.rs index a325dc969eeac..9fbcdc0031e5f 100644 --- a/src/internal_events/fluent.rs +++ b/src/internal_events/fluent.rs @@ -13,7 +13,6 @@ impl InternalEvent for FluentMessageReceived { fn emit(self) { trace!(message = "Received fluent message.", byte_size = %self.byte_size); counter!("component_received_events_total", 1); - counter!("events_in_total", 1); } } diff --git a/src/internal_events/http.rs b/src/internal_events/http.rs index a1879c2379bcb..a016998b7655b 100644 --- a/src/internal_events/http.rs +++ b/src/internal_events/http.rs @@ -58,7 +58,6 @@ impl InternalEvent for HttpEventsReceived<'_> { "http_path" => self.http_path.to_string(), "protocol" => self.protocol, ); - counter!("events_in_total", self.count as u64); } } diff --git a/src/internal_events/http_client_source.rs b/src/internal_events/http_client_source.rs index 6d9d6a1e1c3e8..b5e7ec2d8b68a 100644 --- a/src/internal_events/http_client_source.rs +++ b/src/internal_events/http_client_source.rs @@ -27,11 +27,6 @@ impl InternalEvent for HttpClientEventsReceived { "component_received_event_bytes_total", self.byte_size as u64, "uri" => self.url.clone(), ); - // deprecated - counter!( - "events_in_total", self.count as u64, - "uri" => self.url, - ); } } diff --git a/src/internal_events/kafka.rs b/src/internal_events/kafka.rs index 92afe66b98a53..b17e04e6396b1 100644 --- a/src/internal_events/kafka.rs +++ b/src/internal_events/kafka.rs @@ -54,8 +54,6 @@ impl<'a> InternalEvent for KafkaEventsReceived<'a> { "topic" => self.topic.to_string(), "partition" => self.partition.to_string(), ); - // deprecated - counter!("events_in_total", self.count as u64); } } diff --git a/src/internal_events/kubernetes_logs.rs b/src/internal_events/kubernetes_logs.rs index 02aefa9bbe374..a008fdb4499c1 100644 --- a/src/internal_events/kubernetes_logs.rs +++ 
b/src/internal_events/kubernetes_logs.rs @@ -34,8 +34,7 @@ impl InternalEvent for KubernetesLogsEventsReceived<'_> { let pod_namespace = pod_info.namespace; counter!("component_received_events_total", 1, "pod_name" => pod_name.clone(), "pod_namespace" => pod_namespace.clone()); - counter!("component_received_event_bytes_total", self.byte_size as u64, "pod_name" => pod_name.clone(), "pod_namespace" => pod_namespace.clone()); - counter!("events_in_total", 1, "pod_name" => pod_name, "pod_namespace" => pod_namespace); + counter!("component_received_event_bytes_total", self.byte_size as u64, "pod_name" => pod_name, "pod_namespace" => pod_namespace); } None => { counter!("component_received_events_total", 1); @@ -43,7 +42,6 @@ impl InternalEvent for KubernetesLogsEventsReceived<'_> { "component_received_event_bytes_total", self.byte_size as u64 ); - counter!("events_in_total", 1); } } } diff --git a/src/internal_events/log_to_metric.rs b/src/internal_events/log_to_metric.rs index df9027a40b234..1925dd135efba 100644 --- a/src/internal_events/log_to_metric.rs +++ b/src/internal_events/log_to_metric.rs @@ -30,11 +30,6 @@ impl<'a> InternalEvent for LogToMetricFieldNullError<'a> { "stage" => error_stage::PROCESSING, "null_field" => self.field.to_string(), ); - // deprecated - counter!( - "processing_errors_total", 1, - "error_type" => "field_null", - ); emit!(ComponentEventsDropped:: { count: 1, reason }) } @@ -64,11 +59,6 @@ impl<'a> InternalEvent for LogToMetricParseFloatError<'a> { "stage" => error_stage::PROCESSING, "field" => self.field.to_string(), ); - // deprecated - counter!( - "processing_errors_total", 1, - "error_type" => "parse_error", - ); emit!(ComponentEventsDropped:: { count: 1, reason }) } diff --git a/src/internal_events/mongodb_metrics.rs b/src/internal_events/mongodb_metrics.rs index d5038e54a5f47..eb585fbccf6de 100644 --- a/src/internal_events/mongodb_metrics.rs +++ b/src/internal_events/mongodb_metrics.rs @@ -12,6 +12,7 @@ pub struct MongoDbMetricsEventsReceived<'a> { } impl<'a> InternalEvent for MongoDbMetricsEventsReceived<'a> { + // ## skip check-duplicate-events ## fn emit(self) { trace!( message = "Events received.", @@ -27,11 +28,6 @@ impl<'a> InternalEvent for MongoDbMetricsEventsReceived<'a> { "component_received_event_bytes_total", self.byte_size as u64, "endpoint" => self.endpoint.to_owned(), ); - // deprecated - counter!( - "events_in_total", self.count as u64, - "endpoint" => self.endpoint.to_owned(), - ); } } diff --git a/src/internal_events/nginx_metrics.rs b/src/internal_events/nginx_metrics.rs index d8db19fcb7c60..46da39b6288f4 100644 --- a/src/internal_events/nginx_metrics.rs +++ b/src/internal_events/nginx_metrics.rs @@ -27,11 +27,6 @@ impl<'a> InternalEvent for NginxMetricsEventsReceived<'a> { "component_received_event_bytes_total", self.byte_size as u64, "endpoint" => self.endpoint.to_owned(), ); - // deprecated - counter!( - "events_in_total", self.count as u64, - "endpoint" => self.endpoint.to_owned(), - ); } } diff --git a/src/internal_events/socket.rs b/src/internal_events/socket.rs index 28ac92270f3d6..58c0c3b69f4fb 100644 --- a/src/internal_events/socket.rs +++ b/src/internal_events/socket.rs @@ -60,8 +60,6 @@ impl InternalEvent for SocketEventsReceived { ); counter!("component_received_events_total", self.count as u64, "mode" => mode); counter!("component_received_event_bytes_total", self.byte_size as u64, "mode" => mode); - // deprecated - counter!("events_in_total", self.count as u64, "mode" => mode); } } diff --git a/src/top/cmd.rs b/src/top/cmd.rs 
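(Editor's note) The internal-event hunks above all follow the same pattern: the deprecated `events_in_total` counter (and, in the error events, `processing_errors_total`) is deleted while the `component_received_*` counters remain as the only names emitted. Below is a minimal sketch of the resulting emit body, assuming the `metrics` crate's `counter!` macro and the `InternalEvent` trait path used elsewhere in these files; the `ExampleEventsReceived` struct and its fields are illustrative, not taken from the source.

```rust
use metrics::counter;
use tracing::trace;
use vector_core::internal_event::InternalEvent;

/// Illustrative event (name and fields are hypothetical); it mirrors the
/// shape of the `*EventsReceived` events edited in the hunks above.
struct ExampleEventsReceived<'a> {
    count: usize,
    byte_size: usize,
    endpoint: &'a str,
}

impl<'a> InternalEvent for ExampleEventsReceived<'a> {
    fn emit(self) {
        trace!(message = "Events received.", count = %self.count, byte_size = %self.byte_size);
        counter!(
            "component_received_events_total", self.count as u64,
            "endpoint" => self.endpoint.to_owned(),
        );
        counter!(
            "component_received_event_bytes_total", self.byte_size as u64,
            "endpoint" => self.endpoint.to_owned(),
        );
        // The deprecated `events_in_total` counter is intentionally no longer emitted here.
    }
}
```

The same deletion applies to the deprecated `processing_errors_total` counters in the error events above; only the `component_*` counter names are kept.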
index f967b7d2c98ce..16aa38126548a 100644 --- a/src/top/cmd.rs +++ b/src/top/cmd.rs @@ -1,5 +1,6 @@ use std::time::Duration; +use chrono::Local; use futures_util::future::join_all; use tokio::sync::oneshot; use url::Url; @@ -86,7 +87,9 @@ pub async fn cmd(opts: &super::Opts) -> exitcode::ExitCode { metrics::subscribe(subscription_client, tx.clone(), opts_clone.interval as i64); _ = tx - .send(EventType::ConnectionUpdated(ConnectionStatus::Connected)) + .send(EventType::ConnectionUpdated(ConnectionStatus::Connected( + Local::now(), + ))) .await; // Tasks spawned in metrics::subscribe finish when the subscription // streams have completed. Currently, subscription streams only diff --git a/src/top/dashboard.rs b/src/top/dashboard.rs index 3f73b3424685e..ccf0b49085a5c 100644 --- a/src/top/dashboard.rs +++ b/src/top/dashboard.rs @@ -104,10 +104,25 @@ fn format_metric(total: i64, throughput: i64, human_metrics: bool) -> String { } } +fn format_metric_bytes(total: i64, throughput: i64, human_metrics: bool) -> String { + match total { + 0 => "N/A".to_string(), + v => format!( + "{} ({}/s)", + if human_metrics { + v.human_format_bytes() + } else { + v.thousands_format() + }, + throughput.human_format_bytes() + ), + } +} + const NUM_COLUMNS: usize = if is_allocation_tracking_enabled() { - 9 + 10 } else { - 8 + 9 }; static HEADER: [&str; NUM_COLUMNS] = [ @@ -116,11 +131,12 @@ static HEADER: [&str; NUM_COLUMNS] = [ "Kind", "Type", "Events In", + "Bytes In", "Events Out", - "Bytes", + "Bytes Out", "Errors", #[cfg(feature = "allocation-tracing")] - "Mem Usage Bytes", + "Memory Used", ]; struct Widgets<'a> { @@ -152,15 +168,17 @@ impl<'a> Widgets<'a> { area: Rect, connection_status: &ConnectionStatus, ) { - let text = vec![Spans::from(vec![ + let mut text = vec![ Span::from(self.url_string), Span::styled( format!(" | Sampling @ {}ms", self.opts.interval.thousands_format()), Style::default().fg(Color::Gray), ), Span::from(" | "), - Span::styled(connection_status.to_string(), connection_status.style()), - ])]; + ]; + text.extend(connection_status.as_ui_spans()); + + let text = vec![Spans::from(text)]; let block = Block::default().borders(Borders::ALL).title(Span::styled( "Vector", @@ -201,14 +219,19 @@ impl<'a> Widgets<'a> { r.received_events_throughput_sec, self.opts.human_metrics, ), + format_metric_bytes( + r.received_bytes_total, + r.received_bytes_throughput_sec, + self.opts.human_metrics, + ), format_metric( r.sent_events_total, r.sent_events_throughput_sec, self.opts.human_metrics, ), - format_metric( - r.processed_bytes_total, - r.processed_bytes_throughput_sec, + format_metric_bytes( + r.sent_bytes_total, + r.sent_bytes_throughput_sec, self.opts.human_metrics, ), if self.opts.human_metrics { @@ -217,7 +240,7 @@ impl<'a> Widgets<'a> { r.errors.thousands_format() }, #[cfg(feature = "allocation-tracing")] - r.allocated_bytes.human_format(), + r.allocated_bytes.human_format_bytes(), ]; data.extend_from_slice(&formatted_metrics); @@ -248,26 +271,28 @@ impl<'a> Widgets<'a> { .column_spacing(2) .widths(if is_allocation_tracking_enabled() { &[ - Constraint::Percentage(15), // ID - Constraint::Percentage(6), // Output - Constraint::Percentage(8), // Kind - Constraint::Percentage(10), // Type + Constraint::Percentage(13), // ID + Constraint::Percentage(8), // Output + Constraint::Percentage(4), // Kind + Constraint::Percentage(9), // Type Constraint::Percentage(10), // Events In + Constraint::Percentage(12), // Bytes In Constraint::Percentage(10), // Events Out - Constraint::Percentage(10), // Bytes - 
Constraint::Percentage(5), // Errors - Constraint::Percentage(16), // Allocated Bytes + Constraint::Percentage(12), // Bytes Out + Constraint::Percentage(8), // Errors + Constraint::Percentage(14), // Allocated Bytes ] } else { &[ - Constraint::Percentage(15), // ID - Constraint::Percentage(15), // Output - Constraint::Percentage(10), // Kind - Constraint::Percentage(10), // Type - Constraint::Percentage(10), // Events In - Constraint::Percentage(10), // Events Out - Constraint::Percentage(10), // Bytes - Constraint::Percentage(10), // Errors + Constraint::Percentage(13), // ID + Constraint::Percentage(12), // Output + Constraint::Percentage(9), // Kind + Constraint::Percentage(6), // Type + Constraint::Percentage(12), // Events In + Constraint::Percentage(14), // Bytes In + Constraint::Percentage(12), // Events Out + Constraint::Percentage(14), // Bytes Out + Constraint::Percentage(8), // Errors ] }); f.render_widget(w, area); diff --git a/src/top/metrics.rs b/src/top/metrics.rs index 8afbdbb9727f5..cbacc5ab9c904 100644 --- a/src/top/metrics.rs +++ b/src/top/metrics.rs @@ -29,12 +29,14 @@ async fn component_added(client: Arc, tx: state::EventTx) { kind: c.on.to_string(), component_type: c.component_type, outputs: HashMap::new(), + received_bytes_total: 0, + received_bytes_throughput_sec: 0, received_events_total: 0, received_events_throughput_sec: 0, + sent_bytes_total: 0, + sent_bytes_throughput_sec: 0, sent_events_total: 0, sent_events_throughput_sec: 0, - processed_bytes_total: 0, - processed_bytes_throughput_sec: 0, #[cfg(feature = "allocation-tracing")] allocated_bytes: 0, errors: 0, @@ -84,6 +86,54 @@ async fn component_removed(client: Arc, tx: state::EventTx) } } +async fn received_bytes_totals(client: Arc, tx: state::EventTx, interval: i64) { + tokio::pin! { + let stream = client.component_received_bytes_totals_subscription(interval); + }; + + while let Some(Some(res)) = stream.next().await { + if let Some(d) = res.data { + let c = d.component_received_bytes_totals; + _ = tx + .send(state::EventType::ReceivedBytesTotals( + c.into_iter() + .map(|c| { + ( + ComponentKey::from(c.component_id.as_str()), + c.metric.received_bytes_total as i64, + ) + }) + .collect(), + )) + .await; + } + } +} + +async fn received_bytes_throughputs( + client: Arc, + tx: state::EventTx, + interval: i64, +) { + tokio::pin! { + let stream = client.component_received_bytes_throughputs_subscription(interval); + }; + + while let Some(Some(res)) = stream.next().await { + if let Some(d) = res.data { + let c = d.component_received_bytes_throughputs; + _ = tx + .send(state::EventType::ReceivedBytesThroughputs( + interval, + c.into_iter() + .map(|c| (ComponentKey::from(c.component_id.as_str()), c.throughput)) + .collect(), + )) + .await; + } + } +} + async fn received_events_totals( client: Arc, tx: state::EventTx, @@ -136,21 +186,22 @@ async fn received_events_throughputs( } } -async fn sent_events_totals(client: Arc, tx: state::EventTx, interval: i64) { +async fn sent_bytes_totals(client: Arc, tx: state::EventTx, interval: i64) { tokio::pin! 
{ - let stream = client.component_sent_events_totals_subscription(interval); + let stream = client.component_sent_bytes_totals_subscription(interval); }; while let Some(Some(res)) = stream.next().await { if let Some(d) = res.data { - let c = d.component_sent_events_totals; + let c = d.component_sent_bytes_totals; _ = tx - .send(state::EventType::SentEventsTotals( + .send(state::EventType::SentBytesTotals( c.into_iter() - .map(|c| SentEventsMetric { - key: ComponentKey::from(c.component_id.as_str()), - total: c.metric.sent_events_total as i64, - outputs: c.outputs().into_iter().collect(), + .map(|c| { + ( + ComponentKey::from(c.component_id.as_str()), + c.metric.sent_bytes_total as i64, + ) }) .collect(), )) @@ -159,27 +210,23 @@ async fn sent_events_totals(client: Arc, tx: state::EventTx, } } -async fn sent_events_throughputs( +async fn sent_bytes_throughputs( client: Arc, tx: state::EventTx, interval: i64, ) { tokio::pin! { - let stream = client.component_sent_events_throughputs_subscription(interval); + let stream = client.component_sent_bytes_throughputs_subscription(interval); }; while let Some(Some(res)) = stream.next().await { if let Some(d) = res.data { - let c = d.component_sent_events_throughputs; + let c = d.component_sent_bytes_throughputs; _ = tx - .send(state::EventType::SentEventsThroughputs( + .send(state::EventType::SentBytesThroughputs( interval, c.into_iter() - .map(|c| SentEventsMetric { - key: ComponentKey::from(c.component_id.as_str()), - total: c.throughput, - outputs: c.outputs().into_iter().collect(), - }) + .map(|c| (ComponentKey::from(c.component_id.as_str()), c.throughput)) .collect(), )) .await; @@ -187,26 +234,21 @@ async fn sent_events_throughputs( } } -async fn processed_bytes_totals( - client: Arc, - tx: state::EventTx, - interval: i64, -) { +async fn sent_events_totals(client: Arc, tx: state::EventTx, interval: i64) { tokio::pin! { - let stream = client.component_processed_bytes_totals_subscription(interval); + let stream = client.component_sent_events_totals_subscription(interval); }; while let Some(Some(res)) = stream.next().await { if let Some(d) = res.data { - let c = d.component_processed_bytes_totals; + let c = d.component_sent_events_totals; _ = tx - .send(state::EventType::ProcessedBytesTotals( + .send(state::EventType::SentEventsTotals( c.into_iter() - .map(|c| { - ( - ComponentKey::from(c.component_id.as_str()), - c.metric.processed_bytes_total as i64, - ) + .map(|c| SentEventsMetric { + key: ComponentKey::from(c.component_id.as_str()), + total: c.metric.sent_events_total as i64, + outputs: c.outputs().into_iter().collect(), }) .collect(), )) @@ -215,23 +257,27 @@ async fn processed_bytes_totals( } } -async fn processed_bytes_throughputs( +async fn sent_events_throughputs( client: Arc, tx: state::EventTx, interval: i64, ) { tokio::pin! 
{ - let stream = client.component_processed_bytes_throughputs_subscription(interval); + let stream = client.component_sent_events_throughputs_subscription(interval); }; while let Some(Some(res)) = stream.next().await { if let Some(d) = res.data { - let c = d.component_processed_bytes_throughputs; + let c = d.component_sent_events_throughputs; _ = tx - .send(state::EventType::ProcessedBytesThroughputs( + .send(state::EventType::SentEventsThroughputs( interval, c.into_iter() - .map(|c| (ComponentKey::from(c.component_id.as_str()), c.throughput)) + .map(|c| SentEventsMetric { + key: ComponentKey::from(c.component_id.as_str()), + total: c.throughput, + outputs: c.outputs().into_iter().collect(), + }) .collect(), )) .await; @@ -275,32 +321,38 @@ pub fn subscribe( vec![ tokio::spawn(component_added(Arc::clone(&client), tx.clone())), tokio::spawn(component_removed(Arc::clone(&client), tx.clone())), - tokio::spawn(received_events_totals( + tokio::spawn(received_bytes_totals( Arc::clone(&client), tx.clone(), interval, )), - tokio::spawn(received_events_throughputs( + tokio::spawn(received_bytes_throughputs( Arc::clone(&client), tx.clone(), interval, )), - tokio::spawn(sent_events_totals( + tokio::spawn(received_events_totals( Arc::clone(&client), tx.clone(), interval, )), - tokio::spawn(sent_events_throughputs( + tokio::spawn(received_events_throughputs( + Arc::clone(&client), + tx.clone(), + interval, + )), + tokio::spawn(sent_bytes_totals(Arc::clone(&client), tx.clone(), interval)), + tokio::spawn(sent_bytes_throughputs( Arc::clone(&client), tx.clone(), interval, )), - tokio::spawn(processed_bytes_totals( + tokio::spawn(sent_events_totals( Arc::clone(&client), tx.clone(), interval, )), - tokio::spawn(processed_bytes_throughputs( + tokio::spawn(sent_events_throughputs( Arc::clone(&client), tx.clone(), interval, @@ -326,36 +378,34 @@ pub async fn init_components(client: &Client) -> Result { .components .edges .into_iter() - .flat_map(|d| { - d.into_iter().filter_map(|edge| { - let d = edge?.node; - let key = ComponentKey::from(d.component_id); - Some(( - key.clone(), - state::ComponentRow { - key, - kind: d.on.to_string(), - component_type: d.component_type, - outputs: d - .on - .outputs() - .into_iter() - .map(|(id, sent_events_total)| { - (id, OutputMetrics::from(sent_events_total)) - }) - .collect(), - received_events_total: d.on.received_events_total(), - received_events_throughput_sec: 0, - sent_events_total: d.on.sent_events_total(), - sent_events_throughput_sec: 0, - processed_bytes_total: d.on.processed_bytes_total(), - processed_bytes_throughput_sec: 0, - #[cfg(feature = "allocation-tracing")] - allocated_bytes: 0, - errors: 0, - }, - )) - }) + .flat_map(|edge| { + let d = edge.node; + let key = ComponentKey::from(d.component_id); + Some(( + key.clone(), + state::ComponentRow { + key, + kind: d.on.to_string(), + component_type: d.component_type, + outputs: d + .on + .outputs() + .into_iter() + .map(|(id, sent_events_total)| (id, OutputMetrics::from(sent_events_total))) + .collect(), + received_bytes_total: d.on.received_bytes_total(), + received_bytes_throughput_sec: 0, + received_events_total: d.on.received_events_total(), + received_events_throughput_sec: 0, + sent_bytes_total: d.on.sent_bytes_total(), + sent_bytes_throughput_sec: 0, + sent_events_total: d.on.sent_events_total(), + sent_events_throughput_sec: 0, + #[cfg(feature = "allocation-tracing")] + allocated_bytes: 0, + errors: 0, + }, + )) }) .collect::>(); diff --git a/src/top/mod.rs b/src/top/mod.rs index 
e2b8f3347e810..24a0e8b48c99c 100644 --- a/src/top/mod.rs +++ b/src/top/mod.rs @@ -12,7 +12,7 @@ use url::Url; #[command(rename_all = "kebab-case")] pub struct Opts { /// Interval to sample metrics at, in milliseconds - #[arg(default_value = "500", short = 'i', long)] + #[arg(default_value = "1000", short = 'i', long)] interval: u32, /// Vector GraphQL API server endpoint @@ -20,10 +20,12 @@ pub struct Opts { url: Option, /// Humanize metrics, using numeric suffixes - e.g. 1,100 = 1.10 k, 1,000,000 = 1.00 M - #[arg(short = 'H', long)] + #[arg(short = 'H', long, default_value_t = true)] human_metrics: bool, - /// Whether to reconnect if the underlying Vector API connection drops. By default, top will attempt to reconnect if the connection drops. + /// Whether to reconnect if the underlying Vector API connection drops. + /// + /// By default, top will attempt to reconnect if the connection drops. #[arg(short, long)] no_reconnect: bool, } diff --git a/src/top/state.rs b/src/top/state.rs index dcc10409d4136..8e8d4e51fe770 100644 --- a/src/top/state.rs +++ b/src/top/state.rs @@ -1,10 +1,11 @@ -use std::{ - collections::{BTreeMap, HashMap}, - fmt::Display, -}; +use std::collections::{BTreeMap, HashMap}; +use chrono::{DateTime, Local}; use tokio::sync::mpsc; -use tui::style::{Color, Style}; +use tui::{ + style::{Color, Style}, + text::Span, +}; use vector_core::internal_event::DEFAULT_OUTPUT; use crate::config::ComponentKey; @@ -21,16 +22,19 @@ pub struct SentEventsMetric { #[derive(Debug)] pub enum EventType { InitializeState(State), + ReceivedBytesTotals(Vec), + /// Interval + identified metric + ReceivedBytesThroughputs(i64, Vec), ReceivedEventsTotals(Vec), /// Interval in ms + identified metric ReceivedEventsThroughputs(i64, Vec), + SentBytesTotals(Vec), + /// Interval + identified metric + SentBytesThroughputs(i64, Vec), // Identified overall metric + output-specific metrics SentEventsTotals(Vec), /// Interval in ms + identified overall metric + output-specific metrics SentEventsThroughputs(i64, Vec), - ProcessedBytesTotals(Vec), - /// Interval + identified metric - ProcessedBytesThroughputs(i64, Vec), ErrorsTotals(Vec), #[cfg(feature = "allocation-tracing")] AllocatedBytes(Vec), @@ -47,30 +51,24 @@ pub enum ConnectionStatus { // reconnect attempts Disconnected(u64), // Connection is working - Connected, + Connected(DateTime), } impl ConnectionStatus { - /// Color styling to apply depending on the connection status - pub fn style(&self) -> Style { + pub fn as_ui_spans(&self) -> Vec { match self { - ConnectionStatus::Pending => Style::default().fg(Color::Yellow), - ConnectionStatus::Disconnected(_) => Style::default().fg(Color::Red), - ConnectionStatus::Connected => Style::default().fg(Color::Green), - } - } -} - -impl Display for ConnectionStatus { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ConnectionStatus::Pending => write!(f, "Initializing connection"), - ConnectionStatus::Disconnected(delay) => write!( - f, - "Disconnected: reconnecting every {} seconds", - delay / 1000 - ), - ConnectionStatus::Connected => write!(f, "Connected"), + Self::Pending => vec![Span::styled( + "Connecting...", + Style::default().fg(Color::Yellow), + )], + Self::Disconnected(delay) => vec![ + Span::styled("Disconnected", Style::default().fg(Color::Red)), + Span::from(format!(" (reconnecting every {} seconds)", delay / 1000)), + ], + Self::Connected(since) => vec![ + Span::styled("Connected", Style::default().fg(Color::Green)), + Span::from(format!(" (since {})", 
since.format("%F %r %Z"))), + ], } } } @@ -114,10 +112,12 @@ pub struct ComponentRow { pub kind: String, pub component_type: String, pub outputs: HashMap, - pub processed_bytes_total: i64, - pub processed_bytes_throughput_sec: i64, + pub received_bytes_total: i64, + pub received_bytes_throughput_sec: i64, pub received_events_total: i64, pub received_events_throughput_sec: i64, + pub sent_bytes_total: i64, + pub sent_bytes_throughput_sec: i64, pub sent_events_total: i64, pub sent_events_throughput_sec: i64, #[cfg(feature = "allocation-tracing")] @@ -147,6 +147,21 @@ pub async fn updater(mut event_rx: EventRx) -> StateRx { EventType::InitializeState(new_state) => { state = new_state; } + EventType::ReceivedBytesTotals(rows) => { + for (key, v) in rows { + if let Some(r) = state.components.get_mut(&key) { + r.received_bytes_total = v; + } + } + } + EventType::ReceivedBytesThroughputs(interval, rows) => { + for (key, v) in rows { + if let Some(r) = state.components.get_mut(&key) { + r.received_bytes_throughput_sec = + (v as f64 * (1000.0 / interval as f64)) as i64; + } + } + } EventType::ReceivedEventsTotals(rows) => { for (key, v) in rows { if let Some(r) = state.components.get_mut(&key) { @@ -162,6 +177,21 @@ pub async fn updater(mut event_rx: EventRx) -> StateRx { } } } + EventType::SentBytesTotals(rows) => { + for (key, v) in rows { + if let Some(r) = state.components.get_mut(&key) { + r.sent_bytes_total = v; + } + } + } + EventType::SentBytesThroughputs(interval, rows) => { + for (key, v) in rows { + if let Some(r) = state.components.get_mut(&key) { + r.sent_bytes_throughput_sec = + (v as f64 * (1000.0 / interval as f64)) as i64; + } + } + } EventType::SentEventsTotals(rows) => { for m in rows { if let Some(r) = state.components.get_mut(&m.key) { @@ -190,21 +220,6 @@ pub async fn updater(mut event_rx: EventRx) -> StateRx { } } } - EventType::ProcessedBytesTotals(rows) => { - for (key, v) in rows { - if let Some(r) = state.components.get_mut(&key) { - r.processed_bytes_total = v; - } - } - } - EventType::ProcessedBytesThroughputs(interval, rows) => { - for (key, v) in rows { - if let Some(r) = state.components.get_mut(&key) { - r.processed_bytes_throughput_sec = - (v as f64 * (1000.0 / interval as f64)) as i64; - } - } - } EventType::ErrorsTotals(rows) => { for (key, v) in rows { if let Some(r) = state.components.get_mut(&key) { diff --git a/website/content/en/blog/graphql-api.md b/website/content/en/blog/graphql-api.md index f1df0159155f5..4c093bc215324 100644 --- a/website/content/en/blog/graphql-api.md +++ b/website/content/en/blog/graphql-api.md @@ -114,9 +114,9 @@ query { node { componentId metrics { - # Total bytes processed by this sink. - processedBytesTotal { - processedBytesTotal + # Total bytes sent by this sink. 
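(Editor's note) The `updater` handlers added in the `src/top/state.rs` hunk above all normalize an interval delta into a per-second rate with the same expression, `(v as f64 * (1000.0 / interval as f64)) as i64`. A small self-contained sketch of that arithmetic follows; the helper name is illustrative and not part of the source.

```rust
/// Convert a delta observed over `interval_ms` milliseconds into a
/// per-second throughput, using the same arithmetic as the
/// `ReceivedBytesThroughputs` / `SentBytesThroughputs` handlers above.
/// (Free-standing helper for illustration only.)
fn per_second(delta: i64, interval_ms: i64) -> i64 {
    (delta as f64 * (1000.0 / interval_ms as f64)) as i64
}

fn main() {
    // 500 bytes observed over the new default 1000 ms sampling interval -> 500 B/s.
    assert_eq!(per_second(500, 1_000), 500);
    // 250 bytes observed over a 500 ms interval -> 500 B/s.
    assert_eq!(per_second(250, 500), 500);
}
```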
+ sentBytesTotal { + sentBytesTotal } } } diff --git a/website/cue/reference/components.cue b/website/cue/reference/components.cue index ad40bf4e1630f..ff3c36b24d862 100644 --- a/website/cue/reference/components.cue +++ b/website/cue/reference/components.cue @@ -1277,14 +1277,6 @@ components: { } } - if Kind == "transform" { - telemetry: metrics: { - // Default metrics for each transform - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - } - } - how_it_works: { state: { title: "State" diff --git a/website/cue/reference/components/sinks.cue b/website/cue/reference/components/sinks.cue index e44491fedd0cc..d3df682927263 100644 --- a/website/cue/reference/components/sinks.cue +++ b/website/cue/reference/components/sinks.cue @@ -655,7 +655,6 @@ components: sinks: [Name=string]: { component_received_events_count: components.sources.internal_metrics.output.metrics.component_received_events_count component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total utilization: components.sources.internal_metrics.output.metrics.utilization buffer_byte_size: components.sources.internal_metrics.output.metrics.buffer_byte_size buffer_events: components.sources.internal_metrics.output.metrics.buffer_events diff --git a/website/cue/reference/components/sinks/aws_kinesis_streams.cue b/website/cue/reference/components/sinks/aws_kinesis_streams.cue index ed496572228b0..c980d6df5de6c 100644 --- a/website/cue/reference/components/sinks/aws_kinesis_streams.cue +++ b/website/cue/reference/components/sinks/aws_kinesis_streams.cue @@ -146,7 +146,5 @@ components: sinks: aws_kinesis_streams: components._aws & { component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total } } diff --git a/website/cue/reference/components/sinks/aws_sqs.cue b/website/cue/reference/components/sinks/aws_sqs.cue index 49b9ba9556e0b..4d3d40a303735 100644 --- a/website/cue/reference/components/sinks/aws_sqs.cue +++ b/website/cue/reference/components/sinks/aws_sqs.cue @@ -98,8 +98,6 @@ components: sinks: aws_sqs: components._aws & { component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total } } diff --git 
a/website/cue/reference/components/sinks/axiom.cue b/website/cue/reference/components/sinks/axiom.cue index fdfac0ff8677c..631c3876fc848 100644 --- a/website/cue/reference/components/sinks/axiom.cue +++ b/website/cue/reference/components/sinks/axiom.cue @@ -96,7 +96,6 @@ components: sinks: axiom: { component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total } } diff --git a/website/cue/reference/components/sinks/azure_blob.cue b/website/cue/reference/components/sinks/azure_blob.cue index c870eeba52530..82c701969415f 100644 --- a/website/cue/reference/components/sinks/azure_blob.cue +++ b/website/cue/reference/components/sinks/azure_blob.cue @@ -131,6 +131,5 @@ components: sinks: azure_blob: { processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total } } diff --git a/website/cue/reference/components/sinks/azure_monitor_logs.cue b/website/cue/reference/components/sinks/azure_monitor_logs.cue index 357d798c7592e..a5e0949c5a596 100644 --- a/website/cue/reference/components/sinks/azure_monitor_logs.cue +++ b/website/cue/reference/components/sinks/azure_monitor_logs.cue @@ -73,6 +73,5 @@ components: sinks: azure_monitor_logs: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total } } diff --git a/website/cue/reference/components/sinks/blackhole.cue b/website/cue/reference/components/sinks/blackhole.cue index 56cd2560f4e7c..74d0945a6ac8a 100644 --- a/website/cue/reference/components/sinks/blackhole.cue +++ b/website/cue/reference/components/sinks/blackhole.cue @@ -46,7 +46,5 @@ components: sinks: blackhole: { } telemetry: metrics: { - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total } } diff --git a/website/cue/reference/components/sinks/clickhouse.cue b/website/cue/reference/components/sinks/clickhouse.cue index 6db4cea9b782a..1f3e48fcb1219 100644 --- a/website/cue/reference/components/sinks/clickhouse.cue +++ b/website/cue/reference/components/sinks/clickhouse.cue @@ -85,6 +85,5 @@ components: sinks: clickhouse: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: 
components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total } } diff --git a/website/cue/reference/components/sinks/console.cue b/website/cue/reference/components/sinks/console.cue index 5d1cf433a46f2..c0aa859bd9176 100644 --- a/website/cue/reference/components/sinks/console.cue +++ b/website/cue/reference/components/sinks/console.cue @@ -57,8 +57,6 @@ components: sinks: console: { } telemetry: metrics: { - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total } } diff --git a/website/cue/reference/components/sinks/databend.cue b/website/cue/reference/components/sinks/databend.cue index 467e5f1f1a92f..a9bfab6a27f37 100644 --- a/website/cue/reference/components/sinks/databend.cue +++ b/website/cue/reference/components/sinks/databend.cue @@ -106,6 +106,5 @@ components: sinks: databend: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total } } diff --git a/website/cue/reference/components/sinks/datadog_events.cue b/website/cue/reference/components/sinks/datadog_events.cue index 504ee4a8d8305..5fa9cf1ef18aa 100644 --- a/website/cue/reference/components/sinks/datadog_events.cue +++ b/website/cue/reference/components/sinks/datadog_events.cue @@ -57,6 +57,5 @@ components: sinks: datadog_events: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total } } diff --git a/website/cue/reference/components/sinks/elasticsearch.cue b/website/cue/reference/components/sinks/elasticsearch.cue index 3ce0fdb2c1d62..6768159b643c5 100644 --- a/website/cue/reference/components/sinks/elasticsearch.cue +++ b/website/cue/reference/components/sinks/elasticsearch.cue @@ -140,7 +140,6 @@ components: sinks: elasticsearch: { component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total } } diff --git a/website/cue/reference/components/sinks/gcp_pubsub.cue b/website/cue/reference/components/sinks/gcp_pubsub.cue index 4d75205967c2a..6d0307d430534 100644 --- a/website/cue/reference/components/sinks/gcp_pubsub.cue +++ b/website/cue/reference/components/sinks/gcp_pubsub.cue @@ -98,6 +98,5 @@ components: sinks: 
gcp_pubsub: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total } } diff --git a/website/cue/reference/components/sinks/gcp_stackdriver_logs.cue b/website/cue/reference/components/sinks/gcp_stackdriver_logs.cue index 46c0adcf211c8..632dcdeddb624 100644 --- a/website/cue/reference/components/sinks/gcp_stackdriver_logs.cue +++ b/website/cue/reference/components/sinks/gcp_stackdriver_logs.cue @@ -120,6 +120,5 @@ components: sinks: gcp_stackdriver_logs: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total } } diff --git a/website/cue/reference/components/sinks/gcp_stackdriver_metrics.cue b/website/cue/reference/components/sinks/gcp_stackdriver_metrics.cue index 8938c237215f0..93200c3f9f0d1 100644 --- a/website/cue/reference/components/sinks/gcp_stackdriver_metrics.cue +++ b/website/cue/reference/components/sinks/gcp_stackdriver_metrics.cue @@ -1,7 +1,7 @@ package metadata components: sinks: gcp_stackdriver_metrics: { - title: "GCP Cloud Monitoring (formerly Stackdrive) Metrics" + title: "GCP Cloud Monitoring (formerly Stackdriver) Metrics" classes: { commonly_used: true @@ -108,6 +108,5 @@ components: sinks: gcp_stackdriver_metrics: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total } } diff --git a/website/cue/reference/components/sinks/honeycomb.cue b/website/cue/reference/components/sinks/honeycomb.cue index c2a4a79e173d3..2fa680df67eaa 100644 --- a/website/cue/reference/components/sinks/honeycomb.cue +++ b/website/cue/reference/components/sinks/honeycomb.cue @@ -82,6 +82,5 @@ components: sinks: honeycomb: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total } } diff --git a/website/cue/reference/components/sinks/http.cue b/website/cue/reference/components/sinks/http.cue index 4e3c2dd45a7ef..bb626d846f88d 100644 --- a/website/cue/reference/components/sinks/http.cue +++ b/website/cue/reference/components/sinks/http.cue @@ -96,10 +96,7 @@ components: sinks: http: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: 
components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total http_bad_requests_total: components.sources.internal_metrics.output.metrics.http_bad_requests_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total } } diff --git a/website/cue/reference/components/sinks/humio.cue b/website/cue/reference/components/sinks/humio.cue index e1f7dd5caae6c..3b7f6101d6c94 100644 --- a/website/cue/reference/components/sinks/humio.cue +++ b/website/cue/reference/components/sinks/humio.cue @@ -155,6 +155,5 @@ components: sinks: _humio: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total } } diff --git a/website/cue/reference/components/sinks/influxdb_logs.cue b/website/cue/reference/components/sinks/influxdb_logs.cue index 2f27e7059091d..13fa0182145a9 100644 --- a/website/cue/reference/components/sinks/influxdb_logs.cue +++ b/website/cue/reference/components/sinks/influxdb_logs.cue @@ -102,6 +102,5 @@ components: sinks: influxdb_logs: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total } } diff --git a/website/cue/reference/components/sinks/loki.cue b/website/cue/reference/components/sinks/loki.cue index 5111f39ec9778..057df8af0b2d4 100644 --- a/website/cue/reference/components/sinks/loki.cue +++ b/website/cue/reference/components/sinks/loki.cue @@ -163,8 +163,6 @@ components: sinks: loki: { component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total streams_total: components.sources.internal_metrics.output.metrics.streams_total } diff --git a/website/cue/reference/components/sinks/mezmo.cue b/website/cue/reference/components/sinks/mezmo.cue index 3eb922656be49..6a284c4b4f001 100644 --- a/website/cue/reference/components/sinks/mezmo.cue +++ b/website/cue/reference/components/sinks/mezmo.cue @@ -68,7 +68,6 @@ components: sinks: mezmo: { component_sent_events_total: 
components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total } } diff --git a/website/cue/reference/components/sinks/nats.cue b/website/cue/reference/components/sinks/nats.cue index e25e68e08ac58..494bd131f85e7 100644 --- a/website/cue/reference/components/sinks/nats.cue +++ b/website/cue/reference/components/sinks/nats.cue @@ -66,8 +66,6 @@ components: sinks: nats: { telemetry: metrics: { events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total send_errors_total: components.sources.internal_metrics.output.metrics.send_errors_total } } diff --git a/website/cue/reference/components/sinks/redis.cue b/website/cue/reference/components/sinks/redis.cue index 73c10dd8bfd7d..6f4e44cc1a386 100644 --- a/website/cue/reference/components/sinks/redis.cue +++ b/website/cue/reference/components/sinks/redis.cue @@ -77,10 +77,6 @@ components: sinks: redis: { telemetry: metrics: { component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total send_errors_total: components.sources.internal_metrics.output.metrics.send_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total } } diff --git a/website/cue/reference/components/sinks/socket.cue b/website/cue/reference/components/sinks/socket.cue index e058ff0bc9fb2..7bb7064e660d6 100644 --- a/website/cue/reference/components/sinks/socket.cue +++ b/website/cue/reference/components/sinks/socket.cue @@ -69,7 +69,5 @@ components: sinks: socket: { telemetry: metrics: { connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total } } diff --git a/website/cue/reference/components/sinks/splunk_hec_logs.cue b/website/cue/reference/components/sinks/splunk_hec_logs.cue index 751dc6063b2a0..c3915ec5be422 100644 --- a/website/cue/reference/components/sinks/splunk_hec_logs.cue +++ b/website/cue/reference/components/sinks/splunk_hec_logs.cue @@ -87,9 +87,7 @@ components: sinks: splunk_hec_logs: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total 
component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total } diff --git a/website/cue/reference/components/sinks/vector.cue b/website/cue/reference/components/sinks/vector.cue index a8b1828986a7f..922836ce15484 100644 --- a/website/cue/reference/components/sinks/vector.cue +++ b/website/cue/reference/components/sinks/vector.cue @@ -83,8 +83,6 @@ components: sinks: vector: { component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total protobuf_decode_errors_total: components.sources.internal_metrics.output.metrics.protobuf_decode_errors_total } } diff --git a/website/cue/reference/components/sinks/websocket.cue b/website/cue/reference/components/sinks/websocket.cue index 95341770cdfbf..21d5d7efce561 100644 --- a/website/cue/reference/components/sinks/websocket.cue +++ b/website/cue/reference/components/sinks/websocket.cue @@ -78,11 +78,8 @@ components: sinks: websocket: { connection_failed_total: components.sources.internal_metrics.output.metrics.connection_failed_total connection_shutdown_total: components.sources.internal_metrics.output.metrics.connection_shutdown_total connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total } } diff --git a/website/cue/reference/components/sources.cue b/website/cue/reference/components/sources.cue index 459076b31f34e..659753d81dc57 100644 --- a/website/cue/reference/components/sources.cue +++ b/website/cue/reference/components/sources.cue @@ -401,7 +401,6 @@ components: sources: [Name=string]: { } telemetry: metrics: { - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total source_lag_time_seconds: components.sources.internal_metrics.output.metrics.source_lag_time_seconds diff --git a/website/cue/reference/components/sources/amqp.cue 
b/website/cue/reference/components/sources/amqp.cue index 19b6349b7f8d1..b2647cbbad352 100644 --- a/website/cue/reference/components/sources/amqp.cue +++ b/website/cue/reference/components/sources/amqp.cue @@ -78,11 +78,8 @@ components: sources: amqp: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total consumer_offset_updates_failed_total: components.sources.internal_metrics.output.metrics.consumer_offset_updates_failed_total events_failed_total: components.sources.internal_metrics.output.metrics.events_failed_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total } how_it_works: components._amqp.how_it_works diff --git a/website/cue/reference/components/sources/apache_metrics.cue b/website/cue/reference/components/sources/apache_metrics.cue index 2a4ef6f069d3f..f39a895f79a77 100644 --- a/website/cue/reference/components/sources/apache_metrics.cue +++ b/website/cue/reference/components/sources/apache_metrics.cue @@ -166,12 +166,9 @@ components: sources: apache_metrics: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds } diff --git a/website/cue/reference/components/sources/aws_ecs_metrics.cue b/website/cue/reference/components/sources/aws_ecs_metrics.cue index 88b17b37b5349..7eb00540a88e3 100644 --- a/website/cue/reference/components/sources/aws_ecs_metrics.cue +++ b/website/cue/reference/components/sources/aws_ecs_metrics.cue @@ -192,12 +192,9 @@ components: sources: aws_ecs_metrics: { component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - 
processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds } diff --git a/website/cue/reference/components/sources/aws_kinesis_firehose.cue b/website/cue/reference/components/sources/aws_kinesis_firehose.cue index e144247d84190..7d9997685b41d 100644 --- a/website/cue/reference/components/sources/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sources/aws_kinesis_firehose.cue @@ -194,8 +194,6 @@ components: sources: aws_kinesis_firehose: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total request_read_errors_total: components.sources.internal_metrics.output.metrics.request_read_errors_total requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total request_automatic_decode_errors_total: components.sources.internal_metrics.output.metrics.request_automatic_decode_errors_total diff --git a/website/cue/reference/components/sources/aws_s3.cue b/website/cue/reference/components/sources/aws_s3.cue index 38a62d3f21616..d8316999b521f 100644 --- a/website/cue/reference/components/sources/aws_s3.cue +++ b/website/cue/reference/components/sources/aws_s3.cue @@ -162,8 +162,6 @@ components: sources: aws_s3: components._aws & { ] telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/datadog_agent.cue b/website/cue/reference/components/sources/datadog_agent.cue index 05df12d903601..e83edf69d723d 100644 --- a/website/cue/reference/components/sources/datadog_agent.cue +++ b/website/cue/reference/components/sources/datadog_agent.cue @@ -224,6 +224,5 @@ components: sources: datadog_agent: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total } } diff --git a/website/cue/reference/components/sources/demo_logs.cue b/website/cue/reference/components/sources/demo_logs.cue index c985142352570..e7b506a9edb51 100644 --- a/website/cue/reference/components/sources/demo_logs.cue +++ 
b/website/cue/reference/components/sources/demo_logs.cue @@ -62,6 +62,5 @@ components: sources: demo_logs: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total } } diff --git a/website/cue/reference/components/sources/dnstap.cue b/website/cue/reference/components/sources/dnstap.cue index 63096c448c0d3..aa38de8f1f711 100644 --- a/website/cue/reference/components/sources/dnstap.cue +++ b/website/cue/reference/components/sources/dnstap.cue @@ -1171,9 +1171,6 @@ components: sources: dnstap: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total diff --git a/website/cue/reference/components/sources/docker_logs.cue b/website/cue/reference/components/sources/docker_logs.cue index 33b82984a9dad..f9786bce712af 100644 --- a/website/cue/reference/components/sources/docker_logs.cue +++ b/website/cue/reference/components/sources/docker_logs.cue @@ -208,15 +208,12 @@ components: sources: docker_logs: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total communication_errors_total: components.sources.internal_metrics.output.metrics.communication_errors_total container_metadata_fetch_errors_total: components.sources.internal_metrics.output.metrics.container_metadata_fetch_errors_total container_processed_events_total: components.sources.internal_metrics.output.metrics.container_processed_events_total containers_unwatched_total: components.sources.internal_metrics.output.metrics.containers_unwatched_total containers_watched_total: components.sources.internal_metrics.output.metrics.containers_watched_total logging_driver_errors_total: components.sources.internal_metrics.output.metrics.logging_driver_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/eventstoredb_metrics.cue b/website/cue/reference/components/sources/eventstoredb_metrics.cue index 09b38eff7ba67..5421f4d94624d 100644 --- a/website/cue/reference/components/sources/eventstoredb_metrics.cue +++ b/website/cue/reference/components/sources/eventstoredb_metrics.cue @@ -116,10 +116,8 @@ components: 
sources: eventstoredb_metrics: { } } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total diff --git a/website/cue/reference/components/sources/exec.cue b/website/cue/reference/components/sources/exec.cue index 34927d7907291..0bf792c886781 100644 --- a/website/cue/reference/components/sources/exec.cue +++ b/website/cue/reference/components/sources/exec.cue @@ -128,9 +128,6 @@ components: sources: exec: { telemetry: metrics: { command_executed_total: components.sources.internal_metrics.output.metrics.command_executed_total command_execution_duration_seconds: components.sources.internal_metrics.output.metrics.command_execution_duration_seconds - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total diff --git a/website/cue/reference/components/sources/file.cue b/website/cue/reference/components/sources/file.cue index 906c76eda4ba1..61564ea8d1850 100644 --- a/website/cue/reference/components/sources/file.cue +++ b/website/cue/reference/components/sources/file.cue @@ -427,7 +427,6 @@ components: sources: file: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total checkpoint_write_errors_total: components.sources.internal_metrics.output.metrics.checkpoint_write_errors_total checkpoints_total: components.sources.internal_metrics.output.metrics.checkpoints_total checksum_errors_total: components.sources.internal_metrics.output.metrics.checksum_errors_total diff --git a/website/cue/reference/components/sources/file_descriptor.cue b/website/cue/reference/components/sources/file_descriptor.cue index 803012459d9c4..0a54a4b67251c 100644 --- a/website/cue/reference/components/sources/file_descriptor.cue +++ b/website/cue/reference/components/sources/file_descriptor.cue @@ -79,9 +79,6 @@ components: sources: file_descriptor: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - processed_bytes_total: 
components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/fluent.cue b/website/cue/reference/components/sources/fluent.cue index 06a14a67166b3..a8492e1adcbdf 100644 --- a/website/cue/reference/components/sources/fluent.cue +++ b/website/cue/reference/components/sources/fluent.cue @@ -177,10 +177,7 @@ components: sources: fluent: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total decode_errors_total: components.sources.internal_metrics.output.metrics.decode_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total } diff --git a/website/cue/reference/components/sources/heroku_logs.cue b/website/cue/reference/components/sources/heroku_logs.cue index 7cefd1dfeca4c..1741703745193 100644 --- a/website/cue/reference/components/sources/heroku_logs.cue +++ b/website/cue/reference/components/sources/heroku_logs.cue @@ -105,8 +105,6 @@ components: sources: heroku_logs: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total request_read_errors_total: components.sources.internal_metrics.output.metrics.request_read_errors_total requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total } diff --git a/website/cue/reference/components/sources/host_metrics.cue b/website/cue/reference/components/sources/host_metrics.cue index 77c4680810e12..0018e403b7117 100644 --- a/website/cue/reference/components/sources/host_metrics.cue +++ b/website/cue/reference/components/sources/host_metrics.cue @@ -259,7 +259,6 @@ components: sources: host_metrics: { } telemetry: metrics: { - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/http_client.cue b/website/cue/reference/components/sources/http_client.cue index 6bb9a0e21b9f9..304e18070743f 100644 --- 
a/website/cue/reference/components/sources/http_client.cue +++ b/website/cue/reference/components/sources/http_client.cue @@ -139,12 +139,9 @@ components: sources: http_client: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/http_server.cue b/website/cue/reference/components/sources/http_server.cue index 398aaaf175d5b..317de8954e618 100644 --- a/website/cue/reference/components/sources/http_server.cue +++ b/website/cue/reference/components/sources/http_server.cue @@ -182,7 +182,6 @@ components: sources: http_server: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total http_bad_requests_total: components.sources.internal_metrics.output.metrics.http_bad_requests_total parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total diff --git a/website/cue/reference/components/sources/internal_metrics.cue b/website/cue/reference/components/sources/internal_metrics.cue index 2331b23401b15..36b5757141a94 100644 --- a/website/cue/reference/components/sources/internal_metrics.cue +++ b/website/cue/reference/components/sources/internal_metrics.cue @@ -380,38 +380,6 @@ components: sources: internal_metrics: { default_namespace: "vector" tags: _component_tags } - events_in_total: { - description: """ - The number of events accepted by this component either from tagged - origins like file and uri, or cumulatively from other origins. - This metric is deprecated and will be removed in a future version. - Use [`component_received_events_total`](\(urls.vector_sources)/internal_metrics/#component_received_events_total) instead. - """ - type: "counter" - default_namespace: "vector" - tags: component_received_events_total.tags - } - events_out_total: { - description: """ - The total number of events emitted by this component. - This metric is deprecated and will be removed in a future version. - Use [`component_sent_events_total`](\(urls.vector_sources)/internal_metrics/#component_sent_events_total) instead. 
- """ - type: "counter" - default_namespace: "vector" - tags: _component_tags & {output: _output} - } - processed_events_total: { - description: """ - The total number of events processed by this component. - This metric is deprecated in place of using - [`component_received_events_total`](\(urls.vector_sources)/internal_metrics/#component_received_events_total) and - [`component_sent_events_total`](\(urls.vector_sources)/internal_metrics/#component_sent_events_total) metrics. - """ - type: "counter" - default_namespace: "vector" - tags: _component_tags - } buffer_byte_size: { description: "The number of bytes current in the buffer." type: "gauge" @@ -851,38 +819,6 @@ components: sources: internal_metrics: { default_namespace: "vector" tags: _internal_metrics_tags } - processed_bytes_total: { - description: "The number of bytes processed by the component." - type: "counter" - default_namespace: "vector" - tags: _component_tags & { - file: { - description: "The file from which the bytes originate." - required: false - } - uri: { - description: "The sanitized URI from which the bytes originate." - required: false - } - container_name: { - description: "The name of the container from which the bytes originate." - required: false - } - pod_name: { - description: "The name of the pod from which the bytes originate." - required: false - } - peer_addr: { - description: "The IP from which the bytes originate." - required: false - } - peer_path: { - description: "The pathname from which the bytes originate." - required: false - } - mode: _mode - } - } processing_errors_total: { description: "The total number of processing errors encountered by this component. This metric is deprecated in favor of `component_errors_total`." type: "counter" diff --git a/website/cue/reference/components/sources/journald.cue b/website/cue/reference/components/sources/journald.cue index 7c1cc99a3a09c..c64c1fdcaa486 100644 --- a/website/cue/reference/components/sources/journald.cue +++ b/website/cue/reference/components/sources/journald.cue @@ -154,10 +154,7 @@ components: sources: journald: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total invalid_record_total: components.sources.internal_metrics.output.metrics.invalid_record_total invalid_record_bytes_total: components.sources.internal_metrics.output.metrics.invalid_record_bytes_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total } } diff --git a/website/cue/reference/components/sources/kafka.cue b/website/cue/reference/components/sources/kafka.cue index 281c1a2a97389..d5d56563b5bac 100644 --- a/website/cue/reference/components/sources/kafka.cue +++ b/website/cue/reference/components/sources/kafka.cue @@ -88,7 +88,6 @@ components: sources: kafka: { telemetry: metrics: { events_failed_total: components.sources.internal_metrics.output.metrics.events_failed_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total consumer_offset_updates_failed_total: 
components.sources.internal_metrics.output.metrics.consumer_offset_updates_failed_total kafka_queue_messages: components.sources.internal_metrics.output.metrics.kafka_queue_messages kafka_queue_messages_bytes: components.sources.internal_metrics.output.metrics.kafka_queue_messages_bytes @@ -101,8 +100,6 @@ components: sources: kafka: { kafka_consumed_messages_total: components.sources.internal_metrics.output.metrics.kafka_consumed_messages_total kafka_consumed_messages_bytes_total: components.sources.internal_metrics.output.metrics.kafka_consumed_messages_bytes_total kafka_consumer_lag: components.sources.internal_metrics.output.metrics.kafka_consumer_lag - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/kubernetes_logs.cue b/website/cue/reference/components/sources/kubernetes_logs.cue index 3e40e6935eac5..50ee665bdcd6a 100644 --- a/website/cue/reference/components/sources/kubernetes_logs.cue +++ b/website/cue/reference/components/sources/kubernetes_logs.cue @@ -465,7 +465,6 @@ components: sources: kubernetes_logs: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total k8s_format_picker_edge_cases_total: components.sources.internal_metrics.output.metrics.k8s_format_picker_edge_cases_total k8s_docker_format_parse_failures_total: components.sources.internal_metrics.output.metrics.k8s_docker_format_parse_failures_total k8s_event_annotation_failures_total: components.sources.internal_metrics.output.metrics.k8s_event_annotation_failures_total @@ -478,8 +477,6 @@ components: sources: kubernetes_logs: { k8s_watch_stream_failed_total: components.sources.internal_metrics.output.metrics.k8s_watch_stream_failed_total k8s_watch_stream_items_obtained_total: components.sources.internal_metrics.output.metrics.k8s_watch_stream_items_obtained_total k8s_watcher_http_error_total: components.sources.internal_metrics.output.metrics.k8s_watcher_http_error_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/logstash.cue b/website/cue/reference/components/sources/logstash.cue index 9c24a7a78ad90..0329e2ea50484 100644 --- a/website/cue/reference/components/sources/logstash.cue +++ b/website/cue/reference/components/sources/logstash.cue @@ -308,10 +308,7 @@ components: sources: logstash: { connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total connection_send_ack_errors_total: components.sources.internal_metrics.output.metrics.connection_send_ack_errors_total 
decode_errors_total: components.sources.internal_metrics.output.metrics.decode_errors_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total open_connections: components.sources.internal_metrics.output.metrics.open_connections - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total } diff --git a/website/cue/reference/components/sources/mongodb_metrics.cue b/website/cue/reference/components/sources/mongodb_metrics.cue index 2e12d11114e01..92f142e7318f7 100644 --- a/website/cue/reference/components/sources/mongodb_metrics.cue +++ b/website/cue/reference/components/sources/mongodb_metrics.cue @@ -722,7 +722,6 @@ components: sources: mongodb_metrics: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total collect_completed_total: components.sources.internal_metrics.output.metrics.collect_completed_total collect_duration_seconds: components.sources.internal_metrics.output.metrics.collect_duration_seconds parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total diff --git a/website/cue/reference/components/sources/nats.cue b/website/cue/reference/components/sources/nats.cue index ea067705bcf97..80e92a4c7c7d1 100644 --- a/website/cue/reference/components/sources/nats.cue +++ b/website/cue/reference/components/sources/nats.cue @@ -69,9 +69,6 @@ components: sources: nats: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/nginx_metrics.cue b/website/cue/reference/components/sources/nginx_metrics.cue index 59abcecdfc96e..57c31f3ec8294 100644 --- a/website/cue/reference/components/sources/nginx_metrics.cue +++ b/website/cue/reference/components/sources/nginx_metrics.cue @@ -128,7 +128,6 @@ components: sources: nginx_metrics: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total collect_completed_total: components.sources.internal_metrics.output.metrics.collect_completed_total collect_duration_seconds: components.sources.internal_metrics.output.metrics.collect_duration_seconds http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total diff --git a/website/cue/reference/components/sources/opentelemetry.cue b/website/cue/reference/components/sources/opentelemetry.cue index 5559ce52bfaed..3638a9cd95085 100644 --- a/website/cue/reference/components/sources/opentelemetry.cue +++ b/website/cue/reference/components/sources/opentelemetry.cue @@ -136,7 +136,7 @@ components: sources: 
opentelemetry: { severity_number: { description: """ Numerical value of the severity. - + Smaller numerical values correspond to less severe events (such as debug events), larger numerical values correspond to more severe events (such as errors and critical events). """ required: false @@ -168,7 +168,7 @@ components: sources: opentelemetry: { timestamp: { description: """ The UTC Datetime when the event occurred. If this value is unset, or `0`, it will be set to the `observed_timestamp` field. - + This field is converted from the `time_unix_nano` Protobuf field. """ required: true @@ -177,7 +177,7 @@ components: sources: opentelemetry: { observed_timestamp: { description: """ The UTC Datetime when the event was observed by the collection system. If this value is unset, or `0`, it will be set to the current time. - + This field is converted from the `observed_time_unix_nano` Protobuf field. """ required: true @@ -200,7 +200,6 @@ components: sources: opentelemetry: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total } how_it_works: { diff --git a/website/cue/reference/components/sources/postgresql_metrics.cue b/website/cue/reference/components/sources/postgresql_metrics.cue index 9eed3e74e01b4..f1573ea4e8564 100644 --- a/website/cue/reference/components/sources/postgresql_metrics.cue +++ b/website/cue/reference/components/sources/postgresql_metrics.cue @@ -69,7 +69,6 @@ components: sources: postgresql_metrics: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total collect_completed_total: components.sources.internal_metrics.output.metrics.collect_completed_total collect_duration_seconds: components.sources.internal_metrics.output.metrics.collect_duration_seconds component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total diff --git a/website/cue/reference/components/sources/prometheus_remote_write.cue b/website/cue/reference/components/sources/prometheus_remote_write.cue index 7ef6028e70b10..02ccbb25b63a2 100644 --- a/website/cue/reference/components/sources/prometheus_remote_write.cue +++ b/website/cue/reference/components/sources/prometheus_remote_write.cue @@ -88,10 +88,7 @@ components: sources: prometheus_remote_write: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total requests_received_total: 
components.sources.internal_metrics.output.metrics.requests_received_total request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds diff --git a/website/cue/reference/components/sources/prometheus_scrape.cue b/website/cue/reference/components/sources/prometheus_scrape.cue index 31f6c8ce0009f..ac051c92ebe86 100644 --- a/website/cue/reference/components/sources/prometheus_scrape.cue +++ b/website/cue/reference/components/sources/prometheus_scrape.cue @@ -97,12 +97,9 @@ components: sources: prometheus_scrape: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/redis.cue b/website/cue/reference/components/sources/redis.cue index a12634023a562..19e7a2780da8d 100644 --- a/website/cue/reference/components/sources/redis.cue +++ b/website/cue/reference/components/sources/redis.cue @@ -97,10 +97,6 @@ components: sources: redis: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total } } diff --git a/website/cue/reference/components/sources/socket.cue b/website/cue/reference/components/sources/socket.cue index 36f202f1e1928..f6bed0e610611 100644 --- a/website/cue/reference/components/sources/socket.cue +++ b/website/cue/reference/components/sources/socket.cue @@ -111,7 +111,6 @@ components: sources: socket: { ] telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total connection_failed_total: components.sources.internal_metrics.output.metrics.connection_failed_total connection_established_total: components.sources.internal_metrics.output.metrics.connection_established_total diff --git a/website/cue/reference/components/sources/splunk_hec.cue b/website/cue/reference/components/sources/splunk_hec.cue index 27e7064c72e3f..43be16cdfc7dd 100644 --- a/website/cue/reference/components/sources/splunk_hec.cue +++ b/website/cue/reference/components/sources/splunk_hec.cue @@ -83,7 +83,6 @@ components: sources: splunk_hec: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total 
component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total } diff --git a/website/cue/reference/components/sources/statsd.cue b/website/cue/reference/components/sources/statsd.cue index b137956d69bf2..cb190cd2b503a 100644 --- a/website/cue/reference/components/sources/statsd.cue +++ b/website/cue/reference/components/sources/statsd.cue @@ -79,12 +79,9 @@ components: sources: statsd: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total invalid_record_total: components.sources.internal_metrics.output.metrics.invalid_record_total invalid_record_bytes_total: components.sources.internal_metrics.output.metrics.invalid_record_bytes_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/stdin.cue b/website/cue/reference/components/sources/stdin.cue index 743bf76995183..e823a74c4156a 100644 --- a/website/cue/reference/components/sources/stdin.cue +++ b/website/cue/reference/components/sources/stdin.cue @@ -85,9 +85,6 @@ components: sources: stdin: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total diff --git a/website/cue/reference/components/sources/syslog.cue b/website/cue/reference/components/sources/syslog.cue index 61bec82319f7d..dc228327221b7 100644 --- a/website/cue/reference/components/sources/syslog.cue +++ b/website/cue/reference/components/sources/syslog.cue @@ -205,10 +205,7 @@ components: sources: syslog: { } telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total connection_read_errors_total: components.sources.internal_metrics.output.metrics.connection_read_errors_total - processed_bytes_total: components.sources.internal_metrics.output.metrics.processed_bytes_total - processed_events_total: components.sources.internal_metrics.output.metrics.processed_events_total component_received_bytes_total: 
components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total utf8_convert_errors_total: components.sources.internal_metrics.output.metrics.utf8_convert_errors_total diff --git a/website/cue/reference/components/sources/vector.cue b/website/cue/reference/components/sources/vector.cue index 305bb8a983f43..2b933f40fef4a 100644 --- a/website/cue/reference/components/sources/vector.cue +++ b/website/cue/reference/components/sources/vector.cue @@ -105,7 +105,6 @@ components: sources: vector: { component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total protobuf_decode_errors_total: components.sources.internal_metrics.output.metrics.protobuf_decode_errors_total } } diff --git a/website/cue/reference/components/transforms.cue b/website/cue/reference/components/transforms.cue index 7ff42fd869116..f548f7be69c35 100644 --- a/website/cue/reference/components/transforms.cue +++ b/website/cue/reference/components/transforms.cue @@ -13,8 +13,6 @@ components: transforms: [Name=string]: { configuration: base.components.transforms.configuration telemetry: metrics: { - events_in_total: components.sources.internal_metrics.output.metrics.events_in_total - events_out_total: components.sources.internal_metrics.output.metrics.events_out_total component_received_events_count: components.sources.internal_metrics.output.metrics.component_received_events_count component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total From bf372fd7cdef40704205e5fb5bf10bc50e002d94 Mon Sep 17 00:00:00 2001 From: neuronull Date: Mon, 29 May 2023 12:03:43 -0600 Subject: [PATCH 056/236] chore(ci): fix a few logic bugs and more strict comment parsing (#17502) - Made integration comment parsing more strict to not run on unintended comments - Fix logic concurrency group logic issues for comment triggers on k8s and regression workflows. 
- Use the merge queue's head sha for concurrency cancellation check, this is more correct and enables us to use the concurrency features of the merge queue --- .github/workflows/integration-comment.yml | 72 +++++++++++------------ .github/workflows/integration.yml | 2 +- .github/workflows/k8s_e2e.yml | 2 +- .github/workflows/master_merge_queue.yml | 20 +------ .github/workflows/regression.yml | 2 +- .github/workflows/test.yml | 2 +- 6 files changed, 39 insertions(+), 61 deletions(-) diff --git a/.github/workflows/integration-comment.yml b/.github/workflows/integration-comment.yml index ce72d06959317..629278ff95c20 100644 --- a/.github/workflows/integration-comment.yml +++ b/.github/workflows/integration-comment.yml @@ -40,10 +40,6 @@ env: CI: true PROFILE: debug -concurrency: - group: ${{ github.workflow }}-${{ github.event.issue.id }} - cancel-in-progress: true - jobs: prep-pr: name: (PR comment) Signal pending to PR @@ -81,73 +77,73 @@ jobs: matrix: run: - test_name: 'amqp' - if: ${{ contains(github.event.comment.body, 'amqp') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-amqp') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'appsignal' - if: ${{ contains(github.event.comment.body, 'appsignal') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-appsignal') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'aws' - if: ${{ contains(github.event.comment.body, 'aws') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-aws') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'axiom' - if: ${{ contains(github.event.comment.body, 'axiom') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-axiom') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'azure' - if: ${{ contains(github.event.comment.body, 'azure') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-azure') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'clickhouse' - if: ${{ contains(github.event.comment.body, 'clickhouse') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-clickhouse') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'databend' - if: ${{ contains(github.event.comment.body, 'databend') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-databend') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'datadog-agent' - if: ${{ contains(github.event.comment.body, 'datadog') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-datadog') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'datadog-logs' - if: ${{ contains(github.event.comment.body, 'datadog') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-datadog') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'datadog-metrics' - if: ${{ contains(github.event.comment.body, 'datadog') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-datadog') || 
contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'datadog-traces' - if: ${{ contains(github.event.comment.body, 'datadog') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-datadog') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'dnstap' - if: ${{ contains(github.event.comment.body, 'dnstap') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-dnstap') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'docker-logs' - if: ${{ contains(github.event.comment.body, 'docker-logs') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-docker-logs') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'elasticsearch' - if: ${{ contains(github.event.comment.body, 'elasticsearch') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-elasticsearch') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'eventstoredb' - if: ${{ contains(github.event.comment.body, 'eventstoredb') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-eventstoredb') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'fluent' - if: ${{ contains(github.event.comment.body, 'fluent') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-fluent') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'gcp' - if: ${{ contains(github.event.comment.body, 'gcp') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-gcp') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'humio' - if: ${{ contains(github.event.comment.body, 'humio') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-humio') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'http-client' - if: ${{ contains(github.event.comment.body, 'http-client') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-http-client') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'influxdb' - if: ${{ contains(github.event.comment.body, 'influxdb') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-influxdb') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'kafka' - if: ${{ contains(github.event.comment.body, 'kafka') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-kafka') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'logstash' - if: ${{ contains(github.event.comment.body, 'logstash') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-logstash') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'loki' - if: ${{ contains(github.event.comment.body, 'loki') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-loki') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'mongodb' - if: ${{ 
contains(github.event.comment.body, 'mongodb') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-mongodb') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'nats' - if: ${{ contains(github.event.comment.body, 'nats') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-nats') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'nginx' - if: ${{ contains(github.event.comment.body, 'nginx') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-nginx') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'opentelemetry' - if: ${{ contains(github.event.comment.body, 'opentelemetry') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-opentelemetry') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'postgres' - if: ${{ contains(github.event.comment.body, 'postgres') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-postgres') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'prometheus' - if: ${{ contains(github.event.comment.body, 'prometheus') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-prometheus') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'pulsar' - if: ${{ contains(github.event.comment.body, 'pulsar') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-pulsar') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'redis' - if: ${{ contains(github.event.comment.body, 'redis') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-redis') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'shutdown' - if: ${{ contains(github.event.comment.body, 'shutdown') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-shutdown') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'splunk' - if: ${{ contains(github.event.comment.body, 'splunk') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-splunk') || contains(github.event.comment.body, '/ci-run-all') }} - test_name: 'webhdfs' - if: ${{ contains(github.event.comment.body, 'webhdfs') || contains(github.event.comment.body, 'all') }} + if: ${{ contains(github.event.comment.body, '/ci-run-integration-webhdfs') || contains(github.event.comment.body, '/ci-run-all') }} update-pr-status: name: Signal result to PR diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index d8ebebfc7f50e..959c43dde7058 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -13,7 +13,7 @@ on: concurrency: # `github.event.number` exists for pull requests, otherwise fall back to SHA for merge queue - group: ${{ github.workflow }}-${{ github.event.number || github.event.merge_group.base_sha }} + group: ${{ github.workflow }}-${{ github.event.number || github.event.merge_group.head_sha }} cancel-in-progress: true env: diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index 
06c5ace79cddd..f9360132d71e0 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -26,7 +26,7 @@ on: - cron: '0 0 * * 2-6' concurrency: - group: ${{ github.workflow }}-${{ github.event.number || github.event.issue.id || github.event.merge_group.base_sha || github.event.schedule || github.sha }} + group: ${{ github.workflow }}-${{ github.event.number || github.event.comment.html_url || github.event.merge_group.head_sha || github.event.schedule || github.sha }} cancel-in-progress: true diff --git a/.github/workflows/master_merge_queue.yml b/.github/workflows/master_merge_queue.yml index 6e38bd7f29a24..eaf5b7bf08295 100644 --- a/.github/workflows/master_merge_queue.yml +++ b/.github/workflows/master_merge_queue.yml @@ -21,7 +21,7 @@ on: concurrency: # `github.event.number` exists for pull requests, otherwise fall back to SHA for merge queue - group: ${{ github.workflow }}-${{ github.event.number || github.event.merge_group.base_sha }} + group: ${{ github.workflow }}-${{ github.event.number || github.event.merge_group.head_sha }} cancel-in-progress: true env: @@ -106,24 +106,6 @@ jobs: needs: changes secrets: inherit - # TODO: in a followup PR, run the regression workflow here, as a single reusable workflow. - # - # NOTE: This design of passing in the pr-number to the Regression workflow requires that the merge queue - # be configured contain a maximum of one PR per execution. This is so that the regression report generated - # by the workflow can be posted as a comment to the PR. - # At a later time, we may want to revisit this in order to allow multiple PRs to be included in a merge - # queue execution. At such time, the logic of uploading of the report will need to change to account for - # multiple PRs. - # regression: - # if: needs.changes.outputs.source == 'true' - # uses: ./.github/workflows/regression.yml - # with: - # pr_number: ${{ needs.changes.outputs.pr-number }} - # base_sha: ${{ github.event.merge_group.base_sha }} - # head_sha: ${{ github.event.merge_group.head_sha }} - # needs: changes - # secrets: inherit - master-merge-queue-check: name: Master Merge Queue Suite # Always run this so that pull_request triggers are marked as success. diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 681a6757cfafa..629ae62c201db 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -34,7 +34,7 @@ on: pull_request: concurrency: - group: ${{ github.workflow }}-${{ github.event.issue.id || github.event.merge_group.base_sha || github.sha }} + group: ${{ github.workflow }}-${{ github.event.merge_group.head_sha || github.sha }} cancel-in-progress: true jobs: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1219da7a73d61..b36aba1f3108d 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -7,7 +7,7 @@ on: concurrency: # `github.event.number` exists for pull requests, otherwise fall back to SHA for merge queue - group: ${{ github.workflow }}-${{ github.event.number || github.event.merge_group.base_sha }} + group: ${{ github.workflow }}-${{ github.event.number || github.event.merge_group.head_sha }} cancel-in-progress: true env: From cc703da814928b41e0d9c0d7d211181f4aa5758a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 16:10:19 -0600 Subject: [PATCH 057/236] chore(deps): Bump tokio from 1.28.1 to 1.28.2 (#17525) Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.28.1 to 1.28.2.
Release notes

Sourced from tokio's releases.

Tokio v1.28.2

1.28.2 (May 28, 2023)

Forward ports 1.18.6 changes.

Fixed

  • deps: disable default features for mio (#5728)

#5728: tokio-rs/tokio#5728

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tokio&package-manager=cargo&previous-version=1.28.1&new-version=1.28.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 4 ++-- lib/file-source/Cargo.toml | 2 +- lib/k8s-e2e-tests/Cargo.toml | 2 +- lib/k8s-test-framework/Cargo.toml | 2 +- lib/vector-api-client/Cargo.toml | 2 +- lib/vector-buffers/Cargo.toml | 2 +- lib/vector-common/Cargo.toml | 4 ++-- lib/vector-core/Cargo.toml | 2 +- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a4c43c93cd19b..db39d1db287ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8209,9 +8209,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.28.1" +version = "1.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" +checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" dependencies = [ "autocfg", "bytes 1.4.0", diff --git a/Cargo.toml b/Cargo.toml index 095abe2743a30..4d3f37cad5ac8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -140,7 +140,7 @@ loki-logproto = { path = "lib/loki-logproto", optional = true } async-stream = { version = "0.3.5", default-features = false } async-trait = { version = "0.1.68", default-features = false } futures = { version = "0.3.28", default-features = false, features = ["compat", "io-compat"], package = "futures" } -tokio = { version = "1.28.1", default-features = false, features = ["full"] } +tokio = { version = "1.28.2", default-features = false, features = ["full"] } tokio-openssl = { version = "0.6.3", default-features = false } tokio-stream = { version = "0.1.14", default-features = false, features = ["net", "sync", "time"] } tokio-util = { version = "0.7", default-features = false, features = ["io", "time"] } @@ -354,7 +354,7 @@ reqwest = { version = "0.11", features = ["json"] } tempfile = "3.5.0" test-generator = "0.3.1" tokio-test = "0.4.2" -tokio = { version = "1.28.1", features = ["test-util"] } +tokio = { version = "1.28.2", features = ["test-util"] } tower-test = "0.4.0" vector-core = { path = "lib/vector-core", default-features = false, features = ["vrl", "test"] } wiremock = "0.5.18" diff --git a/lib/file-source/Cargo.toml b/lib/file-source/Cargo.toml index 3057ea20e726a..aa249a7e2c913 100644 --- a/lib/file-source/Cargo.toml +++ b/lib/file-source/Cargo.toml @@ -69,7 +69,7 @@ default-features = false features = [] [dependencies.tokio] -version = "1.28.1" +version = "1.28.2" default-features = false features = ["full"] diff --git a/lib/k8s-e2e-tests/Cargo.toml b/lib/k8s-e2e-tests/Cargo.toml index ad164e128355b..8e28fb6c89f7f 100644 --- a/lib/k8s-e2e-tests/Cargo.toml +++ b/lib/k8s-e2e-tests/Cargo.toml @@ -14,7 +14,7 @@ k8s-test-framework = { version = "0.1", path = "../k8s-test-framework" } regex = "1" reqwest = { version = "0.11.18", features = ["json"] } serde_json = "1" -tokio = { version = "1.28.1", features = ["full"] } +tokio = { version = "1.28.2", features = ["full"] } indoc = "2.0.1" env_logger = "0.10" tracing = { version = "0.1", features = ["log"] } diff --git a/lib/k8s-test-framework/Cargo.toml b/lib/k8s-test-framework/Cargo.toml index cdf8e55178a1a..534a97f81262d 100644 --- a/lib/k8s-test-framework/Cargo.toml +++ b/lib/k8s-test-framework/Cargo.toml @@ -11,5 +11,5 @@ license = "MPL-2.0" k8s-openapi = { version = "0.16.0", default-features = false, features = ["v1_19"] } serde_json = "1" tempfile = "3" -tokio = { version = "1.28.1", features = 
["full"] } +tokio = { version = "1.28.2", features = ["full"] } log = "0.4" diff --git a/lib/vector-api-client/Cargo.toml b/lib/vector-api-client/Cargo.toml index 36c40bcd7e9e7..9337589aa4ca5 100644 --- a/lib/vector-api-client/Cargo.toml +++ b/lib/vector-api-client/Cargo.toml @@ -18,7 +18,7 @@ anyhow = { version = "1.0.71", default-features = false, features = ["std"] } # Tokio / Futures async-trait = { version = "0.1", default-features = false } futures = { version = "0.3", default-features = false, features = ["compat", "io-compat"] } -tokio = { version = "1.28.1", default-features = false, features = ["macros", "rt", "sync"] } +tokio = { version = "1.28.2", default-features = false, features = ["macros", "rt", "sync"] } tokio-stream = { version = "0.1.14", default-features = false, features = ["sync"] } # GraphQL diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index b454916b3f65d..28f767129c9a1 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -24,7 +24,7 @@ rkyv = { version = "0.7.40", default-features = false, features = ["size_32", "s serde = { version = "1.0.163", default-features = false, features = ["derive"] } snafu = { version = "0.7.4", default-features = false, features = ["std"] } tokio-util = { version = "0.7.0", default-features = false } -tokio = { version = "1.28.1", default-features = false, features = ["rt", "macros", "rt-multi-thread", "sync", "fs", "io-util", "time"] } +tokio = { version = "1.28.2", default-features = false, features = ["rt", "macros", "rt-multi-thread", "sync", "fs", "io-util", "time"] } tracing = { version = "0.1.34", default-features = false, features = ["attributes"] } vector-config = { path = "../vector-config", default-features = false } vector-config-common = { path = "../vector-config-common", default-features = false } diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index c2d26eb49e14f..04a916445ff0b 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -60,7 +60,7 @@ serde = { version = "1.0.163", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false } snafu = { version = "0.7", optional = true } stream-cancel = { version = "0.8.1", default-features = false } -tokio = { version = "1.28.1", default-features = false, features = ["macros", "time"] } +tokio = { version = "1.28.2", default-features = false, features = ["macros", "time"] } tracing = { version = "0.1.34", default-features = false } vrl = { version = "0.4.0", default-features = false, features = ["value", "core", "compiler"] } vector-config = { path = "../vector-config" } @@ -69,6 +69,6 @@ vector-config-macros = { path = "../vector-config-macros" } [dev-dependencies] futures = { version = "0.3.28", default-features = false, features = ["async-await", "std"] } -tokio = { version = "1.28.1", default-features = false, features = ["rt", "time"] } +tokio = { version = "1.28.2", default-features = false, features = ["rt", "time"] } quickcheck = "1" quickcheck_macros = "1" diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index c62ebed0d56d3..06c8a640abedf 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -46,7 +46,7 @@ serde_with = { version = "2.3.2", default-features = false, features = ["std", " smallvec = { version = "1", default-features = false, features = ["serde", "const_generics"] } snafu = { version = "0.7.4", default-features = false } socket2 = { version = "0.5.3", default-features = 
false } -tokio = { version = "1.28.1", default-features = false, features = ["net"] } +tokio = { version = "1.28.2", default-features = false, features = ["net"] } tokio-openssl = { version = "0.6.3", default-features = false } tokio-stream = { version = "0.1", default-features = false, features = ["time"], optional = true } tokio-util = { version = "0.7.0", default-features = false, features = ["time"] } From 2388c2f492a4952e48f1c1f8469045378ec60739 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 22:11:22 +0000 Subject: [PATCH 058/236] chore(deps): Bump quanta from 0.11.0 to 0.11.1 (#17524) Bumps [quanta](https://github.com/metrics-rs/quanta) from 0.11.0 to 0.11.1.
Changelog

Sourced from quanta's changelog.

[0.11.1] - 2023-05-28

Added

  • Added a new method, Clock::delta_as_nanos, for getting the delta between two raw measurements as the whole number of nanoseconds instead of the initial conversion to Duration. (#86)

#86: metrics-rs/quanta#86

Commits
  • be64401 (cargo-release) version 0.11.1
  • 5ed5520 fix fmt + update changelog
  • 894ebdf Add Clock::delta_as_nanos for skipping conversion to Duration (#86)
  • See full diff in compare view
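For context on the `Clock::delta_as_nanos` addition noted in the changelog above, a minimal usage sketch (not part of this patch) comparing it with the existing `Clock::delta`:

```rust
use quanta::Clock;

fn main() {
    let clock = Clock::new();

    let start = clock.raw();
    // ... work being measured ...
    let end = clock.raw();

    // Existing API: convert the raw delta into a `std::time::Duration`.
    let as_duration = clock.delta(start, end);

    // New in 0.11.1: the delta as whole nanoseconds, skipping the
    // intermediate `Duration` conversion.
    let as_nanos = clock.delta_as_nanos(start, end);

    println!("{:?} ({} ns)", as_duration, as_nanos);
}
```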

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=quanta&package-manager=cargo&previous-version=0.11.0&new-version=0.11.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- lib/vector-core/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index db39d1db287ca..b069e6e718b6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6387,9 +6387,9 @@ checksum = "658fa1faf7a4cc5f057c9ee5ef560f717ad9d8dc66d975267f709624d6e1ab88" [[package]] name = "quanta" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cc73c42f9314c4bdce450c77e6f09ecbddefbeddb1b5979ded332a3913ded33" +checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" dependencies = [ "crossbeam-utils", "libc", diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 06c8a640abedf..f3cb73ebfa03c 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -37,7 +37,7 @@ pin-project = { version = "1.1.0", default-features = false } proptest = { version = "1.2", optional = true } prost-types = { version = "0.11", default-features = false } prost = { version = "0.11", default-features = false, features = ["std"] } -quanta = { version = "0.11.0", default-features = false } +quanta = { version = "0.11.1", default-features = false } regex = { version = "1.8.3", default-features = false, features = ["std", "perf"] } ryu = { version = "1", default-features = false } serde = { version = "1.0.163", default-features = false, features = ["derive", "rc"] } From da7bc951c450c1274fa37abb2d19b83dd3f965ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 22:12:17 +0000 Subject: [PATCH 059/236] chore(deps): Bump criterion from 0.5.0 to 0.5.1 (#17500) Bumps [criterion](https://github.com/bheisler/criterion.rs) from 0.5.0 to 0.5.1.
Changelog

Sourced from criterion's changelog.

[0.5.1] - 2023-05-26

Fixed

  • Quick mode (--quick) no longer crashes with measured times over 5 seconds when --noplot is not active
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=criterion&package-manager=cargo&previous-version=0.5.0&new-version=0.5.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b069e6e718b6f..58b29f6177a3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2256,9 +2256,9 @@ dependencies = [ [[package]] name = "criterion" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f9c16c823fba76d9643cc387e9677d9771abe0827561381815215c47f808da9" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ "anes", "cast", diff --git a/Cargo.toml b/Cargo.toml index 4d3f37cad5ac8..0352c7822c2f3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -344,7 +344,7 @@ azure_identity = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev azure_storage_blobs = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["azurite_workaround"] } azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["azurite_workaround"] } base64 = "0.21.2" -criterion = { version = "0.5.0", features = ["html_reports", "async_tokio"] } +criterion = { version = "0.5.1", features = ["html_reports", "async_tokio"] } itertools = { version = "0.10.5", default-features = false } libc = "0.2.144" similar-asserts = "1.4.2" diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index f3cb73ebfa03c..94935a7ea45d0 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -79,7 +79,7 @@ prost-build = "0.11" [dev-dependencies] base64 = "0.21.2" chrono-tz = { version = "0.8.2", default-features = false } -criterion = { version = "0.5.0", features = ["html_reports"] } +criterion = { version = "0.5.1", features = ["html_reports"] } env-test-util = "1.0.1" quickcheck = "1" quickcheck_macros = "1" From aa014528ca83bd3f1d17604d8c138ac2d0484074 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Tue, 30 May 2023 10:17:29 -0600 Subject: [PATCH 060/236] chore(ci): Drop VRL license exceptions (#17529) Ref: https://github.com/vectordotdev/vector/pull/17378#pullrequestreview-1424780253 --- license-tool.toml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/license-tool.toml b/license-tool.toml index dcefb5284d8cb..64e86fe9837b0 100644 --- a/license-tool.toml +++ b/license-tool.toml @@ -4,15 +4,6 @@ "openssl-macros" = { origin = "https://github.com/sfackler/rust-openssl" } "serde_nanos" = { origin = "https://github.com/caspervonb/serde_nanos" } -# These can go away once Vector starts using a release of the VRL crate with a -# library field set up. -"vrl" = { license = "MPL-2.0" } -"vrl-compiler" = { license = "MPL-2.0" } -"vrl-core" = { license = "MPL-2.0" } -"vrl-diagnostic" = { license = "MPL-2.0" } -"vrl-parser" = { license = "MPL-2.0" } -"vrl-tests" = { license = "MPL-2.0" } - # `ring` has a custom license that is mostly "ISC-style" but parts of it also fall under OpenSSL licensing. 
"ring-0.16.20" = { license = "ISC AND Custom" } From 078de661e7146a1924c0c31fed65b8b0ccbb7316 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 16:05:02 +0000 Subject: [PATCH 061/236] chore(deps): Bump openssl from 0.10.52 to 0.10.53 (#17534) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.52 to 0.10.53.
Release notes

Sourced from openssl's releases.

openssl-v0.10.53

What's Changed

New Contributors

Full Changelog: https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.52...openssl-v0.10.53

Commits
  • 79ad66a Merge pull request #1940 from reaperhulk/version-bump
  • 7a040da Update openssl/CHANGELOG.md
  • 6a65a2b version bump 0.9.88 and 0.10.53
  • 8f72f7b Merge pull request #1939 from alex/der-dsa
  • b3cdda0 Added DER serialization for DSAPrivateKey
  • c9ce286 Merge pull request #1938 from reaperhulk/dsa-simplify
  • c972e70 reimplement Dsa::generate in terms of generate_params/generate_key
  • d07363e Merge pull request #1937 from reaperhulk/dsa-params
  • b937b66 add Dsa<Params> with some helper functions
  • 987c9aa Merge pull request #1935 from vishwin/master
  • Additional commits viewable in compare view
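For context on the DSA changes listed above, a minimal sketch (not part of this patch). `Dsa::generate` is existing rust-openssl API; the `private_key_to_der` call is an assumption about how the new DER serialization for DSA private keys is exposed:

```rust
use openssl::dsa::Dsa;
use openssl::error::ErrorStack;

fn main() -> Result<(), ErrorStack> {
    // Generate a 2048-bit DSA private key (existing API, reimplemented in
    // 0.10.53 in terms of generate_params/generate_key per the notes above).
    let key = Dsa::generate(2048)?;

    // Assumed method name for the DER serialization added in 0.10.53.
    let der = key.private_key_to_der()?;
    println!("DER-encoded DSA private key: {} bytes", der.len());

    Ok(())
}
```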

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=openssl&package-manager=cargo&previous-version=0.10.52&new-version=0.10.53)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 58b29f6177a3f..f9f2377c04437 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5583,9 +5583,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.52" +version = "0.10.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56" +checksum = "12df40a956736488b7b44fe79fe12d4f245bb5b3f5a1f6095e499760015be392" dependencies = [ "bitflags", "cfg-if", @@ -5624,9 +5624,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.87" +version = "0.9.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e" +checksum = "c2ce0f250f34a308dcfdbb351f511359857d4ed2134ba715a4eadd46e1ffd617" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index 0352c7822c2f3..e7fb3e18c9b29 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -281,7 +281,7 @@ nkeys = { version = "0.3.0", default-features = false, optional = true } nom = { version = "7.1.3", default-features = false, optional = true } notify = { version = "6.0.0", default-features = false, features = ["macos_fsevent"] } once_cell = { version = "1.17", default-features = false } -openssl = { version = "0.10.52", default-features = false, features = ["vendored"] } +openssl = { version = "0.10.53", default-features = false, features = ["vendored"] } openssl-probe = { version = "0.1.5", default-features = false } ordered-float = { version = "3.7.0", default-features = false } paste = "1.0.12" diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 94935a7ea45d0..9ef187e61bc68 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -31,7 +31,7 @@ mlua = { version = "0.8.9", default-features = false, features = ["lua54", "send no-proxy = { version = "0.3.2", default-features = false, features = ["serialize"] } once_cell = { version = "1.17", default-features = false } ordered-float = { version = "3.7.0", default-features = false } -openssl = { version = "0.10.52", default-features = false, features = ["vendored"] } +openssl = { version = "0.10.53", default-features = false, features = ["vendored"] } parking_lot = { version = "0.12.1", default-features = false } pin-project = { version = "1.1.0", default-features = false } proptest = { version = "1.2", optional = true } From 1565985746868265a1582a1b33b4eb56cc046c26 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 16:06:30 +0000 Subject: [PATCH 062/236] chore(deps): Bump indicatif from 0.17.3 to 0.17.4 (#17532) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [indicatif](https://github.com/console-rs/indicatif) from 0.17.3 to 0.17.4.
Release notes

Sourced from indicatif's releases.

0.17.4

Another small bugfix release.

  • Handle newline in msg and empty msg (#540, thanks to @​RDruon)
  • Handle terminal line wrap to avoid new line (#533, thanks to @​RDruon)
  • Resetting the elapsed time also resets ETA (#538, thanks to @​afontenot)
  • Mention the prefix and message placeholders in the with_ docs (#529, thanks to @​lnicola)
  • Allow rate-limiting TermLike targets (#526, thanks to @​akx)
  • Fix docs for ProgressDrawTarget (#523, thanks to @​tillarnold)
  • Change "OS X" to "macOS" (#519, thanks to @​atouchet)
  • Fix MultiProgress alignment handling and migrate from structopt => clap (#516)
  • Don't deadlock when double-adding ProgressBar (#515)
  • Use instant::Instant when compiling to WASM (#514, thanks to @​azriel91)
  • Update portable-atomic requirement from 0.3.15 to 1.0.0 (#512)
  • Add contents_formatted() method to `InMemoryTerm` (#531, thanks to @dfaust)
  • inc() after work in examples (#522, thanks to @​tatref)

On behalf of the indicatif team (@​chris-laplante and @​djc), thanks to all contributors!

Commits
  • 6cc3814 Bump CI MSRV to 1.62.1 to resolve Cargo metadata for vte
  • 7c9465b Bump version to 0.17.4
  • 5dc966c Bump clap dependency to 4
  • 577a345 Only check library target against MSRV
  • 9deb9eb Add contents_formatted to InMemoryTerm
  • e0494e4 fix: inc after work
  • 59cd606 Handle newline in msg and empty msg
  • 6d31845 resetting the elapsed time also resets ETA
  • 3d5dbb3 Add render tests
  • 66a90a6 Handle MultiProgress println
  • Additional commits viewable in compare view
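For context on the ETA fix noted above ("Resetting the elapsed time also resets ETA"), a small sketch (not part of this patch) using indicatif's existing progress-bar API:

```rust
use indicatif::ProgressBar;
use std::{thread, time::Duration};

fn main() {
    let pb = ProgressBar::new(100);

    for _ in 0..50 {
        thread::sleep(Duration::from_millis(10));
        pb.inc(1);
    }

    // As of 0.17.4, resetting the elapsed time also resets the ETA estimate,
    // so the remaining-time display starts fresh from this point.
    pb.reset_elapsed();

    for _ in 0..50 {
        thread::sleep(Duration::from_millis(10));
        pb.inc(1);
    }

    pb.finish_with_message("done");
}
```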

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=indicatif&package-manager=cargo&previous-version=0.17.3&new-version=0.17.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 15 +++++---------- vdev/Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f9f2377c04437..e47a640577979 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4093,13 +4093,14 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.3" +version = "0.17.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef509aa9bc73864d6756f0d34d35504af3cf0844373afe9b8669a5b8005a729" +checksum = "db45317f37ef454e6519b6c3ed7d377e5f23346f0823f86e65ca36912d1d0ef8" dependencies = [ "console", + "instant", "number_prefix", - "portable-atomic 0.3.15", + "portable-atomic", "unicode-segmentation", "unicode-width", ] @@ -4890,7 +4891,7 @@ checksum = "aa8ebbd1a9e57bbab77b9facae7f5136aea44c356943bf9a198f647da64285d6" dependencies = [ "ahash 0.8.2", "metrics-macros", - "portable-atomic 1.3.1", + "portable-atomic", ] [[package]] @@ -6020,12 +6021,6 @@ dependencies = [ "windows-sys 0.42.0", ] -[[package]] -name = "portable-atomic" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15eb2c6e362923af47e13c23ca5afb859e83d54452c55b0b9ac763b8f7c1ac16" - [[package]] name = "portable-atomic" version = "1.3.1" diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index a71429cc5aecb..54038b4c213ae 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -22,7 +22,7 @@ dunce = "1.0.4" glob = { version = "0.3.1", default-features = false } hashlink = { version = "0.8.2", features = ["serde_impl"] } hex = "0.4.3" -indicatif = { version = "0.17.3", features = ["improved_unicode"] } +indicatif = { version = "0.17.4", features = ["improved_unicode"] } itertools = "0.10.5" log = "0.4.17" once_cell = "1.17" From 8e113addc48328f3918e6abc7623284d93d4030b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 16:07:26 +0000 Subject: [PATCH 063/236] chore(deps): Bump once_cell from 1.17.1 to 1.17.2 (#17531) Bumps [once_cell](https://github.com/matklad/once_cell) from 1.17.1 to 1.17.2.
Changelog

Sourced from once_cell's changelog.

1.17.2

  • Avoid unnecessary synchronization in Lazy::{force,deref}_mut(), #231.
Commits
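For context on the 1.17.2 change noted above, a minimal sketch (not part of this patch) of the two call paths it touches, `Lazy::force_mut` and mutable dereferencing:

```rust
use once_cell::sync::Lazy;

fn main() {
    let mut numbers = Lazy::new(|| vec![1, 2, 3]);

    // `force_mut` initializes the value (if needed) and returns a mutable
    // reference; 1.17.2 avoids unnecessary synchronization on this path.
    Lazy::force_mut(&mut numbers).push(4);

    // `DerefMut` on `Lazy` goes through the same improved path.
    numbers.push(5);

    assert_eq!(*numbers, vec![1, 2, 3, 4, 5]);
}
```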

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=once_cell&package-manager=cargo&previous-version=1.17.1&new-version=1.17.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e47a640577979..1eb1cd78d4df1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5489,9 +5489,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.1" +version = "1.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" +checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b" [[package]] name = "onig" From 5a2fea10da7eaa04b7e51af84cdea87ab6e8326b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 16:09:28 +0000 Subject: [PATCH 064/236] chore(deps): Bump log from 0.4.17 to 0.4.18 (#17526) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [log](https://github.com/rust-lang/log) from 0.4.17 to 0.4.18.
Changelog

Sourced from log's changelog.

[0.4.18] - 2023-05-28

Commits
  • 304eef7 Merge pull request #566 from rust-lang/cargo/0.4.18-patch
  • d062c83 prepare for 0.4.18 release
  • fcbb351 Merge pull request #565 from rust-lang/fix/nightly-build-msrv
  • 4e689bb Revert "Remove build.rs file"
  • 3ea1c66 Merge pull request #562 from OccupyMars2025/patch-1
  • 7c4f808 [doc] src/macros.rs : correct grammar errors of an example in lib documenta...
  • f4c21c1 Merge pull request #561 from OccupyMars2025/master
  • 1acf2f3 src/lib.rs : prefix an unused variable with an underscore
  • 502bdb7 Merge pull request #557 from est31/msrv_in_toml
  • 79c330c Put MSRV into Cargo.toml
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=log&package-manager=cargo&previous-version=0.4.17&new-version=0.4.18)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 7 ++----- vdev/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1eb1cd78d4df1..8b219864d8467 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4676,12 +4676,9 @@ checksum = "8166fbddef141acbea89cf3425ed97d4c22d14a68161977fc01c301175a4fb89" [[package]] name = "log" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] +checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" [[package]] name = "logfmt" diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 54038b4c213ae..8b7b3c676e12d 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -24,7 +24,7 @@ hashlink = { version = "0.8.2", features = ["serde_impl"] } hex = "0.4.3" indicatif = { version = "0.17.4", features = ["improved_unicode"] } itertools = "0.10.5" -log = "0.4.17" +log = "0.4.18" once_cell = "1.17" os_info = { version = "3.7.0", default-features = false } # watch https://github.com/epage/anstyle for official interop with Clap From ecb707a633020bca8c805d5764b85302b74ca477 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 18:08:20 +0000 Subject: [PATCH 065/236] chore(deps): Bump graphql_client from 0.12.0 to 0.13.0 (#17541) Bumps [graphql_client](https://github.com/graphql-rust/graphql-client) from 0.12.0 to 0.13.0.
Changelog

Sourced from graphql_client's changelog.

0.13.0 - 2023-05-25

  • Add support for @oneOf
  • Update Ubuntu image for CI
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=graphql_client&package-manager=cargo&previous-version=0.12.0&new-version=0.13.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++------ lib/vector-api-client/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8b219864d8467..38ac0c27824ac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3512,9 +3512,9 @@ dependencies = [ [[package]] name = "graphql_client" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa61bb9dc6d373a8b465a5da17b62809483e8527a34b0e9034dc0915b09e160a" +checksum = "09cdf7b487d864c2939b23902291a5041bc4a84418268f25fda1c8d4e15ad8fa" dependencies = [ "graphql_query_derive", "serde", @@ -3523,9 +3523,9 @@ dependencies = [ [[package]] name = "graphql_client_codegen" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e55df64cc702c4ad6647f8df13a799ad11688a3781fadf5045f7ba12733fa9b" +checksum = "a40f793251171991c4eb75bd84bc640afa8b68ff6907bc89d3b712a22f700506" dependencies = [ "graphql-introspection-query", "graphql-parser", @@ -3540,9 +3540,9 @@ dependencies = [ [[package]] name = "graphql_query_derive" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52fc9cde811f44b15ec0692b31e56a3067f6f431c5ace712f286e47c1dacc98" +checksum = "00bda454f3d313f909298f626115092d348bc231025699f557b27e248475f48c" dependencies = [ "graphql_client_codegen", "proc-macro2 1.0.59", diff --git a/lib/vector-api-client/Cargo.toml b/lib/vector-api-client/Cargo.toml index 9337589aa4ca5..ec2763cd1e49a 100644 --- a/lib/vector-api-client/Cargo.toml +++ b/lib/vector-api-client/Cargo.toml @@ -22,7 +22,7 @@ tokio = { version = "1.28.2", default-features = false, features = ["macros", "r tokio-stream = { version = "0.1.14", default-features = false, features = ["sync"] } # GraphQL -graphql_client = { version = "0.12.0", default-features = false, features = ["graphql_query_derive"] } +graphql_client = { version = "0.13.0", default-features = false, features = ["graphql_query_derive"] } # HTTP / WebSockets reqwest = { version = "0.11.18", default-features = false, features = ["json"] } From b0ed167d1ae22b8f0a7a762ad50750c912f0833b Mon Sep 17 00:00:00 2001 From: Toby Lawrence Date: Tue, 30 May 2023 15:04:00 -0400 Subject: [PATCH 066/236] chore(observability): remove more deprecated internal metrics (#17542) This is a follow-up PR to #17516, which does a few things: - completely removes `processing_errors_total`, `events_failed_total`, and `events_discarded_total`, with one exception (see reviewer notes) - adds a 0.31 upgrade guide which covers the removal of these metrics and the metrics removed in #17516 - updates the Cue files for all components to derive their Component Specification-related metrics descriptions at the component type level (see reviewer notes) ## Reviewer Notes ### One dangling reference to `events_discarded_total` There's still one dangling reference to this deprecated metric for the `throttle` transform. I left a TODO comment in the code, but essentially, it currently specifies a tag (which bucket key was throttled) for `events_discarded_total` which can't be translated directly to `component_discarded_events_total` as the specification disallows custom tags. We'll probably need to quickly talk about whether the specification is too rigid or if we actually want to emit that tag at all. 
### Updated Cue files for common component metrics I followed the Component Specification here, since technically all components receive and send events, and can drop or discard those events. This touched a vast majority of the component Cue files, and should bring them up to a far more consistent state than the previous set of circumstances. There's definitely something to be said for even pushing up the abstraction of how components inherit that `telemetry: metrics { ... }` stuff so it's driven by component type rather than hard-coded in the per-component-type Cue file (`sources.cue`, etc)... but this felt like a reasonable middle ground to leave it at for now. --- src/internal_events/batch.rs | 13 ++--- src/internal_events/dedupe.rs | 2 - src/internal_events/filter.rs | 3 -- src/internal_events/kafka.rs | 2 - src/internal_events/loki.rs | 53 +++++++++++++------ src/internal_events/lua.rs | 8 --- src/internal_events/metric_to_log.rs | 2 - src/internal_events/parser.rs | 6 --- src/internal_events/remap.rs | 2 - src/internal_events/sample.rs | 2 - src/internal_events/sematext_metrics.rs | 2 - src/internal_events/statsd_sink.rs | 2 - src/internal_events/template.rs | 7 --- src/internal_events/throttle.rs | 10 +++- src/sinks/loki/sink.rs | 6 +-- .../2023-07-05-0-31-0-upgrade-guide.md | 51 ++++++++++++++++++ website/cue/reference/components/sinks.cue | 15 ++++-- .../cue/reference/components/sinks/amqp.cue | 5 -- .../components/sinks/aws_cloudwatch_logs.cue | 7 --- .../sinks/aws_cloudwatch_metrics.cue | 5 -- .../components/sinks/aws_kinesis_firehose.cue | 5 -- .../components/sinks/aws_kinesis_streams.cue | 6 --- .../cue/reference/components/sinks/aws_s3.cue | 8 --- .../reference/components/sinks/aws_sqs.cue | 7 --- .../cue/reference/components/sinks/axiom.cue | 8 --- .../reference/components/sinks/azure_blob.cue | 8 +-- .../components/sinks/azure_monitor_logs.cue | 6 --- .../reference/components/sinks/blackhole.cue | 3 -- .../reference/components/sinks/clickhouse.cue | 6 --- .../reference/components/sinks/console.cue | 4 -- .../reference/components/sinks/databend.cue | 6 --- .../components/sinks/datadog_archives.cue | 8 --- .../components/sinks/datadog_events.cue | 6 --- .../components/sinks/datadog_metrics.cue | 5 -- .../components/sinks/datadog_traces.cue | 7 --- .../components/sinks/elasticsearch.cue | 8 --- .../cue/reference/components/sinks/file.cue | 8 --- .../sinks/gcp_chronicle_unstructured.cue | 7 --- .../components/sinks/gcp_cloud_storage.cue | 7 --- .../reference/components/sinks/gcp_pubsub.cue | 6 --- .../components/sinks/gcp_stackdriver_logs.cue | 6 --- .../sinks/gcp_stackdriver_metrics.cue | 6 --- .../reference/components/sinks/honeycomb.cue | 6 --- .../cue/reference/components/sinks/http.cue | 6 +-- .../cue/reference/components/sinks/humio.cue | 6 --- .../reference/components/sinks/humio_logs.cue | 1 - .../components/sinks/humio_metrics.cue | 1 - .../components/sinks/influxdb_logs.cue | 6 --- .../components/sinks/influxdb_metrics.cue | 5 -- .../cue/reference/components/sinks/kafka.cue | 5 -- .../cue/reference/components/sinks/loki.cue | 7 +-- .../cue/reference/components/sinks/mezmo.cue | 8 --- .../cue/reference/components/sinks/nats.cue | 4 +- .../sinks/prometheus_remote_write.cue | 6 --- .../cue/reference/components/sinks/pulsar.cue | 4 +- .../cue/reference/components/sinks/redis.cue | 4 +- .../components/sinks/sematext_metrics.cue | 5 +- .../components/sinks/splunk_hec_logs.cue | 9 +--- .../cue/reference/components/sinks/statsd.cue | 6 --- 
.../cue/reference/components/sinks/vector.cue | 5 +- .../reference/components/sinks/webhdfs.cue | 6 --- .../reference/components/sinks/websocket.cue | 13 ++--- website/cue/reference/components/sources.cue | 12 +++-- .../cue/reference/components/sources/amqp.cue | 1 - .../components/sources/apache_metrics.cue | 15 ++---- .../components/sources/aws_ecs_metrics.cue | 17 ++---- .../sources/aws_kinesis_firehose.cue | 7 --- .../reference/components/sources/aws_s3.cue | 5 -- .../reference/components/sources/aws_sqs.cue | 5 +- .../components/sources/datadog_agent.cue | 8 --- .../components/sources/demo_logs.cue | 8 --- .../reference/components/sources/dnstap.cue | 5 +- .../components/sources/docker_logs.cue | 5 -- .../sources/eventstoredb_metrics.cue | 8 +-- .../cue/reference/components/sources/exec.cue | 9 +--- .../cue/reference/components/sources/file.cue | 26 ++++----- .../components/sources/file_descriptor.cue | 8 --- .../reference/components/sources/fluent.cue | 4 +- .../components/sources/gcp_pubsub.cue | 6 --- .../components/sources/heroku_logs.cue | 8 +-- .../components/sources/host_metrics.cue | 8 --- .../components/sources/http_client.cue | 15 ++---- .../components/sources/http_server.cue | 9 +--- .../components/sources/internal_logs.cue | 7 --- .../components/sources/internal_metrics.cue | 21 -------- .../reference/components/sources/journald.cue | 7 +-- .../reference/components/sources/kafka.cue | 6 --- .../components/sources/kubernetes_logs.cue | 5 -- .../reference/components/sources/logstash.cue | 2 - .../components/sources/mongodb_metrics.cue | 15 ++---- .../cue/reference/components/sources/nats.cue | 8 --- .../components/sources/nginx_metrics.cue | 9 ++-- .../components/sources/opentelemetry.cue | 8 --- .../components/sources/postgresql_metrics.cue | 11 ++-- .../sources/prometheus_remote_write.cue | 12 ++--- .../components/sources/prometheus_scrape.cue | 15 ++---- .../reference/components/sources/redis.cue | 4 -- .../reference/components/sources/socket.cue | 19 +++---- .../components/sources/splunk_hec.cue | 8 +-- .../reference/components/sources/statsd.cue | 11 ++-- .../reference/components/sources/stdin.cue | 7 +-- .../reference/components/sources/syslog.cue | 6 +-- .../reference/components/sources/vector.cue | 7 +-- .../cue/reference/components/transforms.cue | 4 +- .../components/transforms/dedupe.cue | 4 -- .../components/transforms/filter.cue | 4 -- .../components/transforms/log_to_metric.cue | 4 -- .../reference/components/transforms/lua.cue | 3 +- .../components/transforms/metric_to_log.cue | 4 -- .../reference/components/transforms/remap.cue | 4 -- .../components/transforms/sample.cue | 4 -- 111 files changed, 216 insertions(+), 649 deletions(-) create mode 100644 website/content/en/highlights/2023-07-05-0-31-0-upgrade-guide.md diff --git a/src/internal_events/batch.rs b/src/internal_events/batch.rs index 3bd9f6ab6eafd..1044d1cca82ab 100644 --- a/src/internal_events/batch.rs +++ b/src/internal_events/batch.rs @@ -14,8 +14,9 @@ pub struct LargeEventDroppedError { impl InternalEvent for LargeEventDroppedError { fn emit(self) { + let reason = "Event larger than batch max_bytes."; error!( - message = "Event larger than batch max_bytes.", + message = reason, batch_max_bytes = %self.max_length, length = %self.length, error_type = error_type::CONDITION_FAILED, @@ -28,14 +29,6 @@ impl InternalEvent for LargeEventDroppedError { "error_type" => error_type::CONDITION_FAILED, "stage" => error_stage::SENDING, ); - emit!(ComponentEventsDropped:: { - count: 1, - reason: "Event larger 
than batch max_bytes." - }); - // deprecated - counter!( - "events_discarded_total", 1, - "reason" => "oversized", - ); + emit!(ComponentEventsDropped:: { count: 1, reason }); } } diff --git a/src/internal_events/dedupe.rs b/src/internal_events/dedupe.rs index ef8525cb6f3fd..caf0dd9c204e1 100644 --- a/src/internal_events/dedupe.rs +++ b/src/internal_events/dedupe.rs @@ -1,5 +1,4 @@ use crate::emit; -use metrics::counter; use vector_core::internal_event::{ComponentEventsDropped, InternalEvent, INTENTIONAL}; #[derive(Debug)] @@ -13,6 +12,5 @@ impl InternalEvent for DedupeEventsDropped { count: self.count, reason: "Events have been found in cache for deduplication.", }); - counter!("events_discarded_total", self.count as u64); // Deprecated } } diff --git a/src/internal_events/filter.rs b/src/internal_events/filter.rs index 91df569bd1cdb..44d3c607f4f1f 100644 --- a/src/internal_events/filter.rs +++ b/src/internal_events/filter.rs @@ -1,4 +1,3 @@ -use metrics::{register_counter, Counter}; use vector_common::internal_event::{ComponentEventsDropped, Count, Registered, INTENTIONAL}; use crate::register; @@ -9,11 +8,9 @@ vector_common::registered_event! ( = register!(ComponentEventsDropped::::from( "Events matched filter condition." )), - events_discarded: Counter = register_counter!("events_discarded_total"), } fn emit(&self, data: Count) { self.events_dropped.emit(data); - self.events_discarded.increment(data.0 as u64); } ); diff --git a/src/internal_events/kafka.rs b/src/internal_events/kafka.rs index b17e04e6396b1..ab20ec0d47cde 100644 --- a/src/internal_events/kafka.rs +++ b/src/internal_events/kafka.rs @@ -104,8 +104,6 @@ impl InternalEvent for KafkaReadError { "error_type" => error_type::READER_FAILED, "stage" => error_stage::RECEIVING, ); - // deprecated - counter!("events_failed_total", 1); } } diff --git a/src/internal_events/loki.rs b/src/internal_events/loki.rs index 3d3da61786552..eff93d7a7a429 100644 --- a/src/internal_events/loki.rs +++ b/src/internal_events/loki.rs @@ -1,35 +1,58 @@ use crate::emit; use metrics::counter; +use vector_common::internal_event::{error_stage, error_type}; use vector_core::internal_event::{ComponentEventsDropped, InternalEvent, INTENTIONAL}; #[derive(Debug)] -pub struct LokiEventUnlabeled; +pub struct LokiEventUnlabeledError; -impl InternalEvent for LokiEventUnlabeled { +impl InternalEvent for LokiEventUnlabeledError { fn emit(self) { - // Deprecated - counter!("processing_errors_total", 1, - "error_type" => "unlabeled_event"); + error!( + message = "Event had no labels. 
Adding default `agent` label.", + error_code = "unlabeled_event", + error_type = error_type::CONDITION_FAILED, + stage = error_stage::PROCESSING, + internal_log_rate_limit = true, + ); + + counter!( + "component_errors_total", 1, + "error_code" => "unlabeled_event", + "error_type" => error_type::CONDITION_FAILED, + "stage" => error_stage::PROCESSING, + ); } } #[derive(Debug)] -pub struct LokiOutOfOrderEventDropped { +pub struct LokiOutOfOrderEventDroppedError { pub count: usize, } -impl InternalEvent for LokiOutOfOrderEventDropped { +impl InternalEvent for LokiOutOfOrderEventDroppedError { fn emit(self) { + let reason = "Dropping out-of-order event(s)."; + + error!( + message = reason, + error_code = "out_of_order", + error_type = error_type::CONDITION_FAILED, + stage = error_stage::PROCESSING, + internal_log_rate_limit = true, + ); + emit!(ComponentEventsDropped:: { count: self.count, - reason: "out_of_order", + reason, }); - // Deprecated - counter!("events_discarded_total", self.count as u64, - "reason" => "out_of_order"); - counter!("processing_errors_total", 1, - "error_type" => "out_of_order"); + counter!( + "component_errors_total", 1, + "error_code" => "out_of_order", + "error_type" => error_type::CONDITION_FAILED, + "stage" => error_stage::PROCESSING, + ); } } @@ -47,9 +70,5 @@ impl InternalEvent for LokiOutOfOrderEventRewritten { internal_log_rate_limit = true, ); counter!("rewritten_timestamp_events_total", self.count as u64); - - // Deprecated - counter!("processing_errors_total", 1, - "error_type" => "out_of_order"); } } diff --git a/src/internal_events/lua.rs b/src/internal_events/lua.rs index 54491c1602967..302e908d490c1 100644 --- a/src/internal_events/lua.rs +++ b/src/internal_events/lua.rs @@ -43,8 +43,6 @@ impl InternalEvent for LuaScriptError { count: 1, reason: "Error in lua script.", }); - // deprecated - counter!("processing_errors_total", 1); } } @@ -70,12 +68,6 @@ impl InternalEvent for LuaBuildError { "error_type" => error_type::SCRIPT_FAILED, "stage" => error_stage:: PROCESSING, ); - emit!(ComponentEventsDropped:: { - count: 1, - reason: "Error in lua build.", - }); - // deprecated - counter!("processing_errors_total", 1); emit!(ComponentEventsDropped:: { count: 1, reason }) } diff --git a/src/internal_events/metric_to_log.rs b/src/internal_events/metric_to_log.rs index fe75b1252353f..14463782d3adc 100644 --- a/src/internal_events/metric_to_log.rs +++ b/src/internal_events/metric_to_log.rs @@ -27,8 +27,6 @@ impl InternalEvent for MetricToLogSerializeError { "error_type" => error_type::ENCODER_FAILED, "stage" => error_stage::PROCESSING, ); - // deprecated - counter!("processing_errors_total", 1, "error_type" => "failed_serialize"); emit!(ComponentEventsDropped:: { count: 1, reason }) } diff --git a/src/internal_events/parser.rs b/src/internal_events/parser.rs index 739d33199202e..07826e3b6f155 100644 --- a/src/internal_events/parser.rs +++ b/src/internal_events/parser.rs @@ -42,8 +42,6 @@ impl InternalEvent for ParserMatchError<'_> { "error_type" => error_type::CONDITION_FAILED, "stage" => error_stage::PROCESSING, ); - // deprecated - counter!("processing_errors_total", 1, "error_type" => "failed_match"); } } @@ -75,8 +73,6 @@ impl InternalEvent for ParserMissingFieldError<'_, DROP_ "stage" => error_stage::PROCESSING, "field" => self.field.to_string(), ); - // deprecated - counter!("processing_errors_total", 1, "error_type" => "missing_field"); if DROP_EVENT { emit!(ComponentEventsDropped:: { count: 1, reason }); @@ -108,8 +104,6 @@ impl<'a> InternalEvent for 
ParserConversionError<'a> { "stage" => error_stage::PROCESSING, "name" => self.name.to_string(), ); - // deprecated - counter!("processing_errors_total", 1, "error_type" => "type_conversion_failed"); } } diff --git a/src/internal_events/remap.rs b/src/internal_events/remap.rs index f4889b3ecc95e..7a6bb5500cc30 100644 --- a/src/internal_events/remap.rs +++ b/src/internal_events/remap.rs @@ -34,8 +34,6 @@ impl InternalEvent for RemapMappingError { reason: "Mapping failed with event.", }); } - // deprecated - counter!("processing_errors_total", 1); } } diff --git a/src/internal_events/sample.rs b/src/internal_events/sample.rs index 04cb631801483..cded9a1a13343 100644 --- a/src/internal_events/sample.rs +++ b/src/internal_events/sample.rs @@ -1,5 +1,4 @@ use crate::emit; -use metrics::counter; use vector_core::internal_event::{ComponentEventsDropped, InternalEvent, INTENTIONAL}; #[derive(Debug)] @@ -7,7 +6,6 @@ pub struct SampleEventDiscarded; impl InternalEvent for SampleEventDiscarded { fn emit(self) { - counter!("events_discarded_total", 1); // Deprecated. emit!(ComponentEventsDropped:: { count: 1, reason: "Sample discarded." diff --git a/src/internal_events/sematext_metrics.rs b/src/internal_events/sematext_metrics.rs index 6d9c1dc155ef6..fea41b796c9f1 100644 --- a/src/internal_events/sematext_metrics.rs +++ b/src/internal_events/sematext_metrics.rs @@ -29,8 +29,6 @@ impl<'a> InternalEvent for SematextMetricsInvalidMetricError<'a> { "error_type" => error_type::ENCODER_FAILED, "stage" => error_stage::PROCESSING, ); - // deprecated - counter!("processing_errors_total", 1); emit!(ComponentEventsDropped:: { count: 1, reason }); } diff --git a/src/internal_events/statsd_sink.rs b/src/internal_events/statsd_sink.rs index 27e33fd0aa89b..20766f3376aad 100644 --- a/src/internal_events/statsd_sink.rs +++ b/src/internal_events/statsd_sink.rs @@ -31,8 +31,6 @@ impl<'a> InternalEvent for StatsdInvalidMetricError<'a> { "error_type" => error_type::ENCODER_FAILED, "stage" => error_stage::PROCESSING, ); - // deprecated - counter!("processing_errors_total", 1); emit!(ComponentEventsDropped:: { reason, count: 1 }); } diff --git a/src/internal_events/template.rs b/src/internal_events/template.rs index b1265d5b83924..549120d6e38bb 100644 --- a/src/internal_events/template.rs +++ b/src/internal_events/template.rs @@ -33,18 +33,11 @@ impl<'a> InternalEvent for TemplateRenderingError<'a> { "stage" => error_stage::PROCESSING, ); - // deprecated - counter!("processing_errors_total", 1, - "error_type" => "render_error"); - if self.drop_event { emit!(ComponentEventsDropped:: { count: 1, reason: "Failed to render template.", }); - - // deprecated - counter!("events_discarded_total", 1); } } } diff --git a/src/internal_events/throttle.rs b/src/internal_events/throttle.rs index 18f42e2f4a802..17ceeaac95590 100644 --- a/src/internal_events/throttle.rs +++ b/src/internal_events/throttle.rs @@ -9,7 +9,15 @@ pub(crate) struct ThrottleEventDiscarded { impl InternalEvent for ThrottleEventDiscarded { fn emit(self) { - debug!(message = "Rate limit exceeded.", key = ?self.key); // Deprecated. + // TODO: Technically, the Component Specification states that the discarded events metric + // must _only_ have the `intentional` tag, in addition to the core tags like + // `component_kind`, etc, and nothing else. + // + // That doesn't give us the leeway to specify which throttle bucket the events are being + // discarded for... 
but including the key/bucket as a tag does seem useful and so I wonder + if we should change the specification wording? Sort of a similar situation to the + `error_code` tag for the component errors metric, where it's meant to be optional and + only specified when relevant. counter!( "events_discarded_total", 1, "key" => self.key, diff --git a/src/sinks/loki/sink.rs b/src/sinks/loki/sink.rs index 367a41ea8c6de..766526bb12bff 100644 --- a/src/sinks/loki/sink.rs +++ b/src/sinks/loki/sink.rs @@ -29,7 +29,7 @@ use crate::{ codecs::{Encoder, Transformer}, http::{get_http_scheme_from_uri, HttpClient}, internal_events::{ - LokiEventUnlabeled, LokiOutOfOrderEventDropped, LokiOutOfOrderEventRewritten, + LokiEventUnlabeledError, LokiOutOfOrderEventDroppedError, LokiOutOfOrderEventRewritten, SinkRequestBuildError, TemplateRenderingError, }, sinks::util::{ @@ -288,7 +288,7 @@ impl EventEncoder { // `{agent="vector"}` label. This can happen if the only // label is a templatable one but the event doesn't match. if labels.is_empty() { - emit!(LokiEventUnlabeled); + emit!(LokiEventUnlabeledError); labels = vec![("agent".to_string(), "vector".to_string())] } @@ -486,7 +486,7 @@ impl LokiSink { } Some((partition, result)) } else { - emit!(LokiOutOfOrderEventDropped { count: batch.len() }); + emit!(LokiOutOfOrderEventDroppedError { count: batch.len() }); None } }) diff --git a/website/content/en/highlights/2023-07-05-0-31-0-upgrade-guide.md b/website/content/en/highlights/2023-07-05-0-31-0-upgrade-guide.md new file mode 100644 index 0000000000000..fe42e31349445 --- /dev/null +++ b/website/content/en/highlights/2023-07-05-0-31-0-upgrade-guide.md @@ -0,0 +1,51 @@ +--- +date: "2023-07-05" +title: "0.31 Upgrade Guide" +description: "An upgrade guide that addresses breaking changes in 0.31.0" +authors: ["tobz"] +release: "0.31.0" +hide_on_release_notes: false +badges: + type: breaking change +--- + +Vector's 0.31.0 release includes **breaking changes**: + +1. [Removal of various deprecated internal metrics](#deprecated-internal-metrics) + +We cover them below to help you upgrade quickly: + +## Upgrade guide + +### Breaking changes + +#### Removal of various deprecated internal metrics {#deprecated-internal-metrics} + +Over the course of the last several releases, we have been deprecating older internal metrics as we +work towards full support for the [Component Specification][component_spec], which dictates the +basic metrics that all components, or all components of a specific type, are expected to emit. + +We've made enough progress on this work that many of the deprecated metrics have now been removed +in this release. Below is the list of removed metrics: + +- `events_in_total` (superseded by `component_received_events_total`) +- `events_out_total` (superseded by `component_sent_events_total`) +- `processed_bytes_total` (superseded by either `component_received_bytes_total` or + `component_sent_bytes_total`, more below) +- `processed_events_total` (superseded by either `component_received_events_total` or + `component_sent_events_total`, more below) +- `processing_errors_total` (superseded by `component_errors_total`) +- `events_failed_total` (superseded by `component_errors_total`) + +Most of the removals have straightforward replacements, but the `processed_`-prefixed metrics +involve a small amount of logic. For **sources**, `processed_bytes_total` is superseded by +`component_received_bytes_total`, and `processed_events_total` is superseded by +`component_received_events_total`. For **sinks**, `processed_bytes_total` is superseded by +`component_sent_bytes_total`, and `processed_events_total` is superseded by +`component_sent_events_total`.
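To make the migration concrete, the sketch below shows the instrumentation pattern that the removed `processing_errors_total` counter gives way to, mirroring the `counter!` form used throughout the internal-events changes in this patch. It is an illustrative sketch only: the struct name and the literal tag values are hypothetical (Vector's own code draws the `error_type` and `stage` values from shared constants), and it assumes a `metrics` crate release that still accepts the `counter!(name, value, tags...)` macro form, as the code in this patch does.

```rust
// Illustrative sketch: the standardized error counter that replaces the removed
// `processing_errors_total`. The struct name and tag values here are hypothetical.
use metrics::counter;

#[derive(Debug)]
struct ExampleProcessingError;

impl ExampleProcessingError {
    fn emit(self) {
        // Previously (removed in 0.31.0):
        //   counter!("processing_errors_total", 1, "error_type" => "example_failure");
        // Now: one shared counter, with the context carried in standardized tags.
        counter!(
            "component_errors_total", 1,
            "error_code" => "example_failure",
            "error_type" => "condition_failed",
            "stage" => "processing",
        );
    }
}

fn main() {
    // With no metrics recorder installed this is a no-op, but it compiles and runs.
    ExampleProcessingError.emit();
}
```

On the operator side the change is the same renaming: dashboards and alerts that query `processing_errors_total` should point at `component_errors_total` (filtering on its tags as needed), and queries against the `processed_*` metrics should use the received/sent counterparts listed above.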
+ +Note that a small number of components still emit some of these metrics, as they provide +additional tags and information that are disallowed by the Component Specification. They will be +removed in a future version once we can rectify those discrepancies, but they are effectively +removed as of this release: you cannot depend on them still existing. diff --git a/website/cue/reference/components/sinks.cue b/website/cue/reference/components/sinks.cue index d3df682927263..6223a396b3859 100644 --- a/website/cue/reference/components/sinks.cue +++ b/website/cue/reference/components/sinks.cue @@ -652,16 +652,21 @@ components: sinks: [Name=string]: { } telemetry: metrics: { - component_received_events_count: components.sources.internal_metrics.output.metrics.component_received_events_count - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - utilization: components.sources.internal_metrics.output.metrics.utilization buffer_byte_size: components.sources.internal_metrics.output.metrics.buffer_byte_size + buffer_discarded_events_total: components.sources.internal_metrics.output.metrics.buffer_discarded_events_total buffer_events: components.sources.internal_metrics.output.metrics.buffer_events buffer_received_events_total: components.sources.internal_metrics.output.metrics.buffer_received_events_total buffer_received_event_bytes_total: components.sources.internal_metrics.output.metrics.buffer_received_event_bytes_total buffer_sent_events_total: components.sources.internal_metrics.output.metrics.buffer_sent_events_total buffer_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.buffer_sent_event_bytes_total - buffer_discarded_events_total: components.sources.internal_metrics.output.metrics.buffer_discarded_events_total + component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total + component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total + component_received_events_count: components.sources.internal_metrics.output.metrics.component_received_events_count + component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total + component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total + component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total + component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total + component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total + utilization: components.sources.internal_metrics.output.metrics.utilization } } diff --git a/website/cue/reference/components/sinks/amqp.cue b/website/cue/reference/components/sinks/amqp.cue index cc26da0c72376..cda263626ac75 100644 --- a/website/cue/reference/components/sinks/amqp.cue +++
b/website/cue/reference/components/sinks/amqp.cue @@ -59,9 +59,4 @@ components: sinks: amqp: { } how_it_works: components._amqp.how_it_works - - telemetry: metrics: { - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/aws_cloudwatch_logs.cue b/website/cue/reference/components/sinks/aws_cloudwatch_logs.cue index 91ae314bc6907..9daf79982787a 100644 --- a/website/cue/reference/components/sinks/aws_cloudwatch_logs.cue +++ b/website/cue/reference/components/sinks/aws_cloudwatch_logs.cue @@ -110,11 +110,4 @@ components: sinks: aws_cloudwatch_logs: components._aws & { ] }, ] - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/aws_cloudwatch_metrics.cue b/website/cue/reference/components/sinks/aws_cloudwatch_metrics.cue index a2b14db4ff7ee..67270c3bb65a2 100644 --- a/website/cue/reference/components/sinks/aws_cloudwatch_metrics.cue +++ b/website/cue/reference/components/sinks/aws_cloudwatch_metrics.cue @@ -114,9 +114,4 @@ components: sinks: aws_cloudwatch_metrics: components._aws & { ] }, ] - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/aws_kinesis_firehose.cue b/website/cue/reference/components/sinks/aws_kinesis_firehose.cue index 3e650e06a2e05..b06766ee167cc 100644 --- a/website/cue/reference/components/sinks/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sinks/aws_kinesis_firehose.cue @@ -99,9 +99,4 @@ components: sinks: aws_kinesis_firehose: components._aws & { ] }, ] - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/aws_kinesis_streams.cue b/website/cue/reference/components/sinks/aws_kinesis_streams.cue index c980d6df5de6c..75bfa684f8863 100644 --- a/website/cue/reference/components/sinks/aws_kinesis_streams.cue +++ b/website/cue/reference/components/sinks/aws_kinesis_streams.cue @@ -141,10 +141,4 @@ components: sinks: aws_kinesis_streams: components._aws & { ] }, ] - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - } } diff --git a/website/cue/reference/components/sinks/aws_s3.cue b/website/cue/reference/components/sinks/aws_s3.cue index 97eae03fb1a19..3953ac53ab9d9 100644 --- 
a/website/cue/reference/components/sinks/aws_s3.cue +++ b/website/cue/reference/components/sinks/aws_s3.cue @@ -216,12 +216,4 @@ components: sinks: aws_s3: components._aws & { ] }, ] - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/aws_sqs.cue b/website/cue/reference/components/sinks/aws_sqs.cue index 4d3d40a303735..8d097eb579885 100644 --- a/website/cue/reference/components/sinks/aws_sqs.cue +++ b/website/cue/reference/components/sinks/aws_sqs.cue @@ -93,11 +93,4 @@ components: sinks: aws_sqs: components._aws & { ] }, ] - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/axiom.cue b/website/cue/reference/components/sinks/axiom.cue index 631c3876fc848..d34bae1612a6a 100644 --- a/website/cue/reference/components/sinks/axiom.cue +++ b/website/cue/reference/components/sinks/axiom.cue @@ -90,12 +90,4 @@ components: sinks: axiom: { """ } } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/azure_blob.cue b/website/cue/reference/components/sinks/azure_blob.cue index 82c701969415f..e3e9d6f1a2b50 100644 --- a/website/cue/reference/components/sinks/azure_blob.cue +++ b/website/cue/reference/components/sinks/azure_blob.cue @@ -125,11 +125,7 @@ components: sinks: azure_blob: { } telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total - http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total + http_error_response_total: 
components.sources.internal_metrics.output.metrics.http_error_response_total + http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total } } diff --git a/website/cue/reference/components/sinks/azure_monitor_logs.cue b/website/cue/reference/components/sinks/azure_monitor_logs.cue index a5e0949c5a596..5b98b65bfc414 100644 --- a/website/cue/reference/components/sinks/azure_monitor_logs.cue +++ b/website/cue/reference/components/sinks/azure_monitor_logs.cue @@ -68,10 +68,4 @@ components: sinks: azure_monitor_logs: { metrics: null traces: false } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/blackhole.cue b/website/cue/reference/components/sinks/blackhole.cue index 74d0945a6ac8a..4952e8fa78f42 100644 --- a/website/cue/reference/components/sinks/blackhole.cue +++ b/website/cue/reference/components/sinks/blackhole.cue @@ -44,7 +44,4 @@ components: sinks: blackhole: { } traces: true } - - telemetry: metrics: { - } } diff --git a/website/cue/reference/components/sinks/clickhouse.cue b/website/cue/reference/components/sinks/clickhouse.cue index 1f3e48fcb1219..e20d656717460 100644 --- a/website/cue/reference/components/sinks/clickhouse.cue +++ b/website/cue/reference/components/sinks/clickhouse.cue @@ -80,10 +80,4 @@ components: sinks: clickhouse: { metrics: null traces: false } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/console.cue b/website/cue/reference/components/sinks/console.cue index c0aa859bd9176..521bd14a4e569 100644 --- a/website/cue/reference/components/sinks/console.cue +++ b/website/cue/reference/components/sinks/console.cue @@ -55,8 +55,4 @@ components: sinks: console: { } traces: true } - - telemetry: metrics: { - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/databend.cue b/website/cue/reference/components/sinks/databend.cue index a9bfab6a27f37..edb5147d49b77 100644 --- a/website/cue/reference/components/sinks/databend.cue +++ b/website/cue/reference/components/sinks/databend.cue @@ -101,10 +101,4 @@ components: sinks: databend: { """ } } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/datadog_archives.cue b/website/cue/reference/components/sinks/datadog_archives.cue index be5c7220da1b9..9fce2f2901e1e 100644 --- a/website/cue/reference/components/sinks/datadog_archives.cue +++ 
b/website/cue/reference/components/sinks/datadog_archives.cue @@ -245,12 +245,4 @@ components: sinks: datadog_archives: { ] }, ] - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/datadog_events.cue b/website/cue/reference/components/sinks/datadog_events.cue index 5fa9cf1ef18aa..8dee616beaed9 100644 --- a/website/cue/reference/components/sinks/datadog_events.cue +++ b/website/cue/reference/components/sinks/datadog_events.cue @@ -52,10 +52,4 @@ components: sinks: datadog_events: { metrics: null traces: false } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/datadog_metrics.cue b/website/cue/reference/components/sinks/datadog_metrics.cue index 82e1a8625a7c1..f73c0bfe270e5 100644 --- a/website/cue/reference/components/sinks/datadog_metrics.cue +++ b/website/cue/reference/components/sinks/datadog_metrics.cue @@ -71,9 +71,4 @@ components: sinks: datadog_metrics: { } traces: false } - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/datadog_traces.cue b/website/cue/reference/components/sinks/datadog_traces.cue index 7caf7619b4354..d37e9087270f1 100644 --- a/website/cue/reference/components/sinks/datadog_traces.cue +++ b/website/cue/reference/components/sinks/datadog_traces.cue @@ -75,11 +75,4 @@ components: sinks: datadog_traces: { metrics: null traces: true } - - telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/elasticsearch.cue b/website/cue/reference/components/sinks/elasticsearch.cue index 6768159b643c5..18d7ab204bb01 100644 --- a/website/cue/reference/components/sinks/elasticsearch.cue +++ b/website/cue/reference/components/sinks/elasticsearch.cue @@ -134,12 +134,4 @@ components: sinks: elasticsearch: { aws_authentication: components._aws.how_it_works.aws_authentication } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - 
component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/file.cue b/website/cue/reference/components/sinks/file.cue index ee8051bcffdf6..2749e76efdd3a 100644 --- a/website/cue/reference/components/sinks/file.cue +++ b/website/cue/reference/components/sinks/file.cue @@ -74,12 +74,4 @@ components: sinks: file: { """ } } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/gcp_chronicle_unstructured.cue b/website/cue/reference/components/sinks/gcp_chronicle_unstructured.cue index ae01156f7e8a8..bd3f3318d008b 100644 --- a/website/cue/reference/components/sinks/gcp_chronicle_unstructured.cue +++ b/website/cue/reference/components/sinks/gcp_chronicle_unstructured.cue @@ -79,11 +79,4 @@ components: sinks: gcp_chronicle_unstructured: { how_it_works: { } - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/gcp_cloud_storage.cue b/website/cue/reference/components/sinks/gcp_cloud_storage.cue index b58e3edb2fee0..6e0cebd455b96 100644 --- a/website/cue/reference/components/sinks/gcp_cloud_storage.cue +++ b/website/cue/reference/components/sinks/gcp_cloud_storage.cue @@ -179,11 +179,4 @@ components: sinks: gcp_cloud_storage: { ] }, ] - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/gcp_pubsub.cue b/website/cue/reference/components/sinks/gcp_pubsub.cue index 6d0307d430534..3a789a3bd6bb5 100644 --- a/website/cue/reference/components/sinks/gcp_pubsub.cue +++ b/website/cue/reference/components/sinks/gcp_pubsub.cue @@ -93,10 +93,4 @@ components: sinks: gcp_pubsub: { ] }, ] - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - 
component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/gcp_stackdriver_logs.cue b/website/cue/reference/components/sinks/gcp_stackdriver_logs.cue index 632dcdeddb624..c7194dd1344fa 100644 --- a/website/cue/reference/components/sinks/gcp_stackdriver_logs.cue +++ b/website/cue/reference/components/sinks/gcp_stackdriver_logs.cue @@ -115,10 +115,4 @@ components: sinks: gcp_stackdriver_logs: { ] }, ] - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/gcp_stackdriver_metrics.cue b/website/cue/reference/components/sinks/gcp_stackdriver_metrics.cue index 93200c3f9f0d1..1db83b33a69f5 100644 --- a/website/cue/reference/components/sinks/gcp_stackdriver_metrics.cue +++ b/website/cue/reference/components/sinks/gcp_stackdriver_metrics.cue @@ -103,10 +103,4 @@ components: sinks: gcp_stackdriver_metrics: { ] }, ] - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/honeycomb.cue b/website/cue/reference/components/sinks/honeycomb.cue index 2fa680df67eaa..036fcf7315e67 100644 --- a/website/cue/reference/components/sinks/honeycomb.cue +++ b/website/cue/reference/components/sinks/honeycomb.cue @@ -77,10 +77,4 @@ components: sinks: honeycomb: { """ } } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/http.cue b/website/cue/reference/components/sinks/http.cue index bb626d846f88d..c133e9623853a 100644 --- a/website/cue/reference/components/sinks/http.cue +++ b/website/cue/reference/components/sinks/http.cue @@ -93,10 +93,6 @@ components: sinks: http: { } telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - http_bad_requests_total: components.sources.internal_metrics.output.metrics.http_bad_requests_total + http_bad_requests_total: components.sources.internal_metrics.output.metrics.http_bad_requests_total } } diff --git a/website/cue/reference/components/sinks/humio.cue 
b/website/cue/reference/components/sinks/humio.cue index 3b7f6101d6c94..137d357175976 100644 --- a/website/cue/reference/components/sinks/humio.cue +++ b/website/cue/reference/components/sinks/humio.cue @@ -150,10 +150,4 @@ components: sinks: _humio: { } } } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/humio_logs.cue b/website/cue/reference/components/sinks/humio_logs.cue index 93330822e9bfe..7d79bbf25b650 100644 --- a/website/cue/reference/components/sinks/humio_logs.cue +++ b/website/cue/reference/components/sinks/humio_logs.cue @@ -15,7 +15,6 @@ components: sinks: humio_logs: { features: sinks._humio.features support: sinks._humio.support configuration: base.components.sinks.humio_logs.configuration - telemetry: sinks._humio.telemetry input: { logs: true diff --git a/website/cue/reference/components/sinks/humio_metrics.cue b/website/cue/reference/components/sinks/humio_metrics.cue index 705959abab75f..d24ee3af92874 100644 --- a/website/cue/reference/components/sinks/humio_metrics.cue +++ b/website/cue/reference/components/sinks/humio_metrics.cue @@ -7,7 +7,6 @@ components: sinks: humio_metrics: { features: sinks._humio.features support: sinks._humio.support configuration: base.components.sinks.humio_metrics.configuration - telemetry: sinks._humio.telemetry input: { logs: false diff --git a/website/cue/reference/components/sinks/influxdb_logs.cue b/website/cue/reference/components/sinks/influxdb_logs.cue index 13fa0182145a9..3f5ef8d457a9f 100644 --- a/website/cue/reference/components/sinks/influxdb_logs.cue +++ b/website/cue/reference/components/sinks/influxdb_logs.cue @@ -97,10 +97,4 @@ components: sinks: influxdb_logs: { ] } } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/influxdb_metrics.cue b/website/cue/reference/components/sinks/influxdb_metrics.cue index 5cc34b6f071d0..2f77a92ea1c12 100644 --- a/website/cue/reference/components/sinks/influxdb_metrics.cue +++ b/website/cue/reference/components/sinks/influxdb_metrics.cue @@ -189,9 +189,4 @@ components: sinks: influxdb_metrics: { output: "\(_name),metric_type=summary,host=\(_host) count=6i,quantile_0.01=1.5,quantile_0.5=2,quantile_0.99=3,sum=12.1 1542182950000000011" }, ] - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/kafka.cue b/website/cue/reference/components/sinks/kafka.cue index 37cd3c2af0bfd..cfd2a9e8ccd64 100644 --- a/website/cue/reference/components/sinks/kafka.cue +++ b/website/cue/reference/components/sinks/kafka.cue @@ -69,11 +69,6 @@ components: sinks: kafka: { how_it_works: components._kafka.how_it_works telemetry: 
metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total kafka_queue_messages: components.sources.internal_metrics.output.metrics.kafka_queue_messages kafka_queue_messages_bytes: components.sources.internal_metrics.output.metrics.kafka_queue_messages_bytes kafka_requests_total: components.sources.internal_metrics.output.metrics.kafka_requests_total diff --git a/website/cue/reference/components/sinks/loki.cue b/website/cue/reference/components/sinks/loki.cue index 057df8af0b2d4..6f341bbde7998 100644 --- a/website/cue/reference/components/sinks/loki.cue +++ b/website/cue/reference/components/sinks/loki.cue @@ -159,11 +159,6 @@ components: sinks: loki: { } telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - streams_total: components.sources.internal_metrics.output.metrics.streams_total + streams_total: components.sources.internal_metrics.output.metrics.streams_total } } diff --git a/website/cue/reference/components/sinks/mezmo.cue b/website/cue/reference/components/sinks/mezmo.cue index 6a284c4b4f001..0672c321343a6 100644 --- a/website/cue/reference/components/sinks/mezmo.cue +++ b/website/cue/reference/components/sinks/mezmo.cue @@ -62,12 +62,4 @@ components: sinks: mezmo: { metrics: null traces: false } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/nats.cue b/website/cue/reference/components/sinks/nats.cue index 494bd131f85e7..0d5eb18b1323e 100644 --- a/website/cue/reference/components/sinks/nats.cue +++ b/website/cue/reference/components/sinks/nats.cue @@ -64,8 +64,6 @@ components: sinks: nats: { how_it_works: components._nats.how_it_works telemetry: metrics: { - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - send_errors_total: components.sources.internal_metrics.output.metrics.send_errors_total + send_errors_total: components.sources.internal_metrics.output.metrics.send_errors_total 
} } diff --git a/website/cue/reference/components/sinks/prometheus_remote_write.cue b/website/cue/reference/components/sinks/prometheus_remote_write.cue index a7731b268f7c5..c384571aef6f2 100644 --- a/website/cue/reference/components/sinks/prometheus_remote_write.cue +++ b/website/cue/reference/components/sinks/prometheus_remote_write.cue @@ -101,10 +101,4 @@ components: sinks: prometheus_remote_write: { """ } } - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/pulsar.cue b/website/cue/reference/components/sinks/pulsar.cue index bf910a2a88bc8..27ea75379891b 100644 --- a/website/cue/reference/components/sinks/pulsar.cue +++ b/website/cue/reference/components/sinks/pulsar.cue @@ -65,8 +65,6 @@ components: sinks: pulsar: { } telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - encode_errors_total: components.sources.internal_metrics.output.metrics.encode_errors_total + encode_errors_total: components.sources.internal_metrics.output.metrics.encode_errors_total } } diff --git a/website/cue/reference/components/sinks/redis.cue b/website/cue/reference/components/sinks/redis.cue index 6f4e44cc1a386..cc9f9ab4f8152 100644 --- a/website/cue/reference/components/sinks/redis.cue +++ b/website/cue/reference/components/sinks/redis.cue @@ -75,8 +75,6 @@ components: sinks: redis: { } telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - send_errors_total: components.sources.internal_metrics.output.metrics.send_errors_total + send_errors_total: components.sources.internal_metrics.output.metrics.send_errors_total } } diff --git a/website/cue/reference/components/sinks/sematext_metrics.cue b/website/cue/reference/components/sinks/sematext_metrics.cue index 1ec54a088fa01..6f614d7a8c293 100644 --- a/website/cue/reference/components/sinks/sematext_metrics.cue +++ b/website/cue/reference/components/sinks/sematext_metrics.cue @@ -64,9 +64,6 @@ components: sinks: sematext_metrics: { } telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - encode_errors_total: components.sources.internal_metrics.output.metrics.encode_errors_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total + encode_errors_total: components.sources.internal_metrics.output.metrics.encode_errors_total } } diff --git a/website/cue/reference/components/sinks/splunk_hec_logs.cue b/website/cue/reference/components/sinks/splunk_hec_logs.cue index c3915ec5be422..6ecfe55b34484 100644 --- a/website/cue/reference/components/sinks/splunk_hec_logs.cue +++ b/website/cue/reference/components/sinks/splunk_hec_logs.cue @@ -82,13 +82,8 @@ components: sinks: splunk_hec_logs: 
{ } telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total - requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total + http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total + requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total } how_it_works: sinks._splunk_hec.how_it_works diff --git a/website/cue/reference/components/sinks/statsd.cue b/website/cue/reference/components/sinks/statsd.cue index b023e5982e893..5d5678d6ab8a6 100644 --- a/website/cue/reference/components/sinks/statsd.cue +++ b/website/cue/reference/components/sinks/statsd.cue @@ -48,10 +48,4 @@ components: sinks: statsd: { } configuration: base.components.sinks.statsd.configuration - - telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sinks/vector.cue b/website/cue/reference/components/sinks/vector.cue index 922836ce15484..217b44c23222e 100644 --- a/website/cue/reference/components/sinks/vector.cue +++ b/website/cue/reference/components/sinks/vector.cue @@ -80,9 +80,6 @@ components: sinks: vector: { how_it_works: components.sources.vector.how_it_works telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - protobuf_decode_errors_total: components.sources.internal_metrics.output.metrics.protobuf_decode_errors_total + protobuf_decode_errors_total: components.sources.internal_metrics.output.metrics.protobuf_decode_errors_total } } diff --git a/website/cue/reference/components/sinks/webhdfs.cue b/website/cue/reference/components/sinks/webhdfs.cue index ee414d94b03c2..be5604ef11378 100644 --- a/website/cue/reference/components/sinks/webhdfs.cue +++ b/website/cue/reference/components/sinks/webhdfs.cue @@ -50,10 +50,4 @@ components: sinks: webhdfs: { metrics: null traces: false } - - telemetry: metrics: { - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - } } diff --git a/website/cue/reference/components/sinks/websocket.cue 
b/website/cue/reference/components/sinks/websocket.cue index 21d5d7efce561..f38a55091b4e9 100644 --- a/website/cue/reference/components/sinks/websocket.cue +++ b/website/cue/reference/components/sinks/websocket.cue @@ -73,13 +73,10 @@ components: sinks: websocket: { } telemetry: metrics: { - open_connections: components.sources.internal_metrics.output.metrics.open_connections - connection_established_total: components.sources.internal_metrics.output.metrics.connection_established_total - connection_failed_total: components.sources.internal_metrics.output.metrics.connection_failed_total - connection_shutdown_total: components.sources.internal_metrics.output.metrics.connection_shutdown_total - connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total - component_sent_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total + open_connections: components.sources.internal_metrics.output.metrics.open_connections + connection_established_total: components.sources.internal_metrics.output.metrics.connection_established_total + connection_failed_total: components.sources.internal_metrics.output.metrics.connection_failed_total + connection_shutdown_total: components.sources.internal_metrics.output.metrics.connection_shutdown_total + connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total } } diff --git a/website/cue/reference/components/sources.cue b/website/cue/reference/components/sources.cue index 659753d81dc57..9e3682067ee26 100644 --- a/website/cue/reference/components/sources.cue +++ b/website/cue/reference/components/sources.cue @@ -401,8 +401,14 @@ components: sources: [Name=string]: { } telemetry: metrics: { - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - source_lag_time_seconds: components.sources.internal_metrics.output.metrics.source_lag_time_seconds + component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total + component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total + component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total + component_received_events_count: components.sources.internal_metrics.output.metrics.component_received_events_count + component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total + component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total + component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total + component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total + source_lag_time_seconds: components.sources.internal_metrics.output.metrics.source_lag_time_seconds } } diff --git a/website/cue/reference/components/sources/amqp.cue b/website/cue/reference/components/sources/amqp.cue index b2647cbbad352..18da792ae7f8a 100644 --- 
a/website/cue/reference/components/sources/amqp.cue +++ b/website/cue/reference/components/sources/amqp.cue @@ -79,7 +79,6 @@ components: sources: amqp: { telemetry: metrics: { consumer_offset_updates_failed_total: components.sources.internal_metrics.output.metrics.consumer_offset_updates_failed_total - events_failed_total: components.sources.internal_metrics.output.metrics.events_failed_total } how_it_works: components._amqp.how_it_works diff --git a/website/cue/reference/components/sources/apache_metrics.cue b/website/cue/reference/components/sources/apache_metrics.cue index f39a895f79a77..77294ea5c4984 100644 --- a/website/cue/reference/components/sources/apache_metrics.cue +++ b/website/cue/reference/components/sources/apache_metrics.cue @@ -161,15 +161,10 @@ components: sources: apache_metrics: { how_it_works: {} telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total - http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total - parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total - request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds + http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total + http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total + parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total + requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total + request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds } } diff --git a/website/cue/reference/components/sources/aws_ecs_metrics.cue b/website/cue/reference/components/sources/aws_ecs_metrics.cue index 7eb00540a88e3..53586655b7683 100644 --- a/website/cue/reference/components/sources/aws_ecs_metrics.cue +++ b/website/cue/reference/components/sources/aws_ecs_metrics.cue @@ -185,17 +185,10 @@ components: sources: aws_ecs_metrics: { } telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - 
component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total - http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total - parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total - request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds + http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total + http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total + parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total + requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total + request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds } } diff --git a/website/cue/reference/components/sources/aws_kinesis_firehose.cue b/website/cue/reference/components/sources/aws_kinesis_firehose.cue index 7d9997685b41d..22adc0ecb28e4 100644 --- a/website/cue/reference/components/sources/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sources/aws_kinesis_firehose.cue @@ -187,13 +187,6 @@ components: sources: aws_kinesis_firehose: { } telemetry: metrics: { - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total - component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total request_read_errors_total: components.sources.internal_metrics.output.metrics.request_read_errors_total requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total request_automatic_decode_errors_total: components.sources.internal_metrics.output.metrics.request_automatic_decode_errors_total diff --git a/website/cue/reference/components/sources/aws_s3.cue b/website/cue/reference/components/sources/aws_s3.cue index d8316999b521f..200027bdaa32e 100644 --- a/website/cue/reference/components/sources/aws_s3.cue +++ b/website/cue/reference/components/sources/aws_s3.cue @@ -162,11 +162,6 @@ components: sources: aws_s3: components._aws & { ] telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - 
component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total sqs_message_delete_failed_total: components.sources.internal_metrics.output.metrics.sqs_message_delete_failed_total sqs_message_delete_succeeded_total: components.sources.internal_metrics.output.metrics.sqs_message_delete_succeeded_total sqs_message_processing_failed_total: components.sources.internal_metrics.output.metrics.sqs_message_processing_failed_total diff --git a/website/cue/reference/components/sources/aws_sqs.cue b/website/cue/reference/components/sources/aws_sqs.cue index c19f9f994aecd..b0e7980a7c108 100644 --- a/website/cue/reference/components/sources/aws_sqs.cue +++ b/website/cue/reference/components/sources/aws_sqs.cue @@ -83,10 +83,7 @@ components: sources: aws_sqs: components._aws & { } telemetry: metrics: { - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - sqs_message_delete_failed_total: components.sources.internal_metrics.output.metrics.sqs_message_delete_failed_total + sqs_message_delete_failed_total: components.sources.internal_metrics.output.metrics.sqs_message_delete_failed_total } how_it_works: { diff --git a/website/cue/reference/components/sources/datadog_agent.cue b/website/cue/reference/components/sources/datadog_agent.cue index e83edf69d723d..88e5cd9cd429f 100644 --- a/website/cue/reference/components/sources/datadog_agent.cue +++ b/website/cue/reference/components/sources/datadog_agent.cue @@ -217,12 +217,4 @@ components: sources: datadog_agent: { """ } } - - telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - } } diff --git a/website/cue/reference/components/sources/demo_logs.cue b/website/cue/reference/components/sources/demo_logs.cue index e7b506a9edb51..5ad1811a3bebd 100644 --- a/website/cue/reference/components/sources/demo_logs.cue +++ b/website/cue/reference/components/sources/demo_logs.cue @@ -55,12 +55,4 @@ components: sources: demo_logs: { } } } - - telemetry: metrics: { - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - } } diff --git a/website/cue/reference/components/sources/dnstap.cue b/website/cue/reference/components/sources/dnstap.cue index 
aa38de8f1f711..d05fb84a51758 100644 --- a/website/cue/reference/components/sources/dnstap.cue +++ b/website/cue/reference/components/sources/dnstap.cue @@ -1171,9 +1171,6 @@ components: sources: dnstap: { } telemetry: metrics: { - parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total + parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total } } diff --git a/website/cue/reference/components/sources/docker_logs.cue b/website/cue/reference/components/sources/docker_logs.cue index f9786bce712af..172da36fa2f6a 100644 --- a/website/cue/reference/components/sources/docker_logs.cue +++ b/website/cue/reference/components/sources/docker_logs.cue @@ -214,10 +214,5 @@ components: sources: docker_logs: { containers_unwatched_total: components.sources.internal_metrics.output.metrics.containers_unwatched_total containers_watched_total: components.sources.internal_metrics.output.metrics.containers_watched_total logging_driver_errors_total: components.sources.internal_metrics.output.metrics.logging_driver_errors_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total } } diff --git a/website/cue/reference/components/sources/eventstoredb_metrics.cue b/website/cue/reference/components/sources/eventstoredb_metrics.cue index 5421f4d94624d..b0b5da4e93827 100644 --- a/website/cue/reference/components/sources/eventstoredb_metrics.cue +++ b/website/cue/reference/components/sources/eventstoredb_metrics.cue @@ -116,11 +116,7 @@ components: sources: eventstoredb_metrics: { } } telemetry: metrics: { - http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total - parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total + http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total + parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total } } diff --git a/website/cue/reference/components/sources/exec.cue b/website/cue/reference/components/sources/exec.cue index 0bf792c886781..6c37b5b313897 100644 --- a/website/cue/reference/components/sources/exec.cue +++ 
b/website/cue/reference/components/sources/exec.cue @@ -126,12 +126,7 @@ components: sources: exec: { } telemetry: metrics: { - command_executed_total: components.sources.internal_metrics.output.metrics.command_executed_total - command_execution_duration_seconds: components.sources.internal_metrics.output.metrics.command_execution_duration_seconds - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total + command_executed_total: components.sources.internal_metrics.output.metrics.command_executed_total + command_execution_duration_seconds: components.sources.internal_metrics.output.metrics.command_execution_duration_seconds } } diff --git a/website/cue/reference/components/sources/file.cue b/website/cue/reference/components/sources/file.cue index 61564ea8d1850..b53395ee5f900 100644 --- a/website/cue/reference/components/sources/file.cue +++ b/website/cue/reference/components/sources/file.cue @@ -423,20 +423,16 @@ components: sources: file: { } telemetry: metrics: { - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - checkpoint_write_errors_total: components.sources.internal_metrics.output.metrics.checkpoint_write_errors_total - checkpoints_total: components.sources.internal_metrics.output.metrics.checkpoints_total - checksum_errors_total: components.sources.internal_metrics.output.metrics.checksum_errors_total - file_delete_errors_total: components.sources.internal_metrics.output.metrics.file_delete_errors_total - file_watch_errors_total: components.sources.internal_metrics.output.metrics.file_watch_errors_total - files_added_total: components.sources.internal_metrics.output.metrics.files_added_total - files_deleted_total: components.sources.internal_metrics.output.metrics.files_deleted_total - files_resumed_total: components.sources.internal_metrics.output.metrics.files_resumed_total - files_unwatched_total: components.sources.internal_metrics.output.metrics.files_unwatched_total - fingerprint_read_errors_total: components.sources.internal_metrics.output.metrics.fingerprint_read_errors_total - glob_errors_total: components.sources.internal_metrics.output.metrics.glob_errors_total + checkpoint_write_errors_total: components.sources.internal_metrics.output.metrics.checkpoint_write_errors_total + checkpoints_total: components.sources.internal_metrics.output.metrics.checkpoints_total + checksum_errors_total: components.sources.internal_metrics.output.metrics.checksum_errors_total + file_delete_errors_total: components.sources.internal_metrics.output.metrics.file_delete_errors_total + file_watch_errors_total: components.sources.internal_metrics.output.metrics.file_watch_errors_total + 
files_added_total: components.sources.internal_metrics.output.metrics.files_added_total + files_deleted_total: components.sources.internal_metrics.output.metrics.files_deleted_total + files_resumed_total: components.sources.internal_metrics.output.metrics.files_resumed_total + files_unwatched_total: components.sources.internal_metrics.output.metrics.files_unwatched_total + fingerprint_read_errors_total: components.sources.internal_metrics.output.metrics.fingerprint_read_errors_total + glob_errors_total: components.sources.internal_metrics.output.metrics.glob_errors_total } } diff --git a/website/cue/reference/components/sources/file_descriptor.cue b/website/cue/reference/components/sources/file_descriptor.cue index 0a54a4b67251c..ee86f0063e24e 100644 --- a/website/cue/reference/components/sources/file_descriptor.cue +++ b/website/cue/reference/components/sources/file_descriptor.cue @@ -77,12 +77,4 @@ components: sources: file_descriptor: { """ } } - - telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - } } diff --git a/website/cue/reference/components/sources/fluent.cue b/website/cue/reference/components/sources/fluent.cue index a8492e1adcbdf..f5350c7cdebcd 100644 --- a/website/cue/reference/components/sources/fluent.cue +++ b/website/cue/reference/components/sources/fluent.cue @@ -177,8 +177,6 @@ components: sources: fluent: { } telemetry: metrics: { - decode_errors_total: components.sources.internal_metrics.output.metrics.decode_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total + decode_errors_total: components.sources.internal_metrics.output.metrics.decode_errors_total } } diff --git a/website/cue/reference/components/sources/gcp_pubsub.cue b/website/cue/reference/components/sources/gcp_pubsub.cue index 53c3b1d8c3066..3ff488347179f 100644 --- a/website/cue/reference/components/sources/gcp_pubsub.cue +++ b/website/cue/reference/components/sources/gcp_pubsub.cue @@ -99,12 +99,6 @@ components: sources: gcp_pubsub: { } } - telemetry: metrics: { - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - } - how_it_works: { gcp_pubsub: { title: "GCP Pub/Sub" diff --git a/website/cue/reference/components/sources/heroku_logs.cue b/website/cue/reference/components/sources/heroku_logs.cue index 1741703745193..038fe51b8164b 100644 --- a/website/cue/reference/components/sources/heroku_logs.cue +++ b/website/cue/reference/components/sources/heroku_logs.cue @@ -101,11 +101,7 @@ components: sources: heroku_logs: { } telemetry: metrics: { - component_errors_total: 
components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - request_read_errors_total: components.sources.internal_metrics.output.metrics.request_read_errors_total - requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total + request_read_errors_total: components.sources.internal_metrics.output.metrics.request_read_errors_total + requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total } } diff --git a/website/cue/reference/components/sources/host_metrics.cue b/website/cue/reference/components/sources/host_metrics.cue index 0018e403b7117..246286d5763f4 100644 --- a/website/cue/reference/components/sources/host_metrics.cue +++ b/website/cue/reference/components/sources/host_metrics.cue @@ -257,12 +257,4 @@ components: sources: host_metrics: { } _network_nomac: _network_gauge & {relevant_when: "OS is not macOS"} } - - telemetry: metrics: { - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - } } diff --git a/website/cue/reference/components/sources/http_client.cue b/website/cue/reference/components/sources/http_client.cue index 304e18070743f..cbd071a85a998 100644 --- a/website/cue/reference/components/sources/http_client.cue +++ b/website/cue/reference/components/sources/http_client.cue @@ -139,15 +139,10 @@ components: sources: http_client: { } telemetry: metrics: { - http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total - http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total - parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total - request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds + http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total + http_request_errors_total: 
components.sources.internal_metrics.output.metrics.http_request_errors_total + parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total + requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total + request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds } } diff --git a/website/cue/reference/components/sources/http_server.cue b/website/cue/reference/components/sources/http_server.cue index 317de8954e618..87055622b5ec7 100644 --- a/website/cue/reference/components/sources/http_server.cue +++ b/website/cue/reference/components/sources/http_server.cue @@ -178,13 +178,8 @@ components: sources: http_server: { ] telemetry: metrics: { - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - http_bad_requests_total: components.sources.internal_metrics.output.metrics.http_bad_requests_total - parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total + http_bad_requests_total: components.sources.internal_metrics.output.metrics.http_bad_requests_total + parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total } how_it_works: { diff --git a/website/cue/reference/components/sources/internal_logs.cue b/website/cue/reference/components/sources/internal_logs.cue index 4ffe7b44e2be5..632267111583d 100644 --- a/website/cue/reference/components/sources/internal_logs.cue +++ b/website/cue/reference/components/sources/internal_logs.cue @@ -132,11 +132,4 @@ components: sources: internal_logs: { """ } } - - telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - } } diff --git a/website/cue/reference/components/sources/internal_metrics.cue b/website/cue/reference/components/sources/internal_metrics.cue index 36b5757141a94..5115d4f35c32d 100644 --- a/website/cue/reference/components/sources/internal_metrics.cue +++ b/website/cue/reference/components/sources/internal_metrics.cue @@ -374,12 +374,6 @@ components: sources: internal_metrics: { reason: _reason } } - events_failed_total: { - description: "The total number of failures to read a Kafka message." - type: "counter" - default_namespace: "vector" - tags: _component_tags - } buffer_byte_size: { description: "The number of bytes current in the buffer." type: "gauge" @@ -819,14 +813,6 @@ components: sources: internal_metrics: { default_namespace: "vector" tags: _internal_metrics_tags } - processing_errors_total: { - description: "The total number of processing errors encountered by this component. 
This metric is deprecated in favor of `component_errors_total`." - type: "counter" - default_namespace: "vector" - tags: _component_tags & { - error_type: _error_type - } - } protobuf_decode_errors_total: { description: "The total number of [Protocol Buffers](\(urls.protobuf)) errors thrown during communication between Vector instances." type: "counter" @@ -1225,11 +1211,4 @@ components: sources: internal_metrics: { """ } } - - telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - } } diff --git a/website/cue/reference/components/sources/journald.cue b/website/cue/reference/components/sources/journald.cue index c64c1fdcaa486..8bc1e3deb5af9 100644 --- a/website/cue/reference/components/sources/journald.cue +++ b/website/cue/reference/components/sources/journald.cue @@ -151,10 +151,7 @@ components: sources: journald: { } telemetry: metrics: { - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - invalid_record_total: components.sources.internal_metrics.output.metrics.invalid_record_total - invalid_record_bytes_total: components.sources.internal_metrics.output.metrics.invalid_record_bytes_total + invalid_record_total: components.sources.internal_metrics.output.metrics.invalid_record_total + invalid_record_bytes_total: components.sources.internal_metrics.output.metrics.invalid_record_bytes_total } } diff --git a/website/cue/reference/components/sources/kafka.cue b/website/cue/reference/components/sources/kafka.cue index d5d56563b5bac..eee8281a645bf 100644 --- a/website/cue/reference/components/sources/kafka.cue +++ b/website/cue/reference/components/sources/kafka.cue @@ -87,7 +87,6 @@ components: sources: kafka: { } telemetry: metrics: { - events_failed_total: components.sources.internal_metrics.output.metrics.events_failed_total consumer_offset_updates_failed_total: components.sources.internal_metrics.output.metrics.consumer_offset_updates_failed_total kafka_queue_messages: components.sources.internal_metrics.output.metrics.kafka_queue_messages kafka_queue_messages_bytes: components.sources.internal_metrics.output.metrics.kafka_queue_messages_bytes @@ -100,11 +99,6 @@ components: sources: kafka: { kafka_consumed_messages_total: components.sources.internal_metrics.output.metrics.kafka_consumed_messages_total kafka_consumed_messages_bytes_total: components.sources.internal_metrics.output.metrics.kafka_consumed_messages_bytes_total kafka_consumer_lag: components.sources.internal_metrics.output.metrics.kafka_consumer_lag - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - 
component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total } how_it_works: components._kafka.how_it_works diff --git a/website/cue/reference/components/sources/kubernetes_logs.cue b/website/cue/reference/components/sources/kubernetes_logs.cue index 50ee665bdcd6a..082a2205a9e43 100644 --- a/website/cue/reference/components/sources/kubernetes_logs.cue +++ b/website/cue/reference/components/sources/kubernetes_logs.cue @@ -477,10 +477,5 @@ components: sources: kubernetes_logs: { k8s_watch_stream_failed_total: components.sources.internal_metrics.output.metrics.k8s_watch_stream_failed_total k8s_watch_stream_items_obtained_total: components.sources.internal_metrics.output.metrics.k8s_watch_stream_items_obtained_total k8s_watcher_http_error_total: components.sources.internal_metrics.output.metrics.k8s_watcher_http_error_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total } } diff --git a/website/cue/reference/components/sources/logstash.cue b/website/cue/reference/components/sources/logstash.cue index 0329e2ea50484..131ae7894a25a 100644 --- a/website/cue/reference/components/sources/logstash.cue +++ b/website/cue/reference/components/sources/logstash.cue @@ -309,7 +309,5 @@ components: sources: logstash: { connection_send_ack_errors_total: components.sources.internal_metrics.output.metrics.connection_send_ack_errors_total decode_errors_total: components.sources.internal_metrics.output.metrics.decode_errors_total open_connections: components.sources.internal_metrics.output.metrics.open_connections - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total } } diff --git a/website/cue/reference/components/sources/mongodb_metrics.cue b/website/cue/reference/components/sources/mongodb_metrics.cue index 92f142e7318f7..950a94d3fe957 100644 --- a/website/cue/reference/components/sources/mongodb_metrics.cue +++ b/website/cue/reference/components/sources/mongodb_metrics.cue @@ -722,16 +722,9 @@ components: sources: mongodb_metrics: { } telemetry: metrics: { - collect_completed_total: components.sources.internal_metrics.output.metrics.collect_completed_total - collect_duration_seconds: components.sources.internal_metrics.output.metrics.collect_duration_seconds - parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - request_errors_total: components.sources.internal_metrics.output.metrics.request_errors_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: 
components.sources.internal_metrics.output.metrics.component_received_bytes_total & { - description: "The number of deserialized bytes from the returned BSON documents" - } - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total + collect_completed_total: components.sources.internal_metrics.output.metrics.collect_completed_total + collect_duration_seconds: components.sources.internal_metrics.output.metrics.collect_duration_seconds + parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total + request_errors_total: components.sources.internal_metrics.output.metrics.request_errors_total } } diff --git a/website/cue/reference/components/sources/nats.cue b/website/cue/reference/components/sources/nats.cue index 80e92a4c7c7d1..f9f3c3819b73a 100644 --- a/website/cue/reference/components/sources/nats.cue +++ b/website/cue/reference/components/sources/nats.cue @@ -68,13 +68,5 @@ components: sources: nats: { } } - telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - } - how_it_works: components._nats.how_it_works } diff --git a/website/cue/reference/components/sources/nginx_metrics.cue b/website/cue/reference/components/sources/nginx_metrics.cue index 57c31f3ec8294..9880ce3915130 100644 --- a/website/cue/reference/components/sources/nginx_metrics.cue +++ b/website/cue/reference/components/sources/nginx_metrics.cue @@ -128,10 +128,9 @@ components: sources: nginx_metrics: { } telemetry: metrics: { - collect_completed_total: components.sources.internal_metrics.output.metrics.collect_completed_total - collect_duration_seconds: components.sources.internal_metrics.output.metrics.collect_duration_seconds - http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total - parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total + collect_completed_total: components.sources.internal_metrics.output.metrics.collect_completed_total + collect_duration_seconds: components.sources.internal_metrics.output.metrics.collect_duration_seconds + http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total + parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total } } diff --git a/website/cue/reference/components/sources/opentelemetry.cue b/website/cue/reference/components/sources/opentelemetry.cue index 3638a9cd95085..7a9e3b9511b4f 100644 --- a/website/cue/reference/components/sources/opentelemetry.cue +++ b/website/cue/reference/components/sources/opentelemetry.cue @@ -194,14 +194,6 @@ components: sources: opentelemetry: { } } - telemetry: metrics: { - component_discarded_events_total: 
components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - } - how_it_works: { tls: { title: "Transport Layer Security (TLS)" diff --git a/website/cue/reference/components/sources/postgresql_metrics.cue b/website/cue/reference/components/sources/postgresql_metrics.cue index f1573ea4e8564..9f832c0cf95b2 100644 --- a/website/cue/reference/components/sources/postgresql_metrics.cue +++ b/website/cue/reference/components/sources/postgresql_metrics.cue @@ -69,14 +69,9 @@ components: sources: postgresql_metrics: { } telemetry: metrics: { - collect_completed_total: components.sources.internal_metrics.output.metrics.collect_completed_total - collect_duration_seconds: components.sources.internal_metrics.output.metrics.collect_duration_seconds - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - request_errors_total: components.sources.internal_metrics.output.metrics.request_errors_total + collect_completed_total: components.sources.internal_metrics.output.metrics.collect_completed_total + collect_duration_seconds: components.sources.internal_metrics.output.metrics.collect_duration_seconds + request_errors_total: components.sources.internal_metrics.output.metrics.request_errors_total } output: metrics: { diff --git a/website/cue/reference/components/sources/prometheus_remote_write.cue b/website/cue/reference/components/sources/prometheus_remote_write.cue index 02ccbb25b63a2..ce004a3595c95 100644 --- a/website/cue/reference/components/sources/prometheus_remote_write.cue +++ b/website/cue/reference/components/sources/prometheus_remote_write.cue @@ -84,13 +84,9 @@ components: sources: prometheus_remote_write: { } telemetry: metrics: { - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total - requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total - request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds + parse_errors_total: 
components.sources.internal_metrics.output.metrics.parse_errors_total + requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total + requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total + request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds } } diff --git a/website/cue/reference/components/sources/prometheus_scrape.cue b/website/cue/reference/components/sources/prometheus_scrape.cue index ac051c92ebe86..797e83b5860a2 100644 --- a/website/cue/reference/components/sources/prometheus_scrape.cue +++ b/website/cue/reference/components/sources/prometheus_scrape.cue @@ -97,15 +97,10 @@ components: sources: prometheus_scrape: { } telemetry: metrics: { - http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total - http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total - parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total - request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds + http_error_response_total: components.sources.internal_metrics.output.metrics.http_error_response_total + http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total + parse_errors_total: components.sources.internal_metrics.output.metrics.parse_errors_total + requests_completed_total: components.sources.internal_metrics.output.metrics.requests_completed_total + request_duration_seconds: components.sources.internal_metrics.output.metrics.request_duration_seconds } } diff --git a/website/cue/reference/components/sources/redis.cue b/website/cue/reference/components/sources/redis.cue index 19e7a2780da8d..d395b233d2d3d 100644 --- a/website/cue/reference/components/sources/redis.cue +++ b/website/cue/reference/components/sources/redis.cue @@ -95,8 +95,4 @@ components: sources: redis: { """ } } - - telemetry: metrics: { - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/sources/socket.cue b/website/cue/reference/components/sources/socket.cue index f6bed0e610611..94bc241ee428d 100644 --- a/website/cue/reference/components/sources/socket.cue +++ b/website/cue/reference/components/sources/socket.cue @@ -111,17 +111,12 @@ components: sources: socket: { ] telemetry: metrics: { - connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total - connection_failed_total: components.sources.internal_metrics.output.metrics.connection_failed_total - connection_established_total: components.sources.internal_metrics.output.metrics.connection_established_total - 
connection_failed_total: components.sources.internal_metrics.output.metrics.connection_failed_total - connection_send_errors_total: components.sources.internal_metrics.output.metrics.connection_send_errors_total - connection_send_ack_errors_total: components.sources.internal_metrics.output.metrics.connection_send_ack_errors_total - connection_shutdown_total: components.sources.internal_metrics.output.metrics.connection_shutdown_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total + connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total + connection_failed_total: components.sources.internal_metrics.output.metrics.connection_failed_total + connection_established_total: components.sources.internal_metrics.output.metrics.connection_established_total + connection_failed_total: components.sources.internal_metrics.output.metrics.connection_failed_total + connection_send_errors_total: components.sources.internal_metrics.output.metrics.connection_send_errors_total + connection_send_ack_errors_total: components.sources.internal_metrics.output.metrics.connection_send_ack_errors_total + connection_shutdown_total: components.sources.internal_metrics.output.metrics.connection_shutdown_total } } diff --git a/website/cue/reference/components/sources/splunk_hec.cue b/website/cue/reference/components/sources/splunk_hec.cue index 43be16cdfc7dd..c1334cb3792da 100644 --- a/website/cue/reference/components/sources/splunk_hec.cue +++ b/website/cue/reference/components/sources/splunk_hec.cue @@ -79,12 +79,8 @@ components: sources: splunk_hec: { } telemetry: metrics: { - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total - requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total + http_request_errors_total: components.sources.internal_metrics.output.metrics.http_request_errors_total + requests_received_total: components.sources.internal_metrics.output.metrics.requests_received_total } how_it_works: { diff --git a/website/cue/reference/components/sources/statsd.cue b/website/cue/reference/components/sources/statsd.cue index cb190cd2b503a..3f09298f6e810 100644 --- a/website/cue/reference/components/sources/statsd.cue +++ b/website/cue/reference/components/sources/statsd.cue @@ -79,13 +79,8 @@ components: sources: statsd: { } telemetry: metrics: { - connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total - invalid_record_total: 
components.sources.internal_metrics.output.metrics.invalid_record_total - invalid_record_bytes_total: components.sources.internal_metrics.output.metrics.invalid_record_bytes_total - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total + connection_errors_total: components.sources.internal_metrics.output.metrics.connection_errors_total + invalid_record_total: components.sources.internal_metrics.output.metrics.invalid_record_total + invalid_record_bytes_total: components.sources.internal_metrics.output.metrics.invalid_record_bytes_total } } diff --git a/website/cue/reference/components/sources/stdin.cue b/website/cue/reference/components/sources/stdin.cue index e823a74c4156a..f99622103123b 100644 --- a/website/cue/reference/components/sources/stdin.cue +++ b/website/cue/reference/components/sources/stdin.cue @@ -85,11 +85,6 @@ components: sources: stdin: { } telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - stdin_reads_failed_total: components.sources.internal_metrics.output.metrics.stdin_reads_failed_total + stdin_reads_failed_total: components.sources.internal_metrics.output.metrics.stdin_reads_failed_total } } diff --git a/website/cue/reference/components/sources/syslog.cue b/website/cue/reference/components/sources/syslog.cue index dc228327221b7..c98751a59b613 100644 --- a/website/cue/reference/components/sources/syslog.cue +++ b/website/cue/reference/components/sources/syslog.cue @@ -205,9 +205,7 @@ components: sources: syslog: { } telemetry: metrics: { - connection_read_errors_total: components.sources.internal_metrics.output.metrics.connection_read_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - utf8_convert_errors_total: components.sources.internal_metrics.output.metrics.utf8_convert_errors_total + connection_read_errors_total: components.sources.internal_metrics.output.metrics.connection_read_errors_total + utf8_convert_errors_total: components.sources.internal_metrics.output.metrics.utf8_convert_errors_total } } diff --git a/website/cue/reference/components/sources/vector.cue b/website/cue/reference/components/sources/vector.cue index 2b933f40fef4a..87737a272480f 100644 --- a/website/cue/reference/components/sources/vector.cue +++ b/website/cue/reference/components/sources/vector.cue @@ -100,11 +100,6 @@ components: 
sources: vector: { } telemetry: metrics: { - component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total - component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total - component_received_bytes_total: components.sources.internal_metrics.output.metrics.component_received_bytes_total - component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total - component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - protobuf_decode_errors_total: components.sources.internal_metrics.output.metrics.protobuf_decode_errors_total + protobuf_decode_errors_total: components.sources.internal_metrics.output.metrics.protobuf_decode_errors_total } } diff --git a/website/cue/reference/components/transforms.cue b/website/cue/reference/components/transforms.cue index f548f7be69c35..35942a0e0005a 100644 --- a/website/cue/reference/components/transforms.cue +++ b/website/cue/reference/components/transforms.cue @@ -13,11 +13,13 @@ components: transforms: [Name=string]: { configuration: base.components.transforms.configuration telemetry: metrics: { + component_discarded_events_total: components.sources.internal_metrics.output.metrics.component_discarded_events_total + component_errors_total: components.sources.internal_metrics.output.metrics.component_errors_total component_received_events_count: components.sources.internal_metrics.output.metrics.component_received_events_count component_received_events_total: components.sources.internal_metrics.output.metrics.component_received_events_total component_received_event_bytes_total: components.sources.internal_metrics.output.metrics.component_received_event_bytes_total - utilization: components.sources.internal_metrics.output.metrics.utilization component_sent_events_total: components.sources.internal_metrics.output.metrics.component_sent_events_total component_sent_event_bytes_total: components.sources.internal_metrics.output.metrics.component_sent_event_bytes_total + utilization: components.sources.internal_metrics.output.metrics.utilization } } diff --git a/website/cue/reference/components/transforms/dedupe.cue b/website/cue/reference/components/transforms/dedupe.cue index 0fe9e5e19499f..b358129ef0e60 100644 --- a/website/cue/reference/components/transforms/dedupe.cue +++ b/website/cue/reference/components/transforms/dedupe.cue @@ -93,8 +93,4 @@ components: transforms: dedupe: { """ } } - - telemetry: metrics: { - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - } } diff --git a/website/cue/reference/components/transforms/filter.cue b/website/cue/reference/components/transforms/filter.cue index f0264c527dc4e..8fb684e40123b 100644 --- a/website/cue/reference/components/transforms/filter.cue +++ b/website/cue/reference/components/transforms/filter.cue @@ -69,8 +69,4 @@ components: transforms: filter: { ] }, ] - - telemetry: metrics: { - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - } } diff --git a/website/cue/reference/components/transforms/log_to_metric.cue b/website/cue/reference/components/transforms/log_to_metric.cue index 04df2a042fd0c..1bdc6a333924c 100644 --- a/website/cue/reference/components/transforms/log_to_metric.cue +++ b/website/cue/reference/components/transforms/log_to_metric.cue @@ -326,8 +326,4 @@ components: transforms: log_to_metric: 
{ """ } } - - telemetry: metrics: { - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/transforms/lua.cue b/website/cue/reference/components/transforms/lua.cue index 766d61fa1ede1..ea36e8c63f4ca 100644 --- a/website/cue/reference/components/transforms/lua.cue +++ b/website/cue/reference/components/transforms/lua.cue @@ -308,7 +308,6 @@ components: transforms: lua: { } telemetry: metrics: { - lua_memory_used_bytes: components.sources.internal_metrics.output.metrics.lua_memory_used_bytes - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total + lua_memory_used_bytes: components.sources.internal_metrics.output.metrics.lua_memory_used_bytes } } diff --git a/website/cue/reference/components/transforms/metric_to_log.cue b/website/cue/reference/components/transforms/metric_to_log.cue index 2db6b64aece01..3607ccaacf07f 100644 --- a/website/cue/reference/components/transforms/metric_to_log.cue +++ b/website/cue/reference/components/transforms/metric_to_log.cue @@ -92,8 +92,4 @@ components: transforms: metric_to_log: { ] how_it_works: {} - - telemetry: metrics: { - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/transforms/remap.cue b/website/cue/reference/components/transforms/remap.cue index 1b86eaa205161..0dfa7e067de5f 100644 --- a/website/cue/reference/components/transforms/remap.cue +++ b/website/cue/reference/components/transforms/remap.cue @@ -154,8 +154,4 @@ components: transforms: "remap": { """ }, ] - - telemetry: metrics: { - processing_errors_total: components.sources.internal_metrics.output.metrics.processing_errors_total - } } diff --git a/website/cue/reference/components/transforms/sample.cue b/website/cue/reference/components/transforms/sample.cue index 40f1a7476f3da..97c39cd2f6a0a 100644 --- a/website/cue/reference/components/transforms/sample.cue +++ b/website/cue/reference/components/transforms/sample.cue @@ -31,8 +31,4 @@ components: transforms: sample: { metrics: null traces: true } - - telemetry: metrics: { - events_discarded_total: components.sources.internal_metrics.output.metrics.events_discarded_total - } } From 3b87e00f3a62be93f55a89df676b47a8fad22201 Mon Sep 17 00:00:00 2001 From: neuronull Date: Tue, 30 May 2023 15:02:15 -0600 Subject: [PATCH 067/236] fix(ci): add missing logic to mark required checks failed (#17543) - Test Suite and Integration Test Suite are required checks for CI, so their jobs always need to run (relevant if dependent job failed) to correctly mark the final status. --- .github/workflows/integration.yml | 2 ++ .github/workflows/test.yml | 13 +++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 959c43dde7058..898d4c6f4a649 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -121,9 +121,11 @@ jobs: - test_name: 'webhdfs' if: ${{ github.event_name == 'merge_group' || needs.changes.outputs.int-all == 'true' || needs.changes.outputs.webhdfs == 'true' }} + # This is a required status check, so it always needs to run if prior jobs failed, in order to mark the status correctly. 
integration: name: Integration Test Suite runs-on: ubuntu-latest + if: always() needs: - integration-matrix env: diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b36aba1f3108d..76ad3ce2e27db 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -139,14 +139,23 @@ jobs: path: "/tmp/vector-config-schema.json" if: success() || failure() + # This is a required status check, so it always needs to run if prior jobs failed, in order to mark the status correctly. all-checks: name: Test Suite runs-on: ubuntu-20.04 + if: always() needs: - checks - test-vrl - test-linux + env: + FAILED: ${{ contains(needs.*.result, 'failure') }} steps: - - name: validate - run: echo "OK" + - run: | + echo "failed=${{ env.FAILED }}" + if [[ "$FAILED" == "true" ]] ; then + exit 1 + else + exit 0 + fi From e2c025591c572efdd04728fac301b2e025596516 Mon Sep 17 00:00:00 2001 From: neuronull Date: Tue, 30 May 2023 16:14:59 -0600 Subject: [PATCH 068/236] fix(ci): post failed status to PR and isolate branch checkout on comment trigger (#17544) --- .github/workflows/regression.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 629ae62c201db..2811a0042237c 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -129,6 +129,12 @@ jobs: - uses: actions/checkout@v3 + - name: Checkout PR branch (issue_comment) + if: github.event_name == 'issue_comment' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: gh pr checkout ${{ github.event.issue.number }} + # If triggered by issue comment, the event payload doesn't directly contain the head and base sha from the PR. # But, we can retrieve this info from some commands. - name: Get PR metadata (issue_comment) @@ -140,8 +146,6 @@ jobs: export PR_NUMBER=${{ github.event.issue.number }} echo "PR_NUMBER=${PR_NUMBER}" >> $GITHUB_OUTPUT - gh pr checkout ${PR_NUMBER} - export BASELINE_SHA=$(git merge-base --fork-point master) echo "BASELINE_SHA=${BASELINE_SHA}" >> $GITHUB_OUTPUT @@ -768,7 +772,7 @@ jobs: if: github.event_name == 'issue_comment' && env.FAILED == 'true' uses: myrotvorets/set-commit-status-action@v1.1.7 with: - sha: ${{ steps.compute-metadata.outputs.comparison-sha }} + sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} context: Regression Detection Suite status: 'failure' From dbd7151aa4128638765e360f3f0f4e6582735041 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 22:57:35 +0000 Subject: [PATCH 069/236] chore(deps): Bump opendal from 0.35.0 to 0.36.0 (#17540) Bumps [opendal](https://github.com/apache/incubator-opendal) from 0.35.0 to 0.36.0.
Release notes

Sourced from opendal's releases.

v0.36.0

Upgrade to v0.36

Public API

In v0.36, OpenDAL improves the `xxx_with` API by allowing it to be called in a chain:

After this change, all `xxx_with`-style calls will change from

let bs = op.read_with(
  "path/to/file",
  OpRead::new()
    .with_range(0..=1024)
    .with_if_match("<etag>")
    .with_if_none_match("<etag>")
    .with_override_cache_control("<cache_control>")
    .with_override_content_disposition("<content_disposition>")
  ).await?;

to

let bs = op.read_with("path/to/file")
  .range(0..=1024)
  .if_match("<etag>")
  .if_none_match("<etag>")
  .override_cache_control("<cache_control>")
  .override_content_disposition("<content_disposition>")
  .await?;

For blocking API calls, we will need a call() at the end:

let bs = bop.read_with("path/to/file")
  .range(0..=1024)
  .if_match("<etag>")
  .if_none_match("<etag>")
  .override_cache_control("<cache_control>")
  .override_content_disposition("<content_disposition>")
  .call()?;

Along with this change, users don't need to call `OpXxx` anymore, so we moved it to the raw API.

More details can be found at [RFC: Chain Based Operator API](https://opendal.apache.org/docs/rust/opendal/docs/rfcs/rfc_2299_chain_based_operator_api/index.html).

Raw API

... (truncated)

Changelog

Sourced from opendal's changelog.

[v0.36.0] - 2023-05-30

Added

  • feat(service/fs): add append support for fs (#2296)
  • feat(services/sftp): add append support for sftp (#2297)
  • RFC-2299: Chain based Operator API (#2299)
  • feat(services/azblob): add append support (#2302)
  • feat(bindings/nodejs): add append support (#2322)
  • feat(bindings/C): opendal_operator_ptr construction using kvs (#2329)
  • feat(services/cos): append support (#2332)
  • feat(bindings/java): implement Operator#delete (#2345)
  • feat(bindings/java): support append (#2350)
  • feat(bindings/java): save one jni call in the hot path (#2353)
  • feat: server side encryption support for azblob (#2347)

Changed

  • refactor(core): Implement RFC-2299 for stat_with (#2303)
  • refactor(core): Implement RFC-2299 for BlockingOperator::write_with (#2305)
  • refactor(core): Implement RFC-2299 for appender_with (#2307)
  • refactor(core): Implement RFC-2299 for read_with (#2308)
  • refactor(core): Implement RFC-2299 for read_with (#2308)
  • refactor(core): Implement RFC-2299 for append_with (#2312)
  • refactor(core): Implement RFC-2299 for write_with (#2315)
  • refactor(core): Implement RFC-2299 for reader_with (#2316)
  • refactor(core): Implement RFC-2299 for writer_with (#2317)
  • refactor(core): Implement RFC-2299 for presign_read_with (#2314)
  • refactor(core): Implement RFC-2299 for presign_write_with (#2320)
  • refactor(core): Implement RFC-2299 for list_with (#2323)
  • refactor: Move ops to raw::ops (#2325)
  • refactor(bindings/C): align bdd test with the feature tests (#2340)
  • refactor(bindings/java): narrow unsafe boundary (#2351)

Fixed

  • fix(services/supabase): correctly set retryable (#2295)
  • fix(core): appender complete check (#2298)

Docs

  • docs: add service doc for azdfs (#2310)
  • docs(bidnings/java): how to deploy snapshots (#2311)
  • docs(bidnings/java): how to deploy snapshots (#2311)
  • docs: Fixed links of languages to open in same tab (#2327)
  • docs: Adopt docusaurus pathname protocol (#2330)
  • docs(bindings/nodejs): update lib desc (#2331)
  • docs(bindings/java): update the README file (#2338)
  • docs: add service doc for fs (#2337)
  • docs: add service doc for cos (#2341)

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=opendal&package-manager=cargo&previous-version=0.35.0&new-version=0.36.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38ac0c27824ac..efe263a0f366c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5526,9 +5526,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "opendal" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "440f466680e0bc98ea94af95301aab4c69d9720934baec8f46b79a69fdd87cce" +checksum = "3555168d4cc9a83c332e1416ff00e3be36a6d78447dff472829962afbc91bb3d" dependencies = [ "anyhow", "async-compat", diff --git a/Cargo.toml b/Cargo.toml index e7fb3e18c9b29..3bf10945b4d06 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -181,7 +181,7 @@ azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = azure_storage_blobs = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, optional = true } # OpenDAL -opendal = {version = "0.35", default-features = false, features = ["native-tls", "services-webhdfs"], optional = true} +opendal = {version = "0.36", default-features = false, features = ["native-tls", "services-webhdfs"], optional = true} # Tower tower = { version = "0.4.13", default-features = false, features = ["buffer", "limit", "retry", "timeout", "util", "balance", "discover"] } From 3b2a2be1b075344a92294c1248b09844f895ad72 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Wed, 31 May 2023 15:18:38 +0100 Subject: [PATCH 070/236] chore(observability): ensure `sent_event` and `received_event` metrics are estimated json size (#17465) This PR creates a newtype - [`JsonSize`](https://github.com/vectordotdev/vector/blob/stephen/event_json_size/lib/vector-common/src/json_size.rs) that is returned by the `EstimatedJsonEncodedSizeOf::estimated_json_encoded_size_of` trait function. The events that emit a `component_received_event_bytes_total` or `component_sent_event_bytes_total` event accept `JsonSize`. This allows us to use the compiler to ensure we are emitting the correct measurement. A number of components needed changing to ensure this worked. 
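For illustration only (this sketch is not part of the change set), the guarantee the newtype provides looks roughly like the following. The local `JsonSize` here just mirrors the type added in `lib/vector-common/src/json_size.rs`, and `emit_received` is a hypothetical stand-in for the real emitters:

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct JsonSize(usize);

impl JsonSize {
    const fn new(size: usize) -> Self {
        Self(size)
    }

    fn get(&self) -> usize {
        self.0
    }
}

// The emitter takes the newtype rather than a bare `usize`, so an allocated-bytes
// count can no longer be passed where an estimated JSON size is expected.
fn emit_received(count: usize, byte_size: JsonSize) {
    println!("received {count} events, estimated JSON size {}", byte_size.get());
}

fn main() {
    let estimated = JsonSize::new(123); // e.g. the result of estimated_json_encoded_size_of()
    emit_received(1, estimated);
    // emit_received(1, 123); // does not compile: expected `JsonSize`, found integer
}

Passing a raw `usize` (for example, an allocated-bytes count) where a `JsonSize` is expected now fails to compile, which is the guarantee this change is after.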
--------- Signed-off-by: Stephen Wakely --- .../src/internal_event/events_received.rs | 2 +- .../src/internal_event/events_sent.rs | 6 +- lib/vector-common/src/internal_event/mod.rs | 6 +- lib/vector-common/src/json_size.rs | 105 +++++ lib/vector-common/src/lib.rs | 2 + lib/vector-common/src/request_metadata.rs | 10 +- lib/vector-core/src/event/array.rs | 7 +- .../event/estimated_json_encoded_size_of.rs | 367 +++++++++--------- lib/vector-core/src/event/log_event.rs | 13 +- lib/vector-core/src/event/metric/mod.rs | 6 +- lib/vector-core/src/event/mod.rs | 4 +- lib/vector-core/src/event/trace.rs | 4 +- lib/vector-core/src/stream/driver.rs | 3 +- lib/vector-core/src/transform/mod.rs | 5 +- .../validators/component_spec/sources.rs | 9 +- src/internal_events/apache_metrics.rs | 9 +- src/internal_events/aws_ecs_metrics.rs | 9 +- src/internal_events/docker_logs.rs | 9 +- src/internal_events/exec.rs | 11 +- src/internal_events/file.rs | 10 +- src/internal_events/http.rs | 9 +- src/internal_events/http_client_source.rs | 9 +- src/internal_events/internal_logs.rs | 5 +- src/internal_events/kafka.rs | 9 +- src/internal_events/kubernetes_logs.rs | 11 +- src/internal_events/mongodb_metrics.rs | 11 +- src/internal_events/nginx_metrics.rs | 9 +- src/internal_events/socket.rs | 17 +- src/sinks/amqp/request_builder.rs | 5 + src/sinks/amqp/service.rs | 17 +- src/sinks/amqp/sink.rs | 3 +- src/sinks/aws_cloudwatch_logs/service.rs | 6 +- src/sinks/aws_cloudwatch_metrics/mod.rs | 6 +- src/sinks/aws_kinesis/service.rs | 8 +- src/sinks/aws_sqs/request_builder.rs | 2 +- src/sinks/aws_sqs/service.rs | 13 +- src/sinks/azure_blob/request_builder.rs | 4 +- src/sinks/azure_common/config.rs | 9 +- src/sinks/blackhole/sink.rs | 4 +- src/sinks/databend/service.rs | 2 +- src/sinks/datadog/events/service.rs | 6 +- src/sinks/datadog/logs/service.rs | 11 +- src/sinks/datadog/metrics/request_builder.rs | 12 +- src/sinks/datadog/metrics/service.rs | 11 +- src/sinks/datadog/traces/request_builder.rs | 13 +- src/sinks/datadog/traces/service.rs | 11 +- src/sinks/datadog_archives.rs | 4 +- src/sinks/elasticsearch/encoder.rs | 3 +- src/sinks/elasticsearch/request_builder.rs | 10 +- src/sinks/elasticsearch/retry.rs | 5 +- src/sinks/elasticsearch/service.rs | 9 +- src/sinks/influxdb/metrics.rs | 6 +- src/sinks/kafka/service.rs | 11 +- src/sinks/loki/event.rs | 11 +- src/sinks/opendal_common.rs | 9 +- src/sinks/prometheus/remote_write.rs | 5 +- src/sinks/pulsar/service.rs | 16 +- src/sinks/pulsar/sink.rs | 4 +- src/sinks/redis.rs | 3 +- src/sinks/s3_common/service.rs | 11 +- src/sinks/sematext/metrics.rs | 12 +- src/sinks/splunk_hec/common/response.rs | 3 +- src/sinks/splunk_hec/common/service.rs | 4 +- src/sinks/statsd/service.rs | 2 +- src/sinks/util/adaptive_concurrency/tests.rs | 5 +- src/sinks/util/batch.rs | 9 + src/sinks/util/buffer/mod.rs | 6 +- src/sinks/util/http.rs | 7 +- src/sinks/util/metadata.rs | 6 +- src/sinks/util/mod.rs | 7 +- src/sinks/util/processed_event.rs | 3 +- src/sinks/util/service.rs | 6 +- src/sinks/util/sink.rs | 47 ++- src/sinks/util/socket_bytes_sink.rs | 13 +- src/sinks/util/tcp.rs | 7 +- src/sinks/util/unix.rs | 7 +- src/sinks/vector/service.rs | 11 +- src/sinks/vector/sink.rs | 3 +- src/sources/dnstap/mod.rs | 69 ++-- src/sources/file.rs | 17 +- src/sources/file_descriptors/mod.rs | 2 +- src/sources/internal_logs.rs | 6 +- src/sources/internal_metrics.rs | 8 +- src/sources/postgresql_metrics.rs | 26 +- src/sources/util/framestream.rs | 9 +- src/sources/util/http_client.rs | 3 +- 
.../2023-07-04-0-31-0-upgrade-guide.md | 31 ++ 87 files changed, 807 insertions(+), 449 deletions(-) create mode 100644 lib/vector-common/src/json_size.rs create mode 100644 website/content/en/highlights/2023-07-04-0-31-0-upgrade-guide.md diff --git a/lib/vector-common/src/internal_event/events_received.rs b/lib/vector-common/src/internal_event/events_received.rs index 4021b3c578143..c25cc228c9fd1 100644 --- a/lib/vector-common/src/internal_event/events_received.rs +++ b/lib/vector-common/src/internal_event/events_received.rs @@ -18,6 +18,6 @@ crate::registered_event!( #[allow(clippy::cast_precision_loss)] self.events_count.record(count as f64); self.events.increment(count as u64); - self.event_bytes.increment(byte_size as u64); + self.event_bytes.increment(byte_size.get() as u64); } ); diff --git a/lib/vector-common/src/internal_event/events_sent.rs b/lib/vector-common/src/internal_event/events_sent.rs index 7d9986fdf63c4..d329562afe7fc 100644 --- a/lib/vector-common/src/internal_event/events_sent.rs +++ b/lib/vector-common/src/internal_event/events_sent.rs @@ -27,15 +27,15 @@ crate::registered_event!( match &self.output { Some(output) => { - trace!(message = "Events sent.", count = %count, byte_size = %byte_size, output = %output); + trace!(message = "Events sent.", count = %count, byte_size = %byte_size.get(), output = %output); } None => { - trace!(message = "Events sent.", count = %count, byte_size = %byte_size); + trace!(message = "Events sent.", count = %count, byte_size = %byte_size.get()); } } self.events.increment(count as u64); - self.event_bytes.increment(byte_size as u64); + self.event_bytes.increment(byte_size.get() as u64); } ); diff --git a/lib/vector-common/src/internal_event/mod.rs b/lib/vector-common/src/internal_event/mod.rs index 37d0dcfc634d9..7af70cc1322ee 100644 --- a/lib/vector-common/src/internal_event/mod.rs +++ b/lib/vector-common/src/internal_event/mod.rs @@ -16,6 +16,8 @@ pub use events_sent::{EventsSent, DEFAULT_OUTPUT}; pub use prelude::{error_stage, error_type}; pub use service::{CallError, PollReadyError}; +use crate::json_size::JsonSize; + pub trait InternalEvent: Sized { fn emit(self); @@ -106,9 +108,9 @@ pub struct ByteSize(pub usize); #[derive(Clone, Copy)] pub struct Count(pub usize); -/// Holds the tuple `(count_of_events, size_of_events_in_bytes)`. +/// Holds the tuple `(count_of_events, estimated_json_size_of_events)`. #[derive(Clone, Copy)] -pub struct CountByteSize(pub usize, pub usize); +pub struct CountByteSize(pub usize, pub JsonSize); // Wrapper types used to hold parameters for registering events diff --git a/lib/vector-common/src/json_size.rs b/lib/vector-common/src/json_size.rs new file mode 100644 index 0000000000000..746b6335716d1 --- /dev/null +++ b/lib/vector-common/src/json_size.rs @@ -0,0 +1,105 @@ +use std::{ + fmt, + iter::Sum, + ops::{Add, AddAssign, Sub}, +}; + +/// A newtype for the JSON size of an event. +/// Used to emit the `component_received_event_bytes_total` and +/// `component_sent_event_bytes_total` metrics. 
+#[derive(Clone, Copy, Default, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct JsonSize(usize); + +impl fmt::Display for JsonSize { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Sub for JsonSize { + type Output = JsonSize; + + #[inline] + fn sub(mut self, rhs: Self) -> Self::Output { + self.0 -= rhs.0; + self + } +} + +impl Add for JsonSize { + type Output = JsonSize; + + #[inline] + fn add(mut self, rhs: Self) -> Self::Output { + self.0 += rhs.0; + self + } +} + +impl AddAssign for JsonSize { + #[inline] + fn add_assign(&mut self, rhs: Self) { + self.0 += rhs.0; + } +} + +impl Sum for JsonSize { + #[inline] + fn sum>(iter: I) -> Self { + let mut accum = 0; + for val in iter { + accum += val.get(); + } + + JsonSize::new(accum) + } +} + +impl From for JsonSize { + #[inline] + fn from(value: usize) -> Self { + Self(value) + } +} + +impl JsonSize { + /// Create a new instance with the specified size. + #[must_use] + #[inline] + pub const fn new(size: usize) -> Self { + Self(size) + } + + /// Create a new instance with size 0. + #[must_use] + #[inline] + pub const fn zero() -> Self { + Self(0) + } + + /// Returns the contained size. + #[must_use] + #[inline] + pub fn get(&self) -> usize { + self.0 + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +#[allow(clippy::module_name_repetitions)] +pub struct NonZeroJsonSize(JsonSize); + +impl NonZeroJsonSize { + #[must_use] + #[inline] + pub fn new(size: JsonSize) -> Option { + (size.0 > 0).then_some(NonZeroJsonSize(size)) + } +} + +impl From for JsonSize { + #[inline] + fn from(value: NonZeroJsonSize) -> Self { + value.0 + } +} diff --git a/lib/vector-common/src/lib.rs b/lib/vector-common/src/lib.rs index 832eaf0d5cdc5..d7d591323e07e 100644 --- a/lib/vector-common/src/lib.rs +++ b/lib/vector-common/src/lib.rs @@ -18,6 +18,8 @@ pub use vrl::btreemap; #[cfg(feature = "byte_size_of")] pub mod byte_size_of; +pub mod json_size; + pub mod config; #[cfg(feature = "conversion")] diff --git a/lib/vector-common/src/request_metadata.rs b/lib/vector-common/src/request_metadata.rs index be68c319dcadf..cce6124361b60 100644 --- a/lib/vector-common/src/request_metadata.rs +++ b/lib/vector-common/src/request_metadata.rs @@ -1,5 +1,7 @@ use std::ops::Add; +use crate::json_size::JsonSize; + /// Metadata for batch requests. #[derive(Clone, Copy, Debug, Default)] pub struct RequestMetadata { @@ -8,7 +10,7 @@ pub struct RequestMetadata { /// Size, in bytes, of the in-memory representation of all events in this batch request. events_byte_size: usize, /// Size, in bytes, of the estimated JSON-encoded representation of all events in this batch request. - events_estimated_json_encoded_byte_size: usize, + events_estimated_json_encoded_byte_size: JsonSize, /// Uncompressed size, in bytes, of the encoded events in this batch request. request_encoded_size: usize, /// On-the-wire size, in bytes, of the batch request itself after compression, etc. 
@@ -25,7 +27,7 @@ impl RequestMetadata { events_byte_size: usize, request_encoded_size: usize, request_wire_size: usize, - events_estimated_json_encoded_byte_size: usize, + events_estimated_json_encoded_byte_size: JsonSize, ) -> Self { Self { event_count, @@ -47,7 +49,7 @@ impl RequestMetadata { } #[must_use] - pub const fn events_estimated_json_encoded_byte_size(&self) -> usize { + pub const fn events_estimated_json_encoded_byte_size(&self) -> JsonSize { self.events_estimated_json_encoded_byte_size } @@ -64,7 +66,7 @@ impl RequestMetadata { /// Constructs a `RequestMetadata` by summation of the "batch" of `RequestMetadata` provided. #[must_use] pub fn from_batch>(metadata_iter: T) -> Self { - let mut metadata_sum = RequestMetadata::new(0, 0, 0, 0, 0); + let mut metadata_sum = RequestMetadata::new(0, 0, 0, 0, JsonSize::zero()); for metadata in metadata_iter { metadata_sum = metadata_sum + &metadata; diff --git a/lib/vector-core/src/event/array.rs b/lib/vector-core/src/event/array.rs index bc30573147d45..da87a6f6a8074 100644 --- a/lib/vector-core/src/event/array.rs +++ b/lib/vector-core/src/event/array.rs @@ -8,7 +8,10 @@ use futures::{stream, Stream}; #[cfg(test)] use quickcheck::{Arbitrary, Gen}; use vector_buffers::EventCount; -use vector_common::finalization::{AddBatchNotifier, BatchNotifier, EventFinalizers, Finalizable}; +use vector_common::{ + finalization::{AddBatchNotifier, BatchNotifier, EventFinalizers, Finalizable}, + json_size::JsonSize, +}; use super::{ EstimatedJsonEncodedSizeOf, Event, EventDataEq, EventFinalizer, EventMutRef, EventRef, @@ -253,7 +256,7 @@ impl ByteSizeOf for EventArray { } impl EstimatedJsonEncodedSizeOf for EventArray { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { match self { Self::Logs(v) => v.estimated_json_encoded_size_of(), Self::Traces(v) => v.estimated_json_encoded_size_of(), diff --git a/lib/vector-core/src/event/estimated_json_encoded_size_of.rs b/lib/vector-core/src/event/estimated_json_encoded_size_of.rs index 8ee523d13f5cd..b671c8a817919 100644 --- a/lib/vector-core/src/event/estimated_json_encoded_size_of.rs +++ b/lib/vector-core/src/event/estimated_json_encoded_size_of.rs @@ -4,11 +4,12 @@ use bytes::Bytes; use chrono::{DateTime, Timelike, Utc}; use ordered_float::NotNan; use smallvec::SmallVec; +use vector_common::json_size::JsonSize; use vrl::value::Value; -const NULL_SIZE: usize = 4; -const TRUE_SIZE: usize = 4; -const FALSE_SIZE: usize = 5; +const NULL_SIZE: JsonSize = JsonSize::new(4); +const TRUE_SIZE: JsonSize = JsonSize::new(4); +const FALSE_SIZE: JsonSize = JsonSize::new(5); const BRACKETS_SIZE: usize = 2; const BRACES_SIZE: usize = 2; @@ -40,17 +41,17 @@ const EPOCH_RFC3339_9: &str = "1970-01-01T00:00:00.000000000Z"; /// /// Ideally, no allocations should take place in any implementation of this function. 
pub trait EstimatedJsonEncodedSizeOf { - fn estimated_json_encoded_size_of(&self) -> usize; + fn estimated_json_encoded_size_of(&self) -> JsonSize; } impl EstimatedJsonEncodedSizeOf for &T { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { T::estimated_json_encoded_size_of(self) } } impl EstimatedJsonEncodedSizeOf for Option { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { match self { Some(v) => v.estimated_json_encoded_size_of(), None => NULL_SIZE, @@ -61,13 +62,13 @@ impl EstimatedJsonEncodedSizeOf for Option { impl EstimatedJsonEncodedSizeOf for SmallVec<[T; N]> { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.iter().map(T::estimated_json_encoded_size_of).sum() } } impl EstimatedJsonEncodedSizeOf for Value { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { match self { Value::Timestamp(v) => v.estimated_json_encoded_size_of(), Value::Object(v) => v.estimated_json_encoded_size_of(), @@ -88,25 +89,25 @@ impl EstimatedJsonEncodedSizeOf for Value { /// This is the main reason why `EstimatedJsonEncodedSizeOf` is named as is, as most other types can /// be calculated exactly without a noticable performance penalty. impl EstimatedJsonEncodedSizeOf for str { - fn estimated_json_encoded_size_of(&self) -> usize { - QUOTES_SIZE + self.len() + fn estimated_json_encoded_size_of(&self) -> JsonSize { + JsonSize::new(QUOTES_SIZE + self.len()) } } impl EstimatedJsonEncodedSizeOf for String { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.as_str().estimated_json_encoded_size_of() } } impl EstimatedJsonEncodedSizeOf for Bytes { - fn estimated_json_encoded_size_of(&self) -> usize { - QUOTES_SIZE + self.len() + fn estimated_json_encoded_size_of(&self) -> JsonSize { + JsonSize::new(QUOTES_SIZE + self.len()) } } impl EstimatedJsonEncodedSizeOf for bool { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { if *self { TRUE_SIZE } else { @@ -116,19 +117,19 @@ impl EstimatedJsonEncodedSizeOf for bool { } impl EstimatedJsonEncodedSizeOf for f64 { - fn estimated_json_encoded_size_of(&self) -> usize { - ryu::Buffer::new().format_finite(*self).len() + fn estimated_json_encoded_size_of(&self) -> JsonSize { + ryu::Buffer::new().format_finite(*self).len().into() } } impl EstimatedJsonEncodedSizeOf for f32 { - fn estimated_json_encoded_size_of(&self) -> usize { - ryu::Buffer::new().format_finite(*self).len() + fn estimated_json_encoded_size_of(&self) -> JsonSize { + ryu::Buffer::new().format_finite(*self).len().into() } } impl EstimatedJsonEncodedSizeOf for NotNan { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.into_inner().estimated_json_encoded_size_of() } } @@ -140,19 +141,19 @@ where K: AsRef, V: EstimatedJsonEncodedSizeOf, { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let size = self.iter().fold(BRACES_SIZE, |acc, (k, v)| { - acc + k.as_ref().estimated_json_encoded_size_of() + acc + k.as_ref().estimated_json_encoded_size_of().get() + COLON_SIZE - + v.estimated_json_encoded_size_of() + + v.estimated_json_encoded_size_of().get() + COMMA_SIZE }); - if size > BRACES_SIZE { + JsonSize::new(if size > 
BRACES_SIZE { size - COMMA_SIZE } else { size - } + }) } } @@ -164,19 +165,19 @@ where V: EstimatedJsonEncodedSizeOf, S: ::std::hash::BuildHasher, { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let size = self.iter().fold(BRACES_SIZE, |acc, (k, v)| { - acc + k.as_ref().estimated_json_encoded_size_of() + acc + k.as_ref().estimated_json_encoded_size_of().get() + COLON_SIZE - + v.estimated_json_encoded_size_of() + + v.estimated_json_encoded_size_of().get() + COMMA_SIZE }); - if size > BRACES_SIZE { + JsonSize::new(if size > BRACES_SIZE { size - COMMA_SIZE } else { size - } + }) } } @@ -184,16 +185,16 @@ impl EstimatedJsonEncodedSizeOf for Vec where V: EstimatedJsonEncodedSizeOf, { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let size = self.iter().fold(BRACKETS_SIZE, |acc, v| { - acc + COMMA_SIZE + v.estimated_json_encoded_size_of() + acc + COMMA_SIZE + v.estimated_json_encoded_size_of().get() }); - if size > BRACKETS_SIZE { + JsonSize::new(if size > BRACKETS_SIZE { size - COMMA_SIZE } else { size - } + }) } } @@ -205,7 +206,7 @@ impl EstimatedJsonEncodedSizeOf for DateTime { /// /// - `chrono::SecondsFormat::AutoSi` is used to calculate nanoseconds precision. /// - `use_z` is `true` for the `chrono::DateTime#to_rfc3339_opts` function call. - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let ns = self.nanosecond() % 1_000_000_000; let epoch = if ns == 0 { EPOCH_RFC3339_0 @@ -217,202 +218,218 @@ impl EstimatedJsonEncodedSizeOf for DateTime { EPOCH_RFC3339_9 }; - QUOTES_SIZE + epoch.len() + JsonSize::new(QUOTES_SIZE + epoch.len()) } } impl EstimatedJsonEncodedSizeOf for u8 { #[rustfmt::skip] - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let v = *self; // 0 ..= 255 - if v < 10 { 1 - } else if v < 100 { 2 - } else { 3 } + JsonSize::new( + if v < 10 { 1 + } else if v < 100 { 2 + } else { 3 } + ) } } impl EstimatedJsonEncodedSizeOf for i8 { #[rustfmt::skip] - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let v = *self; // -128 ..= 127 - if v < -99 { 4 - } else if v < -9 { 3 - } else if v < 0 { 2 - } else if v < 10 { 1 - } else if v < 100 { 2 - } else { 3 } + JsonSize::new( + if v < -99 { 4 + } else if v < -9 { 3 + } else if v < 0 { 2 + } else if v < 10 { 1 + } else if v < 100 { 2 + } else { 3 } + ) } } impl EstimatedJsonEncodedSizeOf for u16 { #[rustfmt::skip] - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let v = *self; // 0 ..= 65_535 - if v < 10 { 1 - } else if v < 100 { 2 - } else if v < 1_000 { 3 - } else if v < 10_000 { 4 - } else { 5 } + JsonSize::new( + if v < 10 { 1 + } else if v < 100 { 2 + } else if v < 1_000 { 3 + } else if v < 10_000 { 4 + } else { 5 } + ) } } impl EstimatedJsonEncodedSizeOf for i16 { #[rustfmt::skip] - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let v = *self; // -32_768 ..= 32_767 - if v < -9_999 { 6 - } else if v < -999 { 5 - } else if v < -99 { 4 - } else if v < -9 { 3 - } else if v < 0 { 2 - } else if v < 10 { 1 - } else if v < 100 { 2 - } else if v < 1_000 { 3 - } else if v < 10_000 { 4 - } else { 5 } + JsonSize::new( + if v < -9_999 { 6 + } else if v < -999 { 5 + } else if v < -99 { 4 + } else if v < -9 { 3 + } else if 
v < 0 { 2 + } else if v < 10 { 1 + } else if v < 100 { 2 + } else if v < 1_000 { 3 + } else if v < 10_000 { 4 + } else { 5 } + ) } } impl EstimatedJsonEncodedSizeOf for u32 { #[rustfmt::skip] - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let v = *self; // 0 ..= 4_294_967_295 - if v < 10 { 1 - } else if v < 100 { 2 - } else if v < 1_000 { 3 - } else if v < 10_000 { 4 - } else if v < 100_000 { 5 - } else if v < 1_000_000 { 6 - } else if v < 10_000_000 { 7 - } else if v < 100_000_000 { 8 - } else if v < 1_000_000_000 { 9 - } else { 10 } + JsonSize::new( + if v < 10 { 1 + } else if v < 100 { 2 + } else if v < 1_000 { 3 + } else if v < 10_000 { 4 + } else if v < 100_000 { 5 + } else if v < 1_000_000 { 6 + } else if v < 10_000_000 { 7 + } else if v < 100_000_000 { 8 + } else if v < 1_000_000_000 { 9 + } else { 10 } + ) } } impl EstimatedJsonEncodedSizeOf for i32 { #[rustfmt::skip] - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let v = *self; // -2_147_483_648 ..= 2_147_483_647 - if v < -999_999_999 { 11 - } else if v < -99_999_999 { 10 - } else if v < -9_999_999 { 9 - } else if v < -999_999 { 8 - } else if v < -99_999 { 7 - } else if v < -9_999 { 6 - } else if v < -999 { 5 - } else if v < -99 { 4 - } else if v < -9 { 3 - } else if v < 0 { 2 - } else if v < 10 { 1 - } else if v < 100 { 2 - } else if v < 1_000 { 3 - } else if v < 10_000 { 4 - } else if v < 100_000 { 5 - } else if v < 1_000_000 { 6 - } else if v < 10_000_000 { 7 - } else if v < 100_000_000 { 8 - } else if v < 1_000_000_000 { 9 - } else { 10 } + JsonSize::new( + if v < -999_999_999 { 11 + } else if v < -99_999_999 { 10 + } else if v < -9_999_999 { 9 + } else if v < -999_999 { 8 + } else if v < -99_999 { 7 + } else if v < -9_999 { 6 + } else if v < -999 { 5 + } else if v < -99 { 4 + } else if v < -9 { 3 + } else if v < 0 { 2 + } else if v < 10 { 1 + } else if v < 100 { 2 + } else if v < 1_000 { 3 + } else if v < 10_000 { 4 + } else if v < 100_000 { 5 + } else if v < 1_000_000 { 6 + } else if v < 10_000_000 { 7 + } else if v < 100_000_000 { 8 + } else if v < 1_000_000_000 { 9 + } else { 10 } + ) } } impl EstimatedJsonEncodedSizeOf for u64 { #[rustfmt::skip] - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let v = *self; // 0 ..= 18_446_744_073_709_551_615 - if v < 10 { 1 - } else if v < 100 { 2 - } else if v < 1_000 { 3 - } else if v < 10_000 { 4 - } else if v < 100_000 { 5 - } else if v < 1_000_000 { 6 - } else if v < 10_000_000 { 7 - } else if v < 100_000_000 { 8 - } else if v < 1_000_000_000 { 9 - } else if v < 10_000_000_000 { 10 - } else if v < 100_000_000_000 { 11 - } else if v < 1_000_000_000_000 { 12 - } else if v < 10_000_000_000_000 { 13 - } else if v < 100_000_000_000_000 { 14 - } else if v < 1_000_000_000_000_000 { 15 - } else if v < 10_000_000_000_000_000 { 16 - } else if v < 100_000_000_000_000_000 { 17 - } else if v < 1_000_000_000_000_000_000 { 18 - } else if v < 10_000_000_000_000_000_000 { 19 - } else { 20 } + JsonSize::new( + if v < 10 { 1 + } else if v < 100 { 2 + } else if v < 1_000 { 3 + } else if v < 10_000 { 4 + } else if v < 100_000 { 5 + } else if v < 1_000_000 { 6 + } else if v < 10_000_000 { 7 + } else if v < 100_000_000 { 8 + } else if v < 1_000_000_000 { 9 + } else if v < 10_000_000_000 { 10 + } else if v < 100_000_000_000 { 11 + } else if v < 1_000_000_000_000 { 12 + } else if v < 10_000_000_000_000 { 13 + } else if 
v < 100_000_000_000_000 { 14 + } else if v < 1_000_000_000_000_000 { 15 + } else if v < 10_000_000_000_000_000 { 16 + } else if v < 100_000_000_000_000_000 { 17 + } else if v < 1_000_000_000_000_000_000 { 18 + } else if v < 10_000_000_000_000_000_000 { 19 + } else { 20 } + ) } } impl EstimatedJsonEncodedSizeOf for i64 { #[rustfmt::skip] - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { let v = *self; // -9_223_372_036_854_775_808 ..= 9_223_372_036_854_775_807 - if v < -999_999_999_999_999_999 { 20 - } else if v < -99_999_999_999_999_999 { 19 - } else if v < -9_999_999_999_999_999 { 18 - } else if v < -999_999_999_999_999 { 17 - } else if v < -99_999_999_999_999 { 16 - } else if v < -9_999_999_999_999 { 15 - } else if v < -999_999_999_999 { 14 - } else if v < -99_999_999_999 { 13 - } else if v < -9_999_999_999 { 12 - } else if v < -999_999_999 { 11 - } else if v < -99_999_999 { 10 - } else if v < -9_999_999 { 9 - } else if v < -999_999 { 8 - } else if v < -99_999 { 7 - } else if v < -9_999 { 6 - } else if v < -999 { 5 - } else if v < -99 { 4 - } else if v < -9 { 3 - } else if v < 0 { 2 - } else if v < 10 { 1 - } else if v < 100 { 2 - } else if v < 1_000 { 3 - } else if v < 10_000 { 4 - } else if v < 100_000 { 5 - } else if v < 1_000_000 { 6 - } else if v < 10_000_000 { 7 - } else if v < 100_000_000 { 8 - } else if v < 1_000_000_000 { 9 - } else if v < 10_000_000_000 { 10 - } else if v < 100_000_000_000 { 11 - } else if v < 1_000_000_000_000 { 12 - } else if v < 10_000_000_000_000 { 13 - } else if v < 100_000_000_000_000 { 14 - } else if v < 1_000_000_000_000_000 { 15 - } else if v < 10_000_000_000_000_000 { 16 - } else if v < 100_000_000_000_000_000 { 17 - } else if v < 1_000_000_000_000_000_000 { 18 - } else { 19 } + JsonSize::new( + if v < -999_999_999_999_999_999 { 20 + } else if v < -99_999_999_999_999_999 { 19 + } else if v < -9_999_999_999_999_999 { 18 + } else if v < -999_999_999_999_999 { 17 + } else if v < -99_999_999_999_999 { 16 + } else if v < -9_999_999_999_999 { 15 + } else if v < -999_999_999_999 { 14 + } else if v < -99_999_999_999 { 13 + } else if v < -9_999_999_999 { 12 + } else if v < -999_999_999 { 11 + } else if v < -99_999_999 { 10 + } else if v < -9_999_999 { 9 + } else if v < -999_999 { 8 + } else if v < -99_999 { 7 + } else if v < -9_999 { 6 + } else if v < -999 { 5 + } else if v < -99 { 4 + } else if v < -9 { 3 + } else if v < 0 { 2 + } else if v < 10 { 1 + } else if v < 100 { 2 + } else if v < 1_000 { 3 + } else if v < 10_000 { 4 + } else if v < 100_000 { 5 + } else if v < 1_000_000 { 6 + } else if v < 10_000_000 { 7 + } else if v < 100_000_000 { 8 + } else if v < 1_000_000_000 { 9 + } else if v < 10_000_000_000 { 10 + } else if v < 100_000_000_000 { 11 + } else if v < 1_000_000_000_000 { 12 + } else if v < 10_000_000_000_000 { 13 + } else if v < 100_000_000_000_000 { 14 + } else if v < 1_000_000_000_000_000 { 15 + } else if v < 10_000_000_000_000_000 { 16 + } else if v < 100_000_000_000_000_000 { 17 + } else if v < 1_000_000_000_000_000_000 { 18 + } else { 19 } + ) } } impl EstimatedJsonEncodedSizeOf for usize { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { (*self as u64).estimated_json_encoded_size_of() } } impl EstimatedJsonEncodedSizeOf for isize { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { (*self as i64).estimated_json_encoded_size_of() } } @@ -453,7 +470,7 @@ mod 
tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -461,7 +478,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -469,7 +486,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -477,7 +494,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -485,7 +502,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -493,7 +510,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -501,7 +518,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -509,7 +526,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -517,7 +534,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -525,7 +542,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -538,7 +555,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -551,7 +568,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -563,7 +580,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - TestResult::from_bool(got == want.len()) + TestResult::from_bool(got == want.len().into()) } #[quickcheck] @@ -575,7 +592,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - TestResult::from_bool(got == want.len()) + TestResult::from_bool(got == want.len().into()) } #[quickcheck] @@ -583,7 +600,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -591,7 +608,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - got == want.len() + got == want.len().into() } #[quickcheck] @@ -599,7 +616,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - TestResult::from_bool(got == want.len()) + TestResult::from_bool(got == want.len().into()) } #[quickcheck] @@ -611,7 +628,7 @@ mod tests { let got = v.estimated_json_encoded_size_of(); let want = serde_json::to_string(&v).unwrap(); - TestResult::from_bool(got == want.len()) + TestResult::from_bool(got == want.len().into()) } fn 
is_inaccurately_counted_value(v: &Value) -> bool { diff --git a/lib/vector-core/src/event/log_event.rs b/lib/vector-core/src/event/log_event.rs index 353d0416fccaa..beb0afdec1e32 100644 --- a/lib/vector-core/src/event/log_event.rs +++ b/lib/vector-core/src/event/log_event.rs @@ -14,7 +14,10 @@ use crossbeam_utils::atomic::AtomicCell; use lookup::lookup_v2::TargetPath; use lookup::PathPrefix; use serde::{Deserialize, Serialize, Serializer}; -use vector_common::EventDataEq; +use vector_common::{ + json_size::{JsonSize, NonZeroJsonSize}, + EventDataEq, +}; use super::{ estimated_json_encoded_size_of::EstimatedJsonEncodedSizeOf, @@ -36,7 +39,7 @@ struct Inner { size_cache: AtomicCell>, #[serde(skip)] - json_encoded_size_cache: AtomicCell>, + json_encoded_size_cache: AtomicCell>, } impl Inner { @@ -73,12 +76,12 @@ impl ByteSizeOf for Inner { } impl EstimatedJsonEncodedSizeOf for Inner { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.json_encoded_size_cache .load() .unwrap_or_else(|| { let size = self.fields.estimated_json_encoded_size_of(); - let size = NonZeroUsize::new(size).expect("Size cannot be zero"); + let size = NonZeroJsonSize::new(size).expect("Size cannot be zero"); self.json_encoded_size_cache.store(Some(size)); size @@ -204,7 +207,7 @@ impl Finalizable for LogEvent { } impl EstimatedJsonEncodedSizeOf for LogEvent { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.inner.estimated_json_encoded_size_of() } } diff --git a/lib/vector-core/src/event/metric/mod.rs b/lib/vector-core/src/event/metric/mod.rs index 392e43a6cda74..141d3b28997d9 100644 --- a/lib/vector-core/src/event/metric/mod.rs +++ b/lib/vector-core/src/event/metric/mod.rs @@ -11,7 +11,7 @@ use std::{ }; use chrono::{DateTime, Utc}; -use vector_common::EventDataEq; +use vector_common::{json_size::JsonSize, EventDataEq}; use vector_config::configurable_component; use crate::{ @@ -463,10 +463,10 @@ impl ByteSizeOf for Metric { } impl EstimatedJsonEncodedSizeOf for Metric { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { // TODO: For now we're using the in-memory representation of the metric, but we'll convert // this to actually calculate the JSON encoded size in the near future. 
- self.size_of() + self.size_of().into() } } diff --git a/lib/vector-core/src/event/mod.rs b/lib/vector-core/src/event/mod.rs index de9e01ec4c109..04522793e3436 100644 --- a/lib/vector-core/src/event/mod.rs +++ b/lib/vector-core/src/event/mod.rs @@ -19,7 +19,7 @@ pub use r#ref::{EventMutRef, EventRef}; use serde::{Deserialize, Serialize}; pub use trace::TraceEvent; use vector_buffers::EventCount; -use vector_common::{finalization, EventDataEq}; +use vector_common::{finalization, json_size::JsonSize, EventDataEq}; pub use vrl::value::Value; #[cfg(feature = "vrl")] pub use vrl_target::{TargetEvents, VrlTarget}; @@ -65,7 +65,7 @@ impl ByteSizeOf for Event { } impl EstimatedJsonEncodedSizeOf for Event { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { match self { Event::Log(log_event) => log_event.estimated_json_encoded_size_of(), Event::Metric(metric_event) => metric_event.estimated_json_encoded_size_of(), diff --git a/lib/vector-core/src/event/trace.rs b/lib/vector-core/src/event/trace.rs index 8bc68f9880605..bd10a9e3aaca5 100644 --- a/lib/vector-core/src/event/trace.rs +++ b/lib/vector-core/src/event/trace.rs @@ -3,7 +3,7 @@ use std::{collections::BTreeMap, fmt::Debug}; use lookup::lookup_v2::TargetPath; use serde::{Deserialize, Serialize}; use vector_buffers::EventCount; -use vector_common::EventDataEq; +use vector_common::{json_size::JsonSize, EventDataEq}; use super::{ BatchNotifier, EstimatedJsonEncodedSizeOf, EventFinalizer, EventFinalizers, EventMetadata, @@ -109,7 +109,7 @@ impl ByteSizeOf for TraceEvent { } impl EstimatedJsonEncodedSizeOf for TraceEvent { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.0.estimated_json_encoded_size_of() } } diff --git a/lib/vector-core/src/stream/driver.rs b/lib/vector-core/src/stream/driver.rs index 6376be29f5d32..093a7e0c4fad0 100644 --- a/lib/vector-core/src/stream/driver.rs +++ b/lib/vector-core/src/stream/driver.rs @@ -263,6 +263,7 @@ mod tests { use tower::Service; use vector_common::{ finalization::{BatchNotifier, EventFinalizer, EventFinalizers, EventStatus, Finalizable}, + json_size::JsonSize, request_metadata::RequestMetadata, }; use vector_common::{internal_event::CountByteSize, request_metadata::MetaDescriptive}; @@ -310,7 +311,7 @@ mod tests { } fn events_sent(&self) -> CountByteSize { - CountByteSize(1, 1) + CountByteSize(1, JsonSize::new(1)) } } diff --git a/lib/vector-core/src/transform/mod.rs b/lib/vector-core/src/transform/mod.rs index b506716a2cfd0..a60cd85c8200a 100644 --- a/lib/vector-core/src/transform/mod.rs +++ b/lib/vector-core/src/transform/mod.rs @@ -4,6 +4,7 @@ use futures::{Stream, StreamExt}; use vector_common::internal_event::{ self, register, CountByteSize, EventsSent, InternalEventHandle as _, Registered, DEFAULT_OUTPUT, }; +use vector_common::json_size::JsonSize; use vector_common::EventDataEq; use crate::{ @@ -247,7 +248,7 @@ impl TransformOutputs { if let Some(primary) = self.primary_output.as_mut() { let count = buf.primary_buffer.as_ref().map_or(0, OutputBuffer::len); let byte_size = buf.primary_buffer.as_ref().map_or( - 0, + JsonSize::new(0), EstimatedJsonEncodedSizeOf::estimated_json_encoded_size_of, ); buf.primary_buffer @@ -487,7 +488,7 @@ impl EventDataEq> for OutputBuffer { } impl EstimatedJsonEncodedSizeOf for OutputBuffer { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.0 .iter() 
.map(EstimatedJsonEncodedSizeOf::estimated_json_encoded_size_of) diff --git a/src/components/validation/validators/component_spec/sources.rs b/src/components/validation/validators/component_spec/sources.rs index f3593116b795a..c25b217a399e4 100644 --- a/src/components/validation/validators/component_spec/sources.rs +++ b/src/components/validation/validators/component_spec/sources.rs @@ -1,6 +1,7 @@ use std::fmt::{Display, Formatter}; use bytes::BytesMut; +use vector_common::json_size::JsonSize; use vector_core::event::{Event, MetricKind}; use vector_core::EstimatedJsonEncodedSizeOf; @@ -163,7 +164,7 @@ fn validate_component_received_event_bytes_total( } } - let expected_bytes = inputs.iter().fold(0, |acc, i| { + let expected_bytes = inputs.iter().fold(JsonSize::new(0), |acc, i| { if let TestEvent::Passthrough(_) = i { let size = vec![i.clone().into_event()].estimated_json_encoded_size_of(); return acc + size; @@ -179,7 +180,7 @@ fn validate_component_received_event_bytes_total( expected_bytes, ); - if metric_bytes != expected_bytes as f64 { + if JsonSize::new(metric_bytes as usize) != expected_bytes { errs.push(format!( "{}: expected {} bytes, but received {}", SourceMetrics::EventsReceivedBytes, @@ -367,7 +368,7 @@ fn validate_component_sent_event_bytes_total( } } - let mut expected_bytes = 0; + let mut expected_bytes = JsonSize::zero(); for e in outputs { expected_bytes += vec![e].estimated_json_encoded_size_of(); } @@ -379,7 +380,7 @@ fn validate_component_sent_event_bytes_total( expected_bytes, ); - if metric_bytes != expected_bytes as f64 { + if JsonSize::new(metric_bytes as usize) != expected_bytes { errs.push(format!( "{}: expected {} bytes, but received {}.", SourceMetrics::SentEventBytesTotal, diff --git a/src/internal_events/apache_metrics.rs b/src/internal_events/apache_metrics.rs index 86b5bfbdfd61c..0e42463f960e8 100644 --- a/src/internal_events/apache_metrics.rs +++ b/src/internal_events/apache_metrics.rs @@ -1,6 +1,9 @@ use metrics::counter; -use vector_common::internal_event::{error_stage, error_type}; +use vector_common::{ + internal_event::{error_stage, error_type}, + json_size::JsonSize, +}; use vector_core::internal_event::InternalEvent; use super::prelude::http_error_code; @@ -8,7 +11,7 @@ use crate::sources::apache_metrics; #[derive(Debug)] pub struct ApacheMetricsEventsReceived<'a> { - pub byte_size: usize, + pub byte_size: JsonSize, pub count: usize, pub endpoint: &'a str, } @@ -22,7 +25,7 @@ impl<'a> InternalEvent for ApacheMetricsEventsReceived<'a> { "endpoint" => self.endpoint.to_owned(), ); counter!( - "component_received_event_bytes_total", self.byte_size as u64, + "component_received_event_bytes_total", self.byte_size.get() as u64, "endpoint" => self.endpoint.to_owned(), ); } diff --git a/src/internal_events/aws_ecs_metrics.rs b/src/internal_events/aws_ecs_metrics.rs index 92340ba484110..2ae8081697a66 100644 --- a/src/internal_events/aws_ecs_metrics.rs +++ b/src/internal_events/aws_ecs_metrics.rs @@ -1,14 +1,17 @@ use std::borrow::Cow; use metrics::counter; -use vector_common::internal_event::{error_stage, error_type}; +use vector_common::{ + internal_event::{error_stage, error_type}, + json_size::JsonSize, +}; use vector_core::internal_event::InternalEvent; use super::prelude::{http_error_code, hyper_error_code}; #[derive(Debug)] pub struct AwsEcsMetricsEventsReceived<'a> { - pub byte_size: usize, + pub byte_size: JsonSize, pub count: usize, pub endpoint: &'a str, } @@ -27,7 +30,7 @@ impl<'a> InternalEvent for AwsEcsMetricsEventsReceived<'a> { "endpoint" => 
self.endpoint.to_string(), ); counter!( - "component_received_event_bytes_total", self.byte_size as u64, + "component_received_event_bytes_total", self.byte_size.get() as u64, "endpoint" => self.endpoint.to_string(), ); } diff --git a/src/internal_events/docker_logs.rs b/src/internal_events/docker_logs.rs index d126b656a6773..0447624b01630 100644 --- a/src/internal_events/docker_logs.rs +++ b/src/internal_events/docker_logs.rs @@ -1,12 +1,15 @@ use bollard::errors::Error; use chrono::ParseError; use metrics::counter; -use vector_common::internal_event::{error_stage, error_type}; +use vector_common::{ + internal_event::{error_stage, error_type}, + json_size::JsonSize, +}; use vector_core::internal_event::InternalEvent; #[derive(Debug)] pub struct DockerLogsEventsReceived<'a> { - pub byte_size: usize, + pub byte_size: JsonSize, pub container_id: &'a str, pub container_name: &'a str, } @@ -24,7 +27,7 @@ impl InternalEvent for DockerLogsEventsReceived<'_> { "container_name" => self.container_name.to_owned() ); counter!( - "component_received_event_bytes_total", self.byte_size as u64, + "component_received_event_bytes_total", self.byte_size.get() as u64, "container_name" => self.container_name.to_owned() ); } diff --git a/src/internal_events/exec.rs b/src/internal_events/exec.rs index f171ca374fbb1..4fd3461a1f822 100644 --- a/src/internal_events/exec.rs +++ b/src/internal_events/exec.rs @@ -3,8 +3,9 @@ use std::time::Duration; use crate::emit; use metrics::{counter, histogram}; use tokio::time::error::Elapsed; -use vector_common::internal_event::{ - error_stage, error_type, ComponentEventsDropped, UNINTENTIONAL, +use vector_common::{ + internal_event::{error_stage, error_type, ComponentEventsDropped, UNINTENTIONAL}, + json_size::JsonSize, }; use vector_core::internal_event::InternalEvent; @@ -14,7 +15,7 @@ use super::prelude::io_error_code; pub struct ExecEventsReceived<'a> { pub count: usize, pub command: &'a str, - pub byte_size: usize, + pub byte_size: JsonSize, } impl InternalEvent for ExecEventsReceived<'_> { @@ -22,7 +23,7 @@ impl InternalEvent for ExecEventsReceived<'_> { trace!( message = "Events received.", count = self.count, - byte_size = self.byte_size, + byte_size = self.byte_size.get(), command = %self.command, ); counter!( @@ -30,7 +31,7 @@ impl InternalEvent for ExecEventsReceived<'_> { "command" => self.command.to_owned(), ); counter!( - "component_received_event_bytes_total", self.byte_size as u64, + "component_received_event_bytes_total", self.byte_size.get() as u64, "command" => self.command.to_owned(), ); } diff --git a/src/internal_events/file.rs b/src/internal_events/file.rs index 6e76be2f5d398..aedac3d74afb2 100644 --- a/src/internal_events/file.rs +++ b/src/internal_events/file.rs @@ -6,6 +6,7 @@ use crate::emit; #[cfg(any(feature = "sources-file", feature = "sources-kubernetes_logs"))] pub use self::source::*; + use vector_common::internal_event::{error_stage, error_type}; #[derive(Debug)] @@ -86,7 +87,10 @@ mod source { use super::{FileOpen, InternalEvent}; use crate::emit; - use vector_common::internal_event::{error_stage, error_type}; + use vector_common::{ + internal_event::{error_stage, error_type}, + json_size::JsonSize, + }; #[derive(Debug)] pub struct FileBytesReceived<'a> { @@ -114,7 +118,7 @@ mod source { pub struct FileEventsReceived<'a> { pub count: usize, pub file: &'a str, - pub byte_size: usize, + pub byte_size: JsonSize, } impl InternalEvent for FileEventsReceived<'_> { @@ -130,7 +134,7 @@ mod source { "file" => self.file.to_owned(), ); counter!( - 
"component_received_event_bytes_total", self.byte_size as u64, + "component_received_event_bytes_total", self.byte_size.get() as u64, "file" => self.file.to_owned(), ); } diff --git a/src/internal_events/http.rs b/src/internal_events/http.rs index a016998b7655b..5243cf47628f5 100644 --- a/src/internal_events/http.rs +++ b/src/internal_events/http.rs @@ -3,7 +3,10 @@ use std::error::Error; use metrics::{counter, histogram}; use vector_core::internal_event::InternalEvent; -use vector_common::internal_event::{error_stage, error_type}; +use vector_common::{ + internal_event::{error_stage, error_type}, + json_size::JsonSize, +}; #[derive(Debug)] pub struct HttpBytesReceived<'a> { @@ -31,7 +34,7 @@ impl InternalEvent for HttpBytesReceived<'_> { #[derive(Debug)] pub struct HttpEventsReceived<'a> { pub count: usize, - pub byte_size: usize, + pub byte_size: JsonSize, pub http_path: &'a str, pub protocol: &'static str, } @@ -54,7 +57,7 @@ impl InternalEvent for HttpEventsReceived<'_> { ); counter!( "component_received_event_bytes_total", - self.byte_size as u64, + self.byte_size.get() as u64, "http_path" => self.http_path.to_string(), "protocol" => self.protocol, ); diff --git a/src/internal_events/http_client_source.rs b/src/internal_events/http_client_source.rs index b5e7ec2d8b68a..b5eb27e8a3fd9 100644 --- a/src/internal_events/http_client_source.rs +++ b/src/internal_events/http_client_source.rs @@ -1,12 +1,15 @@ use metrics::counter; -use vector_common::internal_event::{error_stage, error_type}; +use vector_common::{ + internal_event::{error_stage, error_type}, + json_size::JsonSize, +}; use vector_core::internal_event::InternalEvent; use super::prelude::http_error_code; #[derive(Debug)] pub struct HttpClientEventsReceived { - pub byte_size: usize, + pub byte_size: JsonSize, pub count: usize, pub url: String, } @@ -24,7 +27,7 @@ impl InternalEvent for HttpClientEventsReceived { "uri" => self.url.clone(), ); counter!( - "component_received_event_bytes_total", self.byte_size as u64, + "component_received_event_bytes_total", self.byte_size.get() as u64, "uri" => self.url.clone(), ); } diff --git a/src/internal_events/internal_logs.rs b/src/internal_events/internal_logs.rs index f78df4e2d32fd..5d6637bfec986 100644 --- a/src/internal_events/internal_logs.rs +++ b/src/internal_events/internal_logs.rs @@ -1,4 +1,5 @@ use metrics::counter; +use vector_common::json_size::JsonSize; use vector_core::internal_event::InternalEvent; #[derive(Debug)] @@ -18,7 +19,7 @@ impl InternalEvent for InternalLogsBytesReceived { #[derive(Debug)] pub struct InternalLogsEventsReceived { - pub byte_size: usize, + pub byte_size: JsonSize, pub count: usize, } @@ -28,7 +29,7 @@ impl InternalEvent for InternalLogsEventsReceived { counter!("component_received_events_total", self.count as u64); counter!( "component_received_event_bytes_total", - self.byte_size as u64 + self.byte_size.get() as u64 ); } } diff --git a/src/internal_events/kafka.rs b/src/internal_events/kafka.rs index ab20ec0d47cde..57c25d905c60f 100644 --- a/src/internal_events/kafka.rs +++ b/src/internal_events/kafka.rs @@ -1,7 +1,10 @@ use metrics::{counter, gauge}; use vector_core::{internal_event::InternalEvent, update_counter}; -use vector_common::internal_event::{error_stage, error_type}; +use vector_common::{ + internal_event::{error_stage, error_type}, + json_size::JsonSize, +}; #[derive(Debug)] pub struct KafkaBytesReceived<'a> { @@ -32,7 +35,7 @@ impl<'a> InternalEvent for KafkaBytesReceived<'a> { #[derive(Debug)] pub struct KafkaEventsReceived<'a> { - pub 
byte_size: usize, + pub byte_size: JsonSize, pub count: usize, pub topic: &'a str, pub partition: i32, @@ -50,7 +53,7 @@ impl<'a> InternalEvent for KafkaEventsReceived<'a> { counter!("component_received_events_total", self.count as u64, "topic" => self.topic.to_string(), "partition" => self.partition.to_string()); counter!( "component_received_event_bytes_total", - self.byte_size as u64, + self.byte_size.get() as u64, "topic" => self.topic.to_string(), "partition" => self.partition.to_string(), ); diff --git a/src/internal_events/kubernetes_logs.rs b/src/internal_events/kubernetes_logs.rs index a008fdb4499c1..aff0109295078 100644 --- a/src/internal_events/kubernetes_logs.rs +++ b/src/internal_events/kubernetes_logs.rs @@ -3,14 +3,15 @@ use vector_core::internal_event::InternalEvent; use crate::emit; use crate::event::Event; -use vector_common::internal_event::{ - error_stage, error_type, ComponentEventsDropped, UNINTENTIONAL, +use vector_common::{ + internal_event::{error_stage, error_type, ComponentEventsDropped, UNINTENTIONAL}, + json_size::JsonSize, }; #[derive(Debug)] pub struct KubernetesLogsEventsReceived<'a> { pub file: &'a str, - pub byte_size: usize, + pub byte_size: JsonSize, pub pod_info: Option, } @@ -34,13 +35,13 @@ impl InternalEvent for KubernetesLogsEventsReceived<'_> { let pod_namespace = pod_info.namespace; counter!("component_received_events_total", 1, "pod_name" => pod_name.clone(), "pod_namespace" => pod_namespace.clone()); - counter!("component_received_event_bytes_total", self.byte_size as u64, "pod_name" => pod_name, "pod_namespace" => pod_namespace); + counter!("component_received_event_bytes_total", self.byte_size.get() as u64, "pod_name" => pod_name, "pod_namespace" => pod_namespace); } None => { counter!("component_received_events_total", 1); counter!( "component_received_event_bytes_total", - self.byte_size as u64 + self.byte_size.get() as u64 ); } } diff --git a/src/internal_events/mongodb_metrics.rs b/src/internal_events/mongodb_metrics.rs index eb585fbccf6de..1e749dc5ba8c2 100644 --- a/src/internal_events/mongodb_metrics.rs +++ b/src/internal_events/mongodb_metrics.rs @@ -2,12 +2,15 @@ use metrics::counter; use mongodb::{bson, error::Error as MongoError}; use vector_core::internal_event::InternalEvent; -use vector_common::internal_event::{error_stage, error_type}; +use vector_common::{ + internal_event::{error_stage, error_type}, + json_size::JsonSize, +}; #[derive(Debug)] pub struct MongoDbMetricsEventsReceived<'a> { pub count: usize, - pub byte_size: usize, + pub byte_size: JsonSize, pub endpoint: &'a str, } @@ -17,7 +20,7 @@ impl<'a> InternalEvent for MongoDbMetricsEventsReceived<'a> { trace!( message = "Events received.", count = self.count, - byte_size = self.byte_size, + byte_size = self.byte_size.get(), endpoint = self.endpoint, ); counter!( @@ -25,7 +28,7 @@ impl<'a> InternalEvent for MongoDbMetricsEventsReceived<'a> { "endpoint" => self.endpoint.to_owned(), ); counter!( - "component_received_event_bytes_total", self.byte_size as u64, + "component_received_event_bytes_total", self.byte_size.get() as u64, "endpoint" => self.endpoint.to_owned(), ); } diff --git a/src/internal_events/nginx_metrics.rs b/src/internal_events/nginx_metrics.rs index 46da39b6288f4..eb5adcf8485d1 100644 --- a/src/internal_events/nginx_metrics.rs +++ b/src/internal_events/nginx_metrics.rs @@ -2,11 +2,14 @@ use metrics::counter; use vector_core::internal_event::InternalEvent; use crate::sources::nginx_metrics::parser::ParseError; -use vector_common::internal_event::{error_stage, 
error_type}; +use vector_common::{ + internal_event::{error_stage, error_type}, + json_size::JsonSize, +}; #[derive(Debug)] pub struct NginxMetricsEventsReceived<'a> { - pub byte_size: usize, + pub byte_size: JsonSize, pub count: usize, pub endpoint: &'a str, } @@ -24,7 +27,7 @@ impl<'a> InternalEvent for NginxMetricsEventsReceived<'a> { "endpoint" => self.endpoint.to_owned(), ); counter!( - "component_received_event_bytes_total", self.byte_size as u64, + "component_received_event_bytes_total", self.byte_size.get() as u64, "endpoint" => self.endpoint.to_owned(), ); } diff --git a/src/internal_events/socket.rs b/src/internal_events/socket.rs index 58c0c3b69f4fb..daa03a27991b5 100644 --- a/src/internal_events/socket.rs +++ b/src/internal_events/socket.rs @@ -1,5 +1,8 @@ use metrics::counter; -use vector_common::internal_event::{error_stage, error_type}; +use vector_common::{ + internal_event::{error_stage, error_type}, + json_size::JsonSize, +}; use vector_core::internal_event::{ComponentEventsDropped, InternalEvent, UNINTENTIONAL}; use crate::emit; @@ -45,7 +48,7 @@ impl InternalEvent for SocketBytesReceived { #[derive(Debug)] pub struct SocketEventsReceived { pub mode: SocketMode, - pub byte_size: usize, + pub byte_size: JsonSize, pub count: usize, } @@ -55,11 +58,11 @@ impl InternalEvent for SocketEventsReceived { trace!( message = "Events received.", count = self.count, - byte_size = self.byte_size, + byte_size = self.byte_size.get(), %mode, ); counter!("component_received_events_total", self.count as u64, "mode" => mode); - counter!("component_received_event_bytes_total", self.byte_size as u64, "mode" => mode); + counter!("component_received_event_bytes_total", self.byte_size.get() as u64, "mode" => mode); } } @@ -88,14 +91,14 @@ impl InternalEvent for SocketBytesSent { pub struct SocketEventsSent { pub mode: SocketMode, pub count: u64, - pub byte_size: usize, + pub byte_size: JsonSize, } impl InternalEvent for SocketEventsSent { fn emit(self) { - trace!(message = "Events sent.", count = %self.count, byte_size = %self.byte_size); + trace!(message = "Events sent.", count = %self.count, byte_size = %self.byte_size.get()); counter!("component_sent_events_total", self.count, "mode" => self.mode.as_str()); - counter!("component_sent_event_bytes_total", self.byte_size as u64, "mode" => self.mode.as_str()); + counter!("component_sent_event_bytes_total", self.byte_size.get() as u64, "mode" => self.mode.as_str()); } } diff --git a/src/sinks/amqp/request_builder.rs b/src/sinks/amqp/request_builder.rs index 313b37626aec8..ad8fe36565453 100644 --- a/src/sinks/amqp/request_builder.rs +++ b/src/sinks/amqp/request_builder.rs @@ -13,8 +13,10 @@ use lapin::BasicProperties; use std::io; use vector_common::{ finalization::{EventFinalizers, Finalizable}, + json_size::JsonSize, request_metadata::RequestMetadata, }; +use vector_core::EstimatedJsonEncodedSizeOf; use super::{encoder::AmqpEncoder, service::AmqpRequest, sink::AmqpEvent}; @@ -23,6 +25,7 @@ pub(super) struct AmqpMetadata { routing_key: String, properties: BasicProperties, finalizers: EventFinalizers, + event_json_size: JsonSize, } /// Build the request to send to `AMQP` by using the encoder to convert it into @@ -59,6 +62,7 @@ impl RequestBuilder for AmqpRequestBuilder { routing_key: input.routing_key, properties: input.properties, finalizers: input.event.take_finalizers(), + event_json_size: input.event.estimated_json_encoded_size_of(), }; (metadata, builder, input.event) @@ -78,6 +82,7 @@ impl RequestBuilder for AmqpRequestBuilder { 
amqp_metadata.properties, amqp_metadata.finalizers, metadata, + amqp_metadata.event_json_size, ) } } diff --git a/src/sinks/amqp/service.rs b/src/sinks/amqp/service.rs index 000c753ec97e2..ff1e71487298a 100644 --- a/src/sinks/amqp/service.rs +++ b/src/sinks/amqp/service.rs @@ -13,6 +13,7 @@ use tower::Service; use vector_common::{ finalization::{EventFinalizers, EventStatus, Finalizable}, internal_event::CountByteSize, + json_size::JsonSize, request_metadata::{MetaDescriptive, RequestMetadata}, }; use vector_core::stream::DriverResponse; @@ -26,6 +27,7 @@ pub(super) struct AmqpRequest { properties: BasicProperties, finalizers: EventFinalizers, metadata: RequestMetadata, + event_json_size: JsonSize, } impl AmqpRequest { @@ -36,6 +38,7 @@ impl AmqpRequest { properties: BasicProperties, finalizers: EventFinalizers, metadata: RequestMetadata, + event_json_size: JsonSize, ) -> Self { Self { body, @@ -44,6 +47,7 @@ impl AmqpRequest { properties, finalizers, metadata, + event_json_size, } } } @@ -63,6 +67,7 @@ impl MetaDescriptive for AmqpRequest { /// A successful response from `AMQP`. pub(super) struct AmqpResponse { byte_size: usize, + json_size: JsonSize, } impl DriverResponse for AmqpResponse { @@ -71,7 +76,7 @@ impl DriverResponse for AmqpResponse { } fn events_sent(&self) -> CountByteSize { - CountByteSize(1, self.byte_size) + CountByteSize(1, self.json_size) } fn bytes_sent(&self) -> Option { @@ -128,14 +133,20 @@ impl Service for AmqpService { Ok(result) => match result.await { Ok(lapin::publisher_confirm::Confirmation::Nack(_)) => { warn!("Received Negative Acknowledgement from AMQP server."); - Ok(AmqpResponse { byte_size }) + Ok(AmqpResponse { + json_size: req.event_json_size, + byte_size, + }) } Err(error) => { // TODO: In due course the caller could emit these on error. emit!(AmqpAcknowledgementError { error: &error }); Err(AmqpError::AmqpAcknowledgementFailed { error }) } - Ok(_) => Ok(AmqpResponse { byte_size }), + Ok(_) => Ok(AmqpResponse { + json_size: req.event_json_size, + byte_size, + }), }, Err(error) => { // TODO: In due course the caller could emit these on error. 
diff --git a/src/sinks/amqp/sink.rs b/src/sinks/amqp/sink.rs index 333b325dbeab4..ff0bdeb9d0042 100644 --- a/src/sinks/amqp/sink.rs +++ b/src/sinks/amqp/sink.rs @@ -12,6 +12,7 @@ use serde::Serialize; use std::sync::Arc; use tower::ServiceBuilder; use vector_buffers::EventCount; +use vector_common::json_size::JsonSize; use vector_core::{sink::StreamSink, ByteSizeOf, EstimatedJsonEncodedSizeOf}; use super::{ @@ -49,7 +50,7 @@ impl ByteSizeOf for AmqpEvent { } impl EstimatedJsonEncodedSizeOf for AmqpEvent { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.event.estimated_json_encoded_size_of() } } diff --git a/src/sinks/aws_cloudwatch_logs/service.rs b/src/sinks/aws_cloudwatch_logs/service.rs index 505de36f77e8d..93ed0b52252a2 100644 --- a/src/sinks/aws_cloudwatch_logs/service.rs +++ b/src/sinks/aws_cloudwatch_logs/service.rs @@ -22,7 +22,7 @@ use tower::{ timeout::Timeout, Service, ServiceBuilder, ServiceExt, }; -use vector_common::request_metadata::MetaDescriptive; +use vector_common::{json_size::JsonSize, request_metadata::MetaDescriptive}; use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; use crate::{ @@ -99,7 +99,7 @@ impl From> for CloudwatchError { #[derive(Debug)] pub struct CloudwatchResponse { events_count: usize, - events_byte_size: usize, + events_byte_size: JsonSize, } impl crate::sinks::util::sink::Response for CloudwatchResponse { @@ -158,7 +158,7 @@ impl Service for CloudwatchLogsPartitionSvc { fn call(&mut self, req: BatchCloudwatchRequest) -> Self::Future { let events_count = req.get_metadata().event_count(); - let events_byte_size = req.get_metadata().events_byte_size(); + let events_byte_size = req.get_metadata().events_estimated_json_encoded_byte_size(); let key = req.key; let events = req diff --git a/src/sinks/aws_cloudwatch_metrics/mod.rs b/src/sinks/aws_cloudwatch_metrics/mod.rs index e9bb6fe3c792f..accc041b54c32 100644 --- a/src/sinks/aws_cloudwatch_metrics/mod.rs +++ b/src/sinks/aws_cloudwatch_metrics/mod.rs @@ -15,7 +15,7 @@ use futures_util::{future, future::BoxFuture}; use std::task::{Context, Poll}; use tower::Service; use vector_config::configurable_component; -use vector_core::{sink::VectorSink, EstimatedJsonEncodedSizeOf}; +use vector_core::{sink::VectorSink, ByteSizeOf, EstimatedJsonEncodedSizeOf}; use crate::{ aws::{ @@ -236,7 +236,8 @@ impl CloudWatchMetricsSvc { .sink_map_err(|error| error!(message = "Fatal CloudwatchMetrics sink error.", %error)) .with_flat_map(move |event: Event| { stream::iter({ - let byte_size = event.estimated_json_encoded_size_of(); + let byte_size = event.allocated_bytes(); + let json_byte_size = event.estimated_json_encoded_size_of(); normalizer.normalize(event.into_metric()).map(|mut metric| { let namespace = metric .take_namespace() @@ -245,6 +246,7 @@ impl CloudWatchMetricsSvc { Ok(EncodedEvent::new( PartitionInnerBuffer::new(metric, namespace), byte_size, + json_byte_size, )) }) }) diff --git a/src/sinks/aws_kinesis/service.rs b/src/sinks/aws_kinesis/service.rs index a1058806288c4..9ceeb8c8d4938 100644 --- a/src/sinks/aws_kinesis/service.rs +++ b/src/sinks/aws_kinesis/service.rs @@ -7,7 +7,7 @@ use aws_smithy_client::SdkError; use aws_types::region::Region; use futures::future::BoxFuture; use tower::Service; -use vector_common::request_metadata::MetaDescriptive; +use vector_common::{json_size::JsonSize, request_metadata::MetaDescriptive}; use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; use super::{ @@ -41,7 
+41,7 @@ where pub struct KinesisResponse { count: usize, - events_byte_size: usize, + events_byte_size: JsonSize, } impl DriverResponse for KinesisResponse { @@ -72,7 +72,9 @@ where // Emission of internal events for errors and dropped events is handled upstream by the caller. fn call(&mut self, requests: BatchKinesisRequest) -> Self::Future { - let events_byte_size = requests.get_metadata().events_byte_size(); + let events_byte_size = requests + .get_metadata() + .events_estimated_json_encoded_byte_size(); let count = requests.get_metadata().event_count(); let records = requests diff --git a/src/sinks/aws_sqs/request_builder.rs b/src/sinks/aws_sqs/request_builder.rs index 22e1340be1a1f..03d30f34f3737 100644 --- a/src/sinks/aws_sqs/request_builder.rs +++ b/src/sinks/aws_sqs/request_builder.rs @@ -130,7 +130,7 @@ pub(crate) struct SendMessageEntry { pub message_deduplication_id: Option, pub queue_url: String, finalizers: EventFinalizers, - metadata: RequestMetadata, + pub metadata: RequestMetadata, } impl ByteSizeOf for SendMessageEntry { diff --git a/src/sinks/aws_sqs/service.rs b/src/sinks/aws_sqs/service.rs index 15ffd155e0072..38b20fddfe21f 100644 --- a/src/sinks/aws_sqs/service.rs +++ b/src/sinks/aws_sqs/service.rs @@ -4,6 +4,7 @@ use aws_sdk_sqs::{error::SendMessageError, types::SdkError, Client as SqsClient} use futures::{future::BoxFuture, TryFutureExt}; use tower::Service; use tracing::Instrument; +use vector_common::json_size::JsonSize; use vector_core::{ event::EventStatus, internal_event::CountByteSize, stream::DriverResponse, ByteSizeOf, }; @@ -44,7 +45,10 @@ impl Service for SqsService { .set_message_deduplication_id(entry.message_deduplication_id) .queue_url(entry.queue_url) .send() - .map_ok(|_| SendMessageResponse { byte_size }) + .map_ok(|_| SendMessageResponse { + byte_size, + json_byte_size: entry.metadata.events_estimated_json_encoded_byte_size(), + }) .instrument(info_span!("request").or_current()) .await }) @@ -53,6 +57,7 @@ impl Service for SqsService { pub(crate) struct SendMessageResponse { byte_size: usize, + json_byte_size: JsonSize, } impl DriverResponse for SendMessageResponse { @@ -61,6 +66,10 @@ impl DriverResponse for SendMessageResponse { } fn events_sent(&self) -> CountByteSize { - CountByteSize(1, self.byte_size) + CountByteSize(1, self.json_byte_size) + } + + fn bytes_sent(&self) -> Option { + Some(self.byte_size) } } diff --git a/src/sinks/azure_blob/request_builder.rs b/src/sinks/azure_blob/request_builder.rs index f3e85cc5a8e7c..2ec67a7c758eb 100644 --- a/src/sinks/azure_blob/request_builder.rs +++ b/src/sinks/azure_blob/request_builder.rs @@ -3,7 +3,7 @@ use chrono::Utc; use codecs::encoding::Framer; use uuid::Uuid; use vector_common::request_metadata::RequestMetadata; -use vector_core::ByteSizeOf; +use vector_core::EstimatedJsonEncodedSizeOf; use crate::{ codecs::{Encoder, Transformer}, @@ -51,7 +51,7 @@ impl RequestBuilder<(String, Vec)> for AzureBlobRequestOptions { let azure_metadata = AzureBlobMetadata { partition_key, count: events.len(), - byte_size: events.size_of(), + byte_size: events.estimated_json_encoded_size_of(), finalizers, }; diff --git a/src/sinks/azure_common/config.rs b/src/sinks/azure_common/config.rs index 2b3b663d89753..56ed8d210f694 100644 --- a/src/sinks/azure_common/config.rs +++ b/src/sinks/azure_common/config.rs @@ -8,7 +8,10 @@ use bytes::Bytes; use futures::FutureExt; use http::StatusCode; use snafu::Snafu; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::{ + 
json_size::JsonSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; use crate::{ @@ -41,7 +44,7 @@ impl MetaDescriptive for AzureBlobRequest { pub struct AzureBlobMetadata { pub partition_key: String, pub count: usize, - pub byte_size: usize, + pub byte_size: JsonSize, pub finalizers: EventFinalizers, } @@ -62,7 +65,7 @@ impl RetryLogic for AzureBlobRetryLogic { pub struct AzureBlobResponse { pub inner: PutBlockBlobResponse, pub count: usize, - pub events_byte_size: usize, + pub events_byte_size: JsonSize, pub byte_size: usize, } diff --git a/src/sinks/blackhole/sink.rs b/src/sinks/blackhole/sink.rs index 92f90377931b1..09fb5f6353d94 100644 --- a/src/sinks/blackhole/sink.rs +++ b/src/sinks/blackhole/sink.rs @@ -92,10 +92,10 @@ impl StreamSink for BlackholeSink { _ = self.total_events.fetch_add(events.len(), Ordering::AcqRel); _ = self .total_raw_bytes - .fetch_add(message_len, Ordering::AcqRel); + .fetch_add(message_len.get(), Ordering::AcqRel); events_sent.emit(CountByteSize(events.len(), message_len)); - bytes_sent.emit(ByteSize(message_len)); + bytes_sent.emit(ByteSize(message_len.get())); } // Notify the reporting task to shutdown. diff --git a/src/sinks/databend/service.rs b/src/sinks/databend/service.rs index 23da26f560d41..473a2d3220ba3 100644 --- a/src/sinks/databend/service.rs +++ b/src/sinks/databend/service.rs @@ -85,7 +85,7 @@ impl DriverResponse for DatabendResponse { fn events_sent(&self) -> CountByteSize { CountByteSize( self.metadata.event_count(), - self.metadata.events_byte_size(), + self.metadata.events_estimated_json_encoded_byte_size(), ) } diff --git a/src/sinks/datadog/events/service.rs b/src/sinks/datadog/events/service.rs index 25669c45a9ff0..693929d62e961 100644 --- a/src/sinks/datadog/events/service.rs +++ b/src/sinks/datadog/events/service.rs @@ -8,7 +8,7 @@ use futures::{ use http::Request; use hyper::Body; use tower::{Service, ServiceExt}; -use vector_common::request_metadata::MetaDescriptive; +use vector_common::{json_size::JsonSize, request_metadata::MetaDescriptive}; use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; use crate::{ @@ -23,7 +23,7 @@ use crate::{ pub struct DatadogEventsResponse { pub(self) event_status: EventStatus, pub http_status: http::StatusCode, - pub event_byte_size: usize, + pub event_byte_size: JsonSize, } impl DriverResponse for DatadogEventsResponse { @@ -90,7 +90,7 @@ impl Service for DatadogEventsService { Box::pin(async move { http_service.ready().await?; - let event_byte_size = req.get_metadata().events_byte_size(); + let event_byte_size = req.get_metadata().events_estimated_json_encoded_byte_size(); let http_response = http_service.call(req).await?; let event_status = if http_response.is_successful() { EventStatus::Delivered diff --git a/src/sinks/datadog/logs/service.rs b/src/sinks/datadog/logs/service.rs index 5401165df873f..06bc923ad3e36 100644 --- a/src/sinks/datadog/logs/service.rs +++ b/src/sinks/datadog/logs/service.rs @@ -14,7 +14,10 @@ use hyper::Body; use indexmap::IndexMap; use tower::Service; use tracing::Instrument; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::{ + json_size::JsonSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; use vector_core::{ event::{EventFinalizers, EventStatus, Finalizable}, internal_event::CountByteSize, @@ -65,7 +68,7 @@ impl MetaDescriptive for LogApiRequest { pub struct LogApiResponse { event_status: EventStatus, 
count: usize, - events_byte_size: usize, + events_byte_size: JsonSize, raw_byte_size: usize, } @@ -137,7 +140,9 @@ impl Service for LogApiService { }; let count = request.get_metadata().event_count(); - let events_byte_size = request.get_metadata().events_byte_size(); + let events_byte_size = request + .get_metadata() + .events_estimated_json_encoded_byte_size(); let raw_byte_size = request.uncompressed_size; let mut http_request = http_request.header(CONTENT_LENGTH, request.body.len()); diff --git a/src/sinks/datadog/metrics/request_builder.rs b/src/sinks/datadog/metrics/request_builder.rs index c6b287b39ccc1..64b1226b661bf 100644 --- a/src/sinks/datadog/metrics/request_builder.rs +++ b/src/sinks/datadog/metrics/request_builder.rs @@ -3,7 +3,10 @@ use serde_json::error::Category; use snafu::Snafu; use std::{num::NonZeroUsize, sync::Arc}; use vector_common::request_metadata::RequestMetadata; -use vector_core::event::{EventFinalizers, Finalizable, Metric}; +use vector_core::{ + event::{EventFinalizers, Finalizable, Metric}, + EstimatedJsonEncodedSizeOf, +}; use super::{ config::{DatadogMetricsEndpoint, DatadogMetricsEndpointConfiguration}, @@ -209,6 +212,7 @@ impl IncrementalRequestBuilder<((Option>, DatadogMetricsEndpoint), Vec< if n > 0 { match encoder.finish() { Ok((payload, mut metrics, raw_bytes_written)) => { + let json_size = metrics.estimated_json_encoded_size_of(); let finalizers = metrics.take_finalizers(); let metadata = DDMetricsMetadata { api_key: api_key.as_ref().map(Arc::clone), @@ -219,7 +223,7 @@ impl IncrementalRequestBuilder<((Option>, DatadogMetricsEndpoint), Vec< let builder = RequestMetadataBuilder::new( metrics.len(), raw_bytes_written, - raw_bytes_written, + json_size, ); let bytes_len = NonZeroUsize::new(payload.len()) .expect("payload should never be zero length"); @@ -329,6 +333,7 @@ fn encode_now_or_never( encoder .finish() .map(|(payload, mut processed, raw_bytes_written)| { + let json_size = processed.estimated_json_encoded_size_of(); let finalizers = processed.take_finalizers(); let ddmetrics_metadata = DDMetricsMetadata { api_key, @@ -336,8 +341,7 @@ fn encode_now_or_never( finalizers, raw_bytes: raw_bytes_written, }; - let builder = - RequestMetadataBuilder::new(metrics_len, raw_bytes_written, raw_bytes_written); + let builder = RequestMetadataBuilder::new(metrics_len, raw_bytes_written, json_size); let bytes_len = NonZeroUsize::new(payload.len()).expect("payload should never be zero length"); let request_metadata = builder.with_request_size(bytes_len); diff --git a/src/sinks/datadog/metrics/service.rs b/src/sinks/datadog/metrics/service.rs index f107c7413bd9c..d15716b99d8ad 100644 --- a/src/sinks/datadog/metrics/service.rs +++ b/src/sinks/datadog/metrics/service.rs @@ -10,7 +10,10 @@ use http::{ use hyper::Body; use snafu::ResultExt; use tower::Service; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::{ + json_size::JsonSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; use vector_core::{ event::{EventFinalizers, EventStatus, Finalizable}, internal_event::CountByteSize, @@ -123,7 +126,7 @@ pub struct DatadogMetricsResponse { status_code: StatusCode, body: Bytes, batch_size: usize, - byte_size: usize, + byte_size: JsonSize, raw_byte_size: usize, } @@ -182,7 +185,9 @@ impl Service for DatadogMetricsService { let api_key = self.api_key.clone(); Box::pin(async move { - let byte_size = request.get_metadata().events_byte_size(); + let byte_size = request + .get_metadata() + 
.events_estimated_json_encoded_byte_size(); let batch_size = request.get_metadata().event_count(); let raw_byte_size = request.raw_bytes; diff --git a/src/sinks/datadog/traces/request_builder.rs b/src/sinks/datadog/traces/request_builder.rs index 80613314caef7..dbe57714995fc 100644 --- a/src/sinks/datadog/traces/request_builder.rs +++ b/src/sinks/datadog/traces/request_builder.rs @@ -9,7 +9,10 @@ use bytes::Bytes; use prost::Message; use snafu::Snafu; use vector_common::request_metadata::RequestMetadata; -use vector_core::event::{EventFinalizers, Finalizable}; +use vector_core::{ + event::{EventFinalizers, Finalizable}, + EstimatedJsonEncodedSizeOf, +}; use super::{ apm_stats::{compute_apm_stats, Aggregator}, @@ -122,6 +125,7 @@ impl IncrementalRequestBuilder<(PartitionKey, Vec)> for DatadogTracesRequ .for_each(|r| match r { Ok((payload, mut processed)) => { let uncompressed_size = payload.len(); + let json_size = processed.estimated_json_encoded_size_of(); let metadata = DDTracesMetadata { api_key: key .api_key @@ -139,11 +143,8 @@ impl IncrementalRequestBuilder<(PartitionKey, Vec)> for DatadogTracesRequ let bytes = compressor.into_inner().freeze(); // build RequestMetadata - let builder = RequestMetadataBuilder::new( - n, - uncompressed_size, - uncompressed_size, - ); + let builder = + RequestMetadataBuilder::new(n, uncompressed_size, json_size); let bytes_len = NonZeroUsize::new(bytes.len()) .expect("payload should never be zero length"); let request_metadata = builder.with_request_size(bytes_len); diff --git a/src/sinks/datadog/traces/service.rs b/src/sinks/datadog/traces/service.rs index 509c909a87d4d..66e46b8075ca1 100644 --- a/src/sinks/datadog/traces/service.rs +++ b/src/sinks/datadog/traces/service.rs @@ -9,7 +9,10 @@ use http::{Request, StatusCode, Uri}; use hyper::Body; use snafu::ResultExt; use tower::Service; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::{ + json_size::JsonSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; use vector_core::{ event::{EventFinalizers, EventStatus, Finalizable}, internal_event::CountByteSize, @@ -91,7 +94,7 @@ pub struct TraceApiResponse { status_code: StatusCode, body: Bytes, batch_size: usize, - byte_size: usize, + byte_size: JsonSize, uncompressed_size: usize, } @@ -146,7 +149,9 @@ impl Service for TraceApiService { let client = self.client.clone(); Box::pin(async move { - let byte_size = request.get_metadata().events_byte_size(); + let byte_size = request + .get_metadata() + .events_estimated_json_encoded_byte_size(); let batch_size = request.get_metadata().event_count(); let uncompressed_size = request.uncompressed_size; let http_request = request.into_http_request().context(BuildRequestSnafu)?; diff --git a/src/sinks/datadog_archives.rs b/src/sinks/datadog_archives.rs index 6be169c12a62a..e439481c076a1 100644 --- a/src/sinks/datadog_archives.rs +++ b/src/sinks/datadog_archives.rs @@ -31,7 +31,7 @@ use vector_config::{configurable_component, NamedComponent}; use vector_core::{ config::AcknowledgementsConfig, event::{Event, EventFinalizers, Finalizable}, - schema, ByteSizeOf, + schema, EstimatedJsonEncodedSizeOf, }; use vrl::value::Kind; @@ -816,7 +816,7 @@ impl RequestBuilder<(String, Vec)> for DatadogAzureRequestBuilder { let metadata = AzureBlobMetadata { partition_key, count: events.len(), - byte_size: events.size_of(), + byte_size: events.estimated_json_encoded_size_of(), finalizers, }; let builder = RequestMetadataBuilder::from_events(&events); diff --git 
a/src/sinks/elasticsearch/encoder.rs b/src/sinks/elasticsearch/encoder.rs index 0558e8c44684c..5d27ba891b596 100644 --- a/src/sinks/elasticsearch/encoder.rs +++ b/src/sinks/elasticsearch/encoder.rs @@ -2,6 +2,7 @@ use std::{io, io::Write}; use serde::Serialize; use vector_buffers::EventCount; +use vector_common::json_size::JsonSize; use vector_core::{event::Event, ByteSizeOf, EstimatedJsonEncodedSizeOf}; use crate::{ @@ -34,7 +35,7 @@ impl ByteSizeOf for ProcessedEvent { } impl EstimatedJsonEncodedSizeOf for ProcessedEvent { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.log.estimated_json_encoded_size_of() } } diff --git a/src/sinks/elasticsearch/request_builder.rs b/src/sinks/elasticsearch/request_builder.rs index f6327f34ca50b..c5918df6e384c 100644 --- a/src/sinks/elasticsearch/request_builder.rs +++ b/src/sinks/elasticsearch/request_builder.rs @@ -1,6 +1,6 @@ use bytes::Bytes; -use vector_common::request_metadata::RequestMetadata; -use vector_core::ByteSizeOf; +use vector_common::{json_size::JsonSize, request_metadata::RequestMetadata}; +use vector_core::EstimatedJsonEncodedSizeOf; use crate::{ event::{EventFinalizers, Finalizable}, @@ -25,7 +25,7 @@ pub struct ElasticsearchRequestBuilder { pub struct Metadata { finalizers: EventFinalizers, batch_size: usize, - events_byte_size: usize, + events_byte_size: JsonSize, } impl RequestBuilder> for ElasticsearchRequestBuilder { @@ -50,9 +50,9 @@ impl RequestBuilder> for ElasticsearchRequestBuilder { ) -> (Self::Metadata, RequestMetadataBuilder, Self::Events) { let events_byte_size = events .iter() - .map(|x| x.log.size_of()) + .map(|x| x.log.estimated_json_encoded_size_of()) .reduce(|a, b| a + b) - .unwrap_or(0); + .unwrap_or(JsonSize::zero()); let metadata_builder = RequestMetadataBuilder::from_events(&events); diff --git a/src/sinks/elasticsearch/retry.rs b/src/sinks/elasticsearch/retry.rs index bd6035087c91e..4f5a6e0c73ed6 100644 --- a/src/sinks/elasticsearch/retry.rs +++ b/src/sinks/elasticsearch/retry.rs @@ -160,6 +160,7 @@ mod tests { use bytes::Bytes; use http::Response; use similar_asserts::assert_eq; + use vector_common::json_size::JsonSize; use super::*; use crate::event::EventStatus; @@ -179,7 +180,7 @@ mod tests { http_response: response, event_status: EventStatus::Rejected, batch_size: 1, - events_byte_size: 1, + events_byte_size: JsonSize::new(1), }), RetryAction::DontRetry(_) )); @@ -200,7 +201,7 @@ mod tests { http_response: response, event_status: EventStatus::Errored, batch_size: 1, - events_byte_size: 1, + events_byte_size: JsonSize::new(1), }), RetryAction::Retry(_) )); diff --git a/src/sinks/elasticsearch/service.rs b/src/sinks/elasticsearch/service.rs index b205f10f810e6..1baea684c07b2 100644 --- a/src/sinks/elasticsearch/service.rs +++ b/src/sinks/elasticsearch/service.rs @@ -11,7 +11,10 @@ use futures::future::BoxFuture; use http::{Response, Uri}; use hyper::{service::Service, Body, Request}; use tower::ServiceExt; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::{ + json_size::JsonSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; use vector_core::{internal_event::CountByteSize, stream::DriverResponse, ByteSizeOf}; use crate::sinks::elasticsearch::sign_request; @@ -31,7 +34,7 @@ pub struct ElasticsearchRequest { pub payload: Bytes, pub finalizers: EventFinalizers, pub batch_size: usize, - pub events_byte_size: usize, + pub events_byte_size: JsonSize, pub metadata: RequestMetadata, } @@ 
-146,7 +149,7 @@ pub struct ElasticsearchResponse { pub http_response: Response, pub event_status: EventStatus, pub batch_size: usize, - pub events_byte_size: usize, + pub events_byte_size: JsonSize, } impl DriverResponse for ElasticsearchResponse { diff --git a/src/sinks/influxdb/metrics.rs b/src/sinks/influxdb/metrics.rs index 41c8559963cc5..75bde8fde1238 100644 --- a/src/sinks/influxdb/metrics.rs +++ b/src/sinks/influxdb/metrics.rs @@ -7,7 +7,7 @@ use tower::Service; use vector_config::configurable_component; use vector_core::{ event::metric::{MetricSketch, MetricTags, Quantile}, - ByteSizeOf, + ByteSizeOf, EstimatedJsonEncodedSizeOf, }; use crate::{ @@ -184,9 +184,11 @@ impl InfluxDbSvc { .with_flat_map(move |event: Event| { stream::iter({ let byte_size = event.size_of(); + let json_size = event.estimated_json_encoded_size_of(); + normalizer .normalize(event.into_metric()) - .map(|metric| Ok(EncodedEvent::new(metric, byte_size))) + .map(|metric| Ok(EncodedEvent::new(metric, byte_size, json_size))) }) }) .sink_map_err(|error| error!(message = "Fatal influxdb sink error.", %error)); diff --git a/src/sinks/kafka/service.rs b/src/sinks/kafka/service.rs index d79ab6eb808b2..89a1fb5ce6827 100644 --- a/src/sinks/kafka/service.rs +++ b/src/sinks/kafka/service.rs @@ -9,7 +9,10 @@ use rdkafka::{ util::Timeout, }; use tower::Service; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::{ + json_size::JsonSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; use vector_core::{ internal_event::{ ByteSize, BytesSent, CountByteSize, InternalEventHandle as _, Protocol, Registered, @@ -37,7 +40,7 @@ pub struct KafkaRequestMetadata { } pub struct KafkaResponse { - event_byte_size: usize, + event_byte_size: JsonSize, } impl DriverResponse for KafkaResponse { @@ -90,7 +93,9 @@ impl Service for KafkaService { let this = self.clone(); Box::pin(async move { - let event_byte_size = request.get_metadata().events_byte_size(); + let event_byte_size = request + .get_metadata() + .events_estimated_json_encoded_byte_size(); let mut record = FutureRecord::to(&request.metadata.topic).payload(request.body.as_ref()); diff --git a/src/sinks/loki/event.rs b/src/sinks/loki/event.rs index 93d92b8498ab1..76389e2c64fb9 100644 --- a/src/sinks/loki/event.rs +++ b/src/sinks/loki/event.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, io}; use bytes::Bytes; use serde::{ser::SerializeSeq, Serialize}; use vector_buffers::EventCount; +use vector_common::json_size::JsonSize; use vector_core::{ event::{EventFinalizers, Finalizable}, ByteSizeOf, EstimatedJsonEncodedSizeOf, @@ -140,10 +141,10 @@ impl ByteSizeOf for LokiEvent { /// This implementation approximates the `Serialize` implementation below, without any allocations. 
impl EstimatedJsonEncodedSizeOf for LokiEvent { - fn estimated_json_encoded_size_of(&self) -> usize { - static BRACKETS_SIZE: usize = 2; - static COLON_SIZE: usize = 1; - static QUOTES_SIZE: usize = 2; + fn estimated_json_encoded_size_of(&self) -> JsonSize { + static BRACKETS_SIZE: JsonSize = JsonSize::new(2); + static COLON_SIZE: JsonSize = JsonSize::new(1); + static QUOTES_SIZE: JsonSize = JsonSize::new(2); BRACKETS_SIZE + QUOTES_SIZE @@ -185,7 +186,7 @@ impl ByteSizeOf for LokiRecord { } impl EstimatedJsonEncodedSizeOf for LokiRecord { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.event.estimated_json_encoded_size_of() } } diff --git a/src/sinks/opendal_common.rs b/src/sinks/opendal_common.rs index dc3f1aefcd991..ba961607be438 100644 --- a/src/sinks/opendal_common.rs +++ b/src/sinks/opendal_common.rs @@ -19,13 +19,14 @@ use tower::Service; use tracing::Instrument; use vector_common::{ finalization::{EventStatus, Finalizable}, + json_size::JsonSize, request_metadata::{MetaDescriptive, RequestMetadata}, }; use vector_core::{ internal_event::CountByteSize, sink::StreamSink, stream::{BatcherSettings, DriverResponse}, - ByteSizeOf, + EstimatedJsonEncodedSizeOf, }; use crate::{ @@ -168,7 +169,7 @@ impl Finalizable for OpenDalRequest { pub struct OpenDalMetadata { pub partition_key: String, pub count: usize, - pub byte_size: usize, + pub byte_size: JsonSize, pub finalizers: EventFinalizers, } @@ -204,7 +205,7 @@ impl RequestBuilder<(String, Vec)> for OpenDalRequestBuilder { let opendal_metadata = OpenDalMetadata { partition_key, count: events.len(), - byte_size: events.size_of(), + byte_size: events.estimated_json_encoded_size_of(), finalizers, }; @@ -237,7 +238,7 @@ impl RequestBuilder<(String, Vec)> for OpenDalRequestBuilder { #[derive(Debug)] pub struct OpenDalResponse { pub count: usize, - pub events_byte_size: usize, + pub events_byte_size: JsonSize, pub byte_size: usize, } diff --git a/src/sinks/prometheus/remote_write.rs b/src/sinks/prometheus/remote_write.rs index 9d4d7ba9f37e5..2692253780c5a 100644 --- a/src/sinks/prometheus/remote_write.rs +++ b/src/sinks/prometheus/remote_write.rs @@ -10,7 +10,7 @@ use prost::Message; use snafu::{ResultExt, Snafu}; use tower::Service; use vector_config::configurable_component; -use vector_core::ByteSizeOf; +use vector_core::{ByteSizeOf, EstimatedJsonEncodedSizeOf}; use super::collector::{self, MetricCollector as _}; use crate::{ @@ -200,6 +200,8 @@ impl SinkConfig for RemoteWriteConfig { .partition_sink(HttpRetryLogic, service, buffer, batch.timeout) .with_flat_map(move |event: Event| { let byte_size = event.size_of(); + let json_size = event.estimated_json_encoded_size_of(); + stream::iter(normalizer.normalize(event.into_metric()).map(|event| { let tenant_id = tenant_id.as_ref().and_then(|template| { template @@ -217,6 +219,7 @@ impl SinkConfig for RemoteWriteConfig { Ok(EncodedEvent::new( PartitionInnerBuffer::new(event, key), byte_size, + json_size, )) })) }) diff --git a/src/sinks/pulsar/service.rs b/src/sinks/pulsar/service.rs index d723748fd1c7a..bb61dcee92ed3 100644 --- a/src/sinks/pulsar/service.rs +++ b/src/sinks/pulsar/service.rs @@ -14,7 +14,10 @@ use vector_core::stream::DriverResponse; use crate::event::{EventFinalizers, EventStatus, Finalizable}; use crate::internal_events::PulsarSendingError; use crate::sinks::pulsar::request_builder::PulsarMetadata; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::{ + 
json_size::JsonSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; #[derive(Clone)] pub(super) struct PulsarRequest { @@ -24,7 +27,8 @@ pub(super) struct PulsarRequest { } pub struct PulsarResponse { - event_byte_size: usize, + byte_size: usize, + event_byte_size: JsonSize, } impl DriverResponse for PulsarResponse { @@ -37,7 +41,7 @@ impl DriverResponse for PulsarResponse { } fn bytes_sent(&self) -> Option { - Some(self.event_byte_size) + Some(self.byte_size) } } @@ -102,6 +106,7 @@ impl Service for PulsarService { Box::pin(async move { let body = request.body.clone(); + let byte_size = request.body.len(); let mut properties = HashMap::new(); if let Some(props) = request.metadata.properties { @@ -134,7 +139,10 @@ impl Service for PulsarService { match fut { Ok(resp) => match resp.await { Ok(_) => Ok(PulsarResponse { - event_byte_size: request.request_metadata.events_byte_size(), + byte_size, + event_byte_size: request + .request_metadata + .events_estimated_json_encoded_byte_size(), }), Err(e) => { emit!(PulsarSendingError { diff --git a/src/sinks/pulsar/sink.rs b/src/sinks/pulsar/sink.rs index 0d7e13ec6c84d..c8ab0bcae256c 100644 --- a/src/sinks/pulsar/sink.rs +++ b/src/sinks/pulsar/sink.rs @@ -14,7 +14,7 @@ use crate::{ template::Template, }; use vector_buffers::EventCount; -use vector_common::byte_size_of::ByteSizeOf; +use vector_common::{byte_size_of::ByteSizeOf, json_size::JsonSize}; use vector_core::{ event::{EstimatedJsonEncodedSizeOf, LogEvent}, sink::StreamSink, @@ -76,7 +76,7 @@ impl ByteSizeOf for PulsarEvent { } impl EstimatedJsonEncodedSizeOf for PulsarEvent { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.event.estimated_json_encoded_size_of() } } diff --git a/src/sinks/redis.rs b/src/sinks/redis.rs index ff08a98f683a9..084ff21ffd0e3 100644 --- a/src/sinks/redis.rs +++ b/src/sinks/redis.rs @@ -290,12 +290,13 @@ fn encode_event( transformer.transform(&mut event); let mut bytes = BytesMut::new(); + let byte_size = bytes.len(); // Errors are handled by `Encoder`. encoder.encode(event, &mut bytes).ok()?; let value = bytes.freeze(); - let event = EncodedEvent::new(RedisKvEntry { key, value }, event_byte_size); + let event = EncodedEvent::new(RedisKvEntry { key, value }, byte_size, event_byte_size); Some(event) } diff --git a/src/sinks/s3_common/service.rs b/src/sinks/s3_common/service.rs index 52cfc71f637d0..c9c12ac4bcb69 100644 --- a/src/sinks/s3_common/service.rs +++ b/src/sinks/s3_common/service.rs @@ -11,7 +11,10 @@ use futures::future::BoxFuture; use md5::Digest; use tower::Service; use tracing::Instrument; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::{ + json_size::JsonSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; use vector_core::{ event::{EventFinalizers, EventStatus, Finalizable}, internal_event::CountByteSize, @@ -53,7 +56,7 @@ pub struct S3Metadata { #[derive(Debug)] pub struct S3Response { count: usize, - events_byte_size: usize, + events_byte_size: JsonSize, } impl DriverResponse for S3Response { @@ -100,7 +103,9 @@ impl Service for S3Service { // Emission of internal events for errors and dropped events is handled upstream by the caller. 
fn call(&mut self, request: S3Request) -> Self::Future { let count = request.get_metadata().event_count(); - let events_byte_size = request.get_metadata().events_byte_size(); + let events_byte_size = request + .get_metadata() + .events_estimated_json_encoded_byte_size(); let options = request.options; diff --git a/src/sinks/sematext/metrics.rs b/src/sinks/sematext/metrics.rs index b362808264829..c67898f307d59 100644 --- a/src/sinks/sematext/metrics.rs +++ b/src/sinks/sematext/metrics.rs @@ -8,7 +8,7 @@ use indoc::indoc; use tower::Service; use vector_common::sensitive_string::SensitiveString; use vector_config::configurable_component; -use vector_core::EstimatedJsonEncodedSizeOf; +use vector_core::{ByteSizeOf, EstimatedJsonEncodedSizeOf}; use super::Region; use crate::{ @@ -185,10 +185,11 @@ impl SematextMetricsService { ) .with_flat_map(move |event: Event| { stream::iter({ - let byte_size = event.estimated_json_encoded_size_of(); + let byte_size = event.size_of(); + let json_byte_size = event.estimated_json_encoded_size_of(); normalizer .normalize(event.into_metric()) - .map(|item| Ok(EncodedEvent::new(item, byte_size))) + .map(|item| Ok(EncodedEvent::new(item, byte_size, json_byte_size))) }) }) .sink_map_err(|error| error!(message = "Fatal sematext metrics sink error.", %error)); @@ -256,7 +257,8 @@ fn encode_events( metrics: Vec, ) -> EncodedEvent { let mut output = BytesMut::new(); - let byte_size = metrics.estimated_json_encoded_size_of(); + let byte_size = metrics.size_of(); + let json_byte_size = metrics.estimated_json_encoded_size_of(); for metric in metrics.into_iter() { let (series, data, _metadata) = metric.into_parts(); let namespace = series @@ -292,7 +294,7 @@ fn encode_events( if !output.is_empty() { output.truncate(output.len() - 1); } - EncodedEvent::new(output.freeze(), byte_size) + EncodedEvent::new(output.freeze(), byte_size, json_byte_size) } fn to_fields(label: String, value: f64) -> HashMap { diff --git a/src/sinks/splunk_hec/common/response.rs b/src/sinks/splunk_hec/common/response.rs index 9c3f8e952a50e..65eaea0f12bcf 100644 --- a/src/sinks/splunk_hec/common/response.rs +++ b/src/sinks/splunk_hec/common/response.rs @@ -1,10 +1,11 @@ +use vector_common::json_size::JsonSize; use vector_core::internal_event::CountByteSize; use vector_core::{event::EventStatus, stream::DriverResponse}; pub struct HecResponse { pub event_status: EventStatus, pub events_count: usize, - pub events_byte_size: usize, + pub events_byte_size: JsonSize, } impl AsRef for HecResponse { diff --git a/src/sinks/splunk_hec/common/service.rs b/src/sinks/splunk_hec/common/service.rs index 7f44cfa90e4df..9492f11137dbe 100644 --- a/src/sinks/splunk_hec/common/service.rs +++ b/src/sinks/splunk_hec/common/service.rs @@ -114,7 +114,7 @@ where let ack_slot = self.current_ack_slot.take(); let events_count = req.get_metadata().event_count(); - let events_byte_size = req.get_metadata().events_byte_size(); + let events_byte_size = req.get_metadata().events_estimated_json_encoded_byte_size(); let response = self.inner.call(req); Box::pin(async move { @@ -338,7 +338,7 @@ mod tests { let body = Bytes::from("test-message"); let events_byte_size = body.len(); - let builder = RequestMetadataBuilder::new(1, events_byte_size, events_byte_size); + let builder = RequestMetadataBuilder::new(1, events_byte_size, events_byte_size.into()); let bytes_len = NonZeroUsize::new(events_byte_size).expect("payload should never be zero length"); let metadata = builder.with_request_size(bytes_len); diff --git 
a/src/sinks/statsd/service.rs b/src/sinks/statsd/service.rs index 4d3c3bc78dd09..5686dc22de1ea 100644 --- a/src/sinks/statsd/service.rs +++ b/src/sinks/statsd/service.rs @@ -49,7 +49,7 @@ impl DriverResponse for StatsdResponse { fn events_sent(&self) -> CountByteSize { CountByteSize( self.metadata.event_count(), - self.metadata.events_byte_size(), + self.metadata.events_estimated_json_encoded_byte_size(), ) } diff --git a/src/sinks/util/adaptive_concurrency/tests.rs b/src/sinks/util/adaptive_concurrency/tests.rs index 5da3259d117cb..33d735a76f187 100644 --- a/src/sinks/util/adaptive_concurrency/tests.rs +++ b/src/sinks/util/adaptive_concurrency/tests.rs @@ -25,6 +25,7 @@ use serde::Deserialize; use snafu::Snafu; use tokio::time::{self, sleep, Duration, Instant}; use tower::Service; +use vector_common::json_size::JsonSize; use vector_config::configurable_component; use super::controller::ControllerStatistics; @@ -184,7 +185,9 @@ impl SinkConfig for TestConfig { VecBuffer::new(batch_settings.size), batch_settings.timeout, ) - .with_flat_map(|event| stream::iter(Some(Ok(EncodedEvent::new(event, 0))))) + .with_flat_map(|event| { + stream::iter(Some(Ok(EncodedEvent::new(event, 0, JsonSize::zero())))) + }) .sink_map_err(|error| panic!("Fatal test sink error: {}", error)); let healthcheck = future::ok(()).boxed(); diff --git a/src/sinks/util/batch.rs b/src/sinks/util/batch.rs index 7eff19279670f..b4bd7e70fdd60 100644 --- a/src/sinks/util/batch.rs +++ b/src/sinks/util/batch.rs @@ -3,6 +3,7 @@ use std::{marker::PhantomData, num::NonZeroUsize, time::Duration}; use derivative::Derivative; use serde_with::serde_as; use snafu::Snafu; +use vector_common::json_size::JsonSize; use vector_config::configurable_component; use vector_core::stream::BatcherSettings; @@ -362,6 +363,7 @@ pub struct EncodedBatch { pub finalizers: EventFinalizers, pub count: usize, pub byte_size: usize, + pub json_byte_size: JsonSize, } /// This is a batch construct that stores an set of event finalizers alongside the batch itself. @@ -374,6 +376,7 @@ pub struct FinalizersBatch { // could be smaller due to aggregated items (ie metrics). 
count: usize, byte_size: usize, + json_byte_size: JsonSize, } impl From for FinalizersBatch { @@ -383,6 +386,7 @@ impl From for FinalizersBatch { finalizers: Default::default(), count: 0, byte_size: 0, + json_byte_size: JsonSize::zero(), } } } @@ -402,18 +406,21 @@ impl Batch for FinalizersBatch { item, finalizers, byte_size, + json_byte_size, } = item; match self.inner.push(item) { PushResult::Ok(full) => { self.finalizers.merge(finalizers); self.count += 1; self.byte_size += byte_size; + self.json_byte_size += json_byte_size; PushResult::Ok(full) } PushResult::Overflow(item) => PushResult::Overflow(EncodedEvent { item, finalizers, byte_size, + json_byte_size, }), } } @@ -428,6 +435,7 @@ impl Batch for FinalizersBatch { finalizers: Default::default(), count: 0, byte_size: 0, + json_byte_size: JsonSize::zero(), } } @@ -437,6 +445,7 @@ impl Batch for FinalizersBatch { finalizers: self.finalizers, count: self.count, byte_size: self.byte_size, + json_byte_size: self.json_byte_size, } } diff --git a/src/sinks/util/buffer/mod.rs b/src/sinks/util/buffer/mod.rs index aa9a22ff6b22f..69bd67d17b2f3 100644 --- a/src/sinks/util/buffer/mod.rs +++ b/src/sinks/util/buffer/mod.rs @@ -145,6 +145,7 @@ mod test { use bytes::{Buf, BytesMut}; use futures::{future, stream, SinkExt, StreamExt}; use tokio::time::Duration; + use vector_common::json_size::JsonSize; use super::{Buffer, Compression}; use crate::sinks::util::{BatchSettings, BatchSink, EncodedEvent}; @@ -179,7 +180,10 @@ mod test { buffered .sink_map_err(drop) - .send_all(&mut stream::iter(input).map(|item| Ok(EncodedEvent::new(item, 0)))) + .send_all( + &mut stream::iter(input) + .map(|item| Ok(EncodedEvent::new(item, 0, JsonSize::zero()))), + ) .await .unwrap(); diff --git a/src/sinks/util/http.rs b/src/sinks/util/http.rs index fcc9c3d0f8158..3f943db17eeb9 100644 --- a/src/sinks/util/http.rs +++ b/src/sinks/util/http.rs @@ -20,7 +20,7 @@ use snafu::{ResultExt, Snafu}; use tower::{Service, ServiceBuilder}; use tower_http::decompression::DecompressionLayer; use vector_config::configurable_component; -use vector_core::ByteSizeOf; +use vector_core::{ByteSizeOf, EstimatedJsonEncodedSizeOf}; use super::{ retries::{RetryAction, RetryLogic}, @@ -172,12 +172,14 @@ where fn start_send(mut self: Pin<&mut Self>, mut event: Event) -> Result<(), Self::Error> { let byte_size = event.size_of(); + let json_byte_size = event.estimated_json_encoded_size_of(); let finalizers = event.metadata_mut().take_finalizers(); if let Some(item) = self.encoder.encode_event(event) { *self.project().slot = Some(EncodedEvent { item, finalizers, byte_size, + json_byte_size, }); } @@ -323,11 +325,14 @@ where fn start_send(mut self: Pin<&mut Self>, mut event: Event) -> Result<(), Self::Error> { let finalizers = event.metadata_mut().take_finalizers(); let byte_size = event.size_of(); + let json_byte_size = event.estimated_json_encoded_size_of(); + if let Some(item) = self.encoder.encode_event(event) { *self.project().slot = Some(EncodedEvent { item, finalizers, byte_size, + json_byte_size, }); } diff --git a/src/sinks/util/metadata.rs b/src/sinks/util/metadata.rs index 975959e56aa15..d89b51140e5f6 100644 --- a/src/sinks/util/metadata.rs +++ b/src/sinks/util/metadata.rs @@ -3,7 +3,7 @@ use std::num::NonZeroUsize; use vector_buffers::EventCount; use vector_core::{ByteSizeOf, EstimatedJsonEncodedSizeOf}; -use vector_common::request_metadata::RequestMetadata; +use vector_common::{json_size::JsonSize, request_metadata::RequestMetadata}; use super::request_builder::EncodeResult; @@ -11,7 +11,7 
@@ use super::request_builder::EncodeResult; pub struct RequestMetadataBuilder { event_count: usize, events_byte_size: usize, - events_estimated_json_encoded_byte_size: usize, + events_estimated_json_encoded_byte_size: JsonSize, } impl RequestMetadataBuilder { @@ -29,7 +29,7 @@ impl RequestMetadataBuilder { pub const fn new( event_count: usize, events_byte_size: usize, - events_estimated_json_encoded_byte_size: usize, + events_estimated_json_encoded_byte_size: JsonSize, ) -> Self { Self { event_count, diff --git a/src/sinks/util/mod.rs b/src/sinks/util/mod.rs index 591e822ac557c..a83b17673ddbc 100644 --- a/src/sinks/util/mod.rs +++ b/src/sinks/util/mod.rs @@ -47,6 +47,7 @@ pub use service::{ pub use sink::{BatchSink, PartitionBatchSink, StreamSink}; use snafu::Snafu; pub use uri::UriSerde; +use vector_common::json_size::JsonSize; use crate::event::EventFinalizers; @@ -63,16 +64,18 @@ pub struct EncodedEvent { pub item: I, pub finalizers: EventFinalizers, pub byte_size: usize, + pub json_byte_size: JsonSize, } impl EncodedEvent { /// Create a trivial input with no metadata. This method will be /// removed when all sinks are converted. - pub fn new(item: I, byte_size: usize) -> Self { + pub fn new(item: I, byte_size: usize, json_byte_size: JsonSize) -> Self { Self { item, finalizers: Default::default(), byte_size, + json_byte_size, } } @@ -89,6 +92,7 @@ impl EncodedEvent { item: I::from(that.item), finalizers: that.finalizers, byte_size: that.byte_size, + json_byte_size: that.json_byte_size, } } @@ -98,6 +102,7 @@ impl EncodedEvent { item: doit(self.item), finalizers: self.finalizers, byte_size: self.byte_size, + json_byte_size: self.json_byte_size, } } } diff --git a/src/sinks/util/processed_event.rs b/src/sinks/util/processed_event.rs index 0aa241efe9709..dd13df8bd3f21 100644 --- a/src/sinks/util/processed_event.rs +++ b/src/sinks/util/processed_event.rs @@ -1,4 +1,5 @@ use serde::Serialize; +use vector_common::json_size::JsonSize; use vector_core::{ event::{EventFinalizers, Finalizable, LogEvent, MaybeAsLogMut}, ByteSizeOf, EstimatedJsonEncodedSizeOf, @@ -44,7 +45,7 @@ impl EstimatedJsonEncodedSizeOf for ProcessedEvent where E: EstimatedJsonEncodedSizeOf, { - fn estimated_json_encoded_size_of(&self) -> usize { + fn estimated_json_encoded_size_of(&self) -> JsonSize { self.event.estimated_json_encoded_size_of() } } diff --git a/src/sinks/util/service.rs b/src/sinks/util/service.rs index 2872c4eb39f6d..c53c52b8713c4 100644 --- a/src/sinks/util/service.rs +++ b/src/sinks/util/service.rs @@ -438,6 +438,7 @@ mod tests { use futures::{future, stream, FutureExt, SinkExt, StreamExt}; use tokio::time::Duration; + use vector_common::json_size::JsonSize; use super::*; use crate::sinks::util::{ @@ -520,7 +521,10 @@ mod tests { let input = (0..20).map(|i| PartitionInnerBuffer::new(i, 0)); sink.sink_map_err(drop) - .send_all(&mut stream::iter(input).map(|item| Ok(EncodedEvent::new(item, 0)))) + .send_all( + &mut stream::iter(input) + .map(|item| Ok(EncodedEvent::new(item, 0, JsonSize::zero()))), + ) .await .unwrap(); diff --git a/src/sinks/util/sink.rs b/src/sinks/util/sink.rs index 000b3d82be265..3305573899acb 100644 --- a/src/sinks/util/sink.rs +++ b/src/sinks/util/sink.rs @@ -426,7 +426,8 @@ where items, finalizers, count, - byte_size, + json_byte_size, + .. 
} = batch; let (tx, rx) = oneshot::channel(); @@ -449,7 +450,7 @@ where finalizers.update_status(status); match status { EventStatus::Delivered => { - events_sent.emit(CountByteSize(count, byte_size)); + events_sent.emit(CountByteSize(count, json_byte_size)); // TODO: Emit a BytesSent event here too } EventStatus::Rejected => { @@ -575,8 +576,9 @@ mod tests { use bytes::Bytes; use futures::{future, stream, task::noop_waker_ref, SinkExt, StreamExt}; use tokio::{task::yield_now, time::Instant}; - use vector_common::finalization::{ - BatchNotifier, BatchStatus, EventFinalizer, EventFinalizers, + use vector_common::{ + finalization::{BatchNotifier, BatchStatus, EventFinalizer, EventFinalizers}, + json_size::JsonSize, }; use super::*; @@ -621,6 +623,7 @@ mod tests { EncodedEvent { item, finalizers, + json_byte_size: JsonSize::zero(), byte_size: 0, } } @@ -770,7 +773,10 @@ mod tests { buffered .sink_map_err(drop) - .send_all(&mut stream::iter(0..22).map(|item| Ok(EncodedEvent::new(item, 0)))) + .send_all( + &mut stream::iter(0..22) + .map(|item| Ok(EncodedEvent::new(item, 0, JsonSize::zero()))), + ) .await .unwrap(); @@ -806,7 +812,7 @@ mod tests { Poll::Ready(Ok(())) )); assert!(matches!( - buffered.start_send_unpin(EncodedEvent::new(0, 0)), + buffered.start_send_unpin(EncodedEvent::new(0, 0, JsonSize::zero())), Ok(()) )); assert!(matches!( @@ -814,7 +820,7 @@ mod tests { Poll::Ready(Ok(())) )); assert!(matches!( - buffered.start_send_unpin(EncodedEvent::new(1, 0)), + buffered.start_send_unpin(EncodedEvent::new(1, 0, JsonSize::zero())), Ok(()) )); @@ -845,7 +851,7 @@ mod tests { Poll::Ready(Ok(())) )); assert!(matches!( - buffered.start_send_unpin(EncodedEvent::new(0, 0)), + buffered.start_send_unpin(EncodedEvent::new(0, 0, JsonSize::zero())), Ok(()) )); assert!(matches!( @@ -853,7 +859,7 @@ mod tests { Poll::Ready(Ok(())) )); assert!(matches!( - buffered.start_send_unpin(EncodedEvent::new(1, 0)), + buffered.start_send_unpin(EncodedEvent::new(1, 0, JsonSize::zero())), Ok(()) )); @@ -887,7 +893,10 @@ mod tests { let sink = PartitionBatchSink::new(svc, VecBuffer::new(batch_settings.size), TIMEOUT); sink.sink_map_err(drop) - .send_all(&mut stream::iter(0..22).map(|item| Ok(EncodedEvent::new(item, 0)))) + .send_all( + &mut stream::iter(0..22) + .map(|item| Ok(EncodedEvent::new(item, 0, JsonSize::zero()))), + ) .await .unwrap(); @@ -920,7 +929,10 @@ mod tests { let input = vec![Partitions::A, Partitions::B]; sink.sink_map_err(drop) - .send_all(&mut stream::iter(input).map(|item| Ok(EncodedEvent::new(item, 0)))) + .send_all( + &mut stream::iter(input) + .map(|item| Ok(EncodedEvent::new(item, 0, JsonSize::zero()))), + ) .await .unwrap(); @@ -947,7 +959,10 @@ mod tests { let input = vec![Partitions::A, Partitions::B, Partitions::A, Partitions::B]; sink.sink_map_err(drop) - .send_all(&mut stream::iter(input).map(|item| Ok(EncodedEvent::new(item, 0)))) + .send_all( + &mut stream::iter(input) + .map(|item| Ok(EncodedEvent::new(item, 0, JsonSize::zero()))), + ) .await .unwrap(); @@ -984,7 +999,7 @@ mod tests { Poll::Ready(Ok(())) )); assert!(matches!( - sink.start_send_unpin(EncodedEvent::new(1, 0)), + sink.start_send_unpin(EncodedEvent::new(1, 0, JsonSize::zero())), Ok(()) )); assert!(matches!(sink.poll_flush_unpin(&mut cx), Poll::Pending)); @@ -1024,6 +1039,7 @@ mod tests { finalizers, count: items, byte_size: 1, + json_byte_size: JsonSize::new(1), } }; @@ -1085,7 +1101,10 @@ mod tests { let input = (0..20).map(|i| (0, i)).chain((0..20).map(|i| (1, i))); sink.sink_map_err(drop) - .send_all(&mut 
stream::iter(input).map(|item| Ok(EncodedEvent::new(item, 0)))) + .send_all( + &mut stream::iter(input) + .map(|item| Ok(EncodedEvent::new(item, 0, JsonSize::zero()))), + ) .await .unwrap(); diff --git a/src/sinks/util/socket_bytes_sink.rs b/src/sinks/util/socket_bytes_sink.rs index c826213fe5bce..7d668c0201fb4 100644 --- a/src/sinks/util/socket_bytes_sink.rs +++ b/src/sinks/util/socket_bytes_sink.rs @@ -10,7 +10,10 @@ use futures::Sink; use pin_project::{pin_project, pinned_drop}; use tokio::io::AsyncWrite; use tokio_util::codec::{BytesCodec, FramedWrite}; -use vector_common::finalization::{EventFinalizers, EventStatus}; +use vector_common::{ + finalization::{EventFinalizers, EventStatus}, + json_size::JsonSize, +}; use super::EncodedEvent; use crate::internal_events::{SocketBytesSent, SocketEventsSent, SocketMode}; @@ -55,7 +58,7 @@ where shutdown_check: Box::new(shutdown_check), state: State { events_total: 0, - event_bytes: 0, + event_bytes: JsonSize::zero(), bytes_total: 0, socket_mode, finalizers: Vec::new(), @@ -67,7 +70,7 @@ where struct State { socket_mode: SocketMode, events_total: usize, - event_bytes: usize, + event_bytes: JsonSize, bytes_total: usize, finalizers: Vec, } @@ -92,7 +95,7 @@ impl State { } self.events_total = 0; - self.event_bytes = 0; + self.event_bytes = JsonSize::zero(); self.bytes_total = 0; } } @@ -129,7 +132,7 @@ where let pinned = self.project(); pinned.state.finalizers.push(item.finalizers); pinned.state.events_total += 1; - pinned.state.event_bytes += item.byte_size; + pinned.state.event_bytes += item.json_byte_size; pinned.state.bytes_total += item.item.len(); let result = pinned.inner.start_send(item.item); diff --git a/src/sinks/util/tcp.rs b/src/sinks/util/tcp.rs index 932638f0fed01..1fd97fa744e99 100644 --- a/src/sinks/util/tcp.rs +++ b/src/sinks/util/tcp.rs @@ -17,8 +17,9 @@ use tokio::{ time::sleep, }; use tokio_util::codec::Encoder; +use vector_common::json_size::JsonSize; use vector_config::configurable_component; -use vector_core::ByteSizeOf; +use vector_core::{ByteSizeOf, EstimatedJsonEncodedSizeOf}; use crate::{ codecs::Transformer, @@ -275,6 +276,7 @@ where let mut encoder = self.encoder.clone(); let mut input = input.map(|mut event| { let byte_size = event.size_of(); + let json_byte_size = event.estimated_json_encoded_size_of(); let finalizers = event.metadata_mut().take_finalizers(); self.transformer.transform(&mut event); let mut bytes = BytesMut::new(); @@ -286,9 +288,10 @@ where item, finalizers, byte_size, + json_byte_size, } } else { - EncodedEvent::new(Bytes::new(), 0) + EncodedEvent::new(Bytes::new(), 0, JsonSize::zero()) } }); diff --git a/src/sinks/util/unix.rs b/src/sinks/util/unix.rs index a5f2b97596c39..c8a3a8b93603b 100644 --- a/src/sinks/util/unix.rs +++ b/src/sinks/util/unix.rs @@ -6,8 +6,9 @@ use futures::{stream::BoxStream, SinkExt, StreamExt}; use snafu::{ResultExt, Snafu}; use tokio::{net::UnixStream, time::sleep}; use tokio_util::codec::Encoder; +use vector_common::json_size::JsonSize; use vector_config::configurable_component; -use vector_core::ByteSizeOf; +use vector_core::{ByteSizeOf, EstimatedJsonEncodedSizeOf}; use crate::{ codecs::Transformer, @@ -151,6 +152,7 @@ where let mut input = input .map(|mut event| { let byte_size = event.size_of(); + let json_byte_size = event.estimated_json_encoded_size_of(); transformer.transform(&mut event); @@ -164,9 +166,10 @@ where item, finalizers, byte_size, + json_byte_size, } } else { - EncodedEvent::new(Bytes::new(), 0) + EncodedEvent::new(Bytes::new(), 0, JsonSize::zero()) } 
}) .peekable(); diff --git a/src/sinks/vector/service.rs b/src/sinks/vector/service.rs index bfa4602a0cbd9..a93a196a58dc9 100644 --- a/src/sinks/vector/service.rs +++ b/src/sinks/vector/service.rs @@ -8,7 +8,10 @@ use hyper_proxy::ProxyConnector; use prost::Message; use tonic::{body::BoxBody, IntoRequest}; use tower::Service; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::{ + json_size::JsonSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; use super::VectorSinkError; @@ -29,7 +32,7 @@ pub struct VectorService { pub struct VectorResponse { events_count: usize, - events_byte_size: usize, + events_byte_size: JsonSize, } impl DriverResponse for VectorResponse { @@ -104,7 +107,9 @@ impl Service for VectorService { let mut service = self.clone(); let byte_size = list.request.encoded_len(); let events_count = list.get_metadata().event_count(); - let events_byte_size = list.get_metadata().events_byte_size(); + let events_byte_size = list + .get_metadata() + .events_estimated_json_encoded_byte_size(); let future = async move { service diff --git a/src/sinks/vector/sink.rs b/src/sinks/vector/sink.rs index 6b8fe8af766a9..09eede27d1844 100644 --- a/src/sinks/vector/sink.rs +++ b/src/sinks/vector/sink.rs @@ -4,6 +4,7 @@ use async_trait::async_trait; use futures::{stream::BoxStream, StreamExt}; use prost::Message; use tower::Service; +use vector_common::json_size::JsonSize; use vector_core::{ stream::{BatcherSettings, DriverResponse}, ByteSizeOf, @@ -62,7 +63,7 @@ where let builder = RequestMetadataBuilder::new( event_collection.events.len(), event_collection.events_byte_size, - event_collection.events_byte_size, // this is fine as it isn't being used + JsonSize::new(event_collection.events_byte_size), // this is fine as it isn't being used ); let encoded_events = proto_vector::PushEventsRequest { diff --git a/src/sources/dnstap/mod.rs b/src/sources/dnstap/mod.rs index f2871689f1087..69a9d13728805 100644 --- a/src/sources/dnstap/mod.rs +++ b/src/sources/dnstap/mod.rs @@ -13,7 +13,7 @@ use super::util::framestream::{build_framestream_unix_source, FrameHandler}; use crate::{ config::{log_schema, DataType, SourceConfig, SourceContext, SourceOutput}, event::{Event, LogEvent}, - internal_events::DnstapParseError, + internal_events::{DnstapParseError, SocketEventsReceived, SocketMode}, Result, }; @@ -24,7 +24,10 @@ pub mod schema; use dnsmsg_parser::{dns_message, dns_message_parser}; use lookup::lookup_v2::{parse_value_path, OptionalValuePath}; pub use schema::DnstapEventSchema; -use vector_core::config::{LegacyKey, LogNamespace}; +use vector_core::{ + config::{LegacyKey, LogNamespace}, + EstimatedJsonEncodedSizeOf, +}; /// Configuration for the `dnstap` source. #[configurable_component(source("dnstap", "Collect DNS logs from a dnstap-compatible server."))] @@ -260,12 +263,38 @@ impl FrameHandler for DnstapFrameHandler { * Takes a data frame from the unix socket and turns it into a Vector Event. 
**/ fn handle_event(&self, received_from: Option, frame: Bytes) -> Option { - // SocketEventsReceived is emitted already - self.bytes_received.emit(ByteSize(frame.len())); let mut log_event = LogEvent::default(); + if let Some(host) = received_from { + self.log_namespace.insert_source_metadata( + DnstapConfig::NAME, + &mut log_event, + self.host_key.as_ref().map(LegacyKey::Overwrite), + path!("host"), + host, + ); + } + + if self.raw_data_only { + log_event.insert( + self.schema.dnstap_root_data_schema().raw_data(), + BASE64_STANDARD.encode(&frame), + ); + } else if let Err(err) = parse_dnstap_data(&self.schema, &mut log_event, frame) { + emit!(DnstapParseError { + error: format!("Dnstap protobuf decode error {:?}.", err) + }); + return None; + } + + emit!(SocketEventsReceived { + mode: SocketMode::Unix, + byte_size: log_event.estimated_json_encoded_size_of(), + count: 1 + }); + if self.log_namespace == LogNamespace::Vector { // The timestamp is inserted by the parser which caters for the Legacy namespace. self.log_namespace.insert_vector_metadata( @@ -283,37 +312,7 @@ impl FrameHandler for DnstapFrameHandler { DnstapConfig::NAME, ); - if let Some(host) = received_from { - self.log_namespace.insert_source_metadata( - DnstapConfig::NAME, - &mut log_event, - self.host_key.as_ref().map(LegacyKey::Overwrite), - path!("host"), - host, - ); - } - - if self.raw_data_only { - log_event.insert( - self.schema.dnstap_root_data_schema().raw_data(), - BASE64_STANDARD.encode(&frame), - ); - let event = Event::from(log_event); - Some(event) - } else { - match parse_dnstap_data(&self.schema, &mut log_event, frame) { - Err(err) => { - emit!(DnstapParseError { - error: format!("Dnstap protobuf decode error {:?}.", err) - }); - None - } - Ok(_) => { - let event = Event::from(log_event); - Some(event) - } - } - } + Some(Event::from(log_event)) } fn socket_path(&self) -> PathBuf { diff --git a/src/sources/file.rs b/src/sources/file.rs index 76bec5f106d9f..3e68e57a2b04e 100644 --- a/src/sources/file.rs +++ b/src/sources/file.rs @@ -18,7 +18,10 @@ use tokio::{sync::oneshot, task::spawn_blocking}; use tracing::{Instrument, Span}; use vector_common::finalizer::OrderedFinalizer; use vector_config::configurable_component; -use vector_core::config::{LegacyKey, LogNamespace}; +use vector_core::{ + config::{LegacyKey, LogNamespace}, + EstimatedJsonEncodedSizeOf, +}; use vrl::value::Kind; use super::util::{EncodingConfig, MultilineConfig}; @@ -739,12 +742,6 @@ fn create_event( meta: &EventMetadata, log_namespace: LogNamespace, ) -> LogEvent { - emit!(FileEventsReceived { - count: 1, - file, - byte_size: line.len(), - }); - let deserializer = BytesDeserializer::new(); let mut event = deserializer.parse_single(line, log_namespace); @@ -791,6 +788,12 @@ fn create_event( file, ); + emit!(FileEventsReceived { + count: 1, + file, + byte_size: event.estimated_json_encoded_size_of(), + }); + event } diff --git a/src/sources/file_descriptors/mod.rs b/src/sources/file_descriptors/mod.rs index 32acfbede5e61..06710adec9c37 100644 --- a/src/sources/file_descriptors/mod.rs +++ b/src/sources/file_descriptors/mod.rs @@ -144,7 +144,7 @@ async fn process_stream( bytes_received.emit(ByteSize(byte_size)); events_received.emit(CountByteSize( events.len(), - events.estimated_json_encoded_size_of(), + events.estimated_json_encoded_size_of(), )); let now = Utc::now(); diff --git a/src/sources/internal_logs.rs b/src/sources/internal_logs.rs index bc923eef399c2..0d22b3b310208 100644 --- a/src/sources/internal_logs.rs +++ 
b/src/sources/internal_logs.rs @@ -155,12 +155,14 @@ async fn run( // any logs that don't break the loop, as that could cause an // infinite loop since it receives all such logs. while let Some(mut log) = rx.next().await { - let byte_size = log.estimated_json_encoded_size_of(); + // TODO: Should this actually be in memory size? + let byte_size = log.estimated_json_encoded_size_of().get(); + let json_byte_size = log.estimated_json_encoded_size_of(); // This event doesn't emit any log emit!(InternalLogsBytesReceived { byte_size }); emit!(InternalLogsEventsReceived { count: 1, - byte_size, + byte_size: json_byte_size, }); if let Ok(hostname) = &hostname { diff --git a/src/sources/internal_metrics.rs b/src/sources/internal_metrics.rs index bddf04240673d..03f447c7488d9 100644 --- a/src/sources/internal_metrics.rs +++ b/src/sources/internal_metrics.rs @@ -6,8 +6,7 @@ use tokio::time; use tokio_stream::wrappers::IntervalStream; use vector_common::internal_event::{CountByteSize, InternalEventHandle as _}; use vector_config::configurable_component; -use vector_core::config::LogNamespace; -use vector_core::EstimatedJsonEncodedSizeOf; +use vector_core::{config::LogNamespace, ByteSizeOf, EstimatedJsonEncodedSizeOf}; use crate::{ config::{log_schema, SourceConfig, SourceContext, SourceOutput}, @@ -166,10 +165,11 @@ impl<'a> InternalMetrics<'a> { let metrics = self.controller.capture_metrics(); let count = metrics.len(); - let byte_size = metrics.estimated_json_encoded_size_of(); + let byte_size = metrics.size_of(); + let json_size = metrics.estimated_json_encoded_size_of(); emit!(InternalMetricsBytesReceived { byte_size }); - events_received.emit(CountByteSize(count, byte_size)); + events_received.emit(CountByteSize(count, json_size)); let batch = metrics.into_iter().map(|mut metric| { // A metric starts out with a default "vector" namespace, but will be overridden diff --git a/src/sources/postgresql_metrics.rs b/src/sources/postgresql_metrics.rs index b7415c4217831..808028539e6f3 100644 --- a/src/sources/postgresql_metrics.rs +++ b/src/sources/postgresql_metrics.rs @@ -25,7 +25,10 @@ use tokio_postgres::{ Client, Config, Error as PgError, NoTls, Row, }; use tokio_stream::wrappers::IntervalStream; -use vector_common::internal_event::{CountByteSize, InternalEventHandle as _, Registered}; +use vector_common::{ + internal_event::{CountByteSize, InternalEventHandle as _, Registered}, + json_size::JsonSize, +}; use vector_config::configurable_component; use vector_core::config::LogNamespace; use vector_core::{metric_tags, ByteSizeOf, EstimatedJsonEncodedSizeOf}; @@ -566,20 +569,23 @@ impl PostgresqlMetrics { .await { Ok(result) => { - let (count, byte_size, received_byte_size) = - result.iter().fold((0, 0, 0), |res, (set, size)| { - ( - res.0 + set.len(), - res.1 + set.estimated_json_encoded_size_of(), - res.2 + size, - ) - }); + let (count, json_byte_size, received_byte_size) = + result + .iter() + .fold((0, JsonSize::zero(), 0), |res, (set, size)| { + ( + res.0 + set.len(), + res.1 + set.estimated_json_encoded_size_of(), + res.2 + size, + ) + }); emit!(EndpointBytesReceived { byte_size: received_byte_size, protocol: "tcp", endpoint: &self.endpoint, }); - self.events_received.emit(CountByteSize(count, byte_size)); + self.events_received + .emit(CountByteSize(count, json_byte_size)); self.client.set((client, client_version)); Ok(result.into_iter().flat_map(|(metrics, _)| metrics)) } diff --git a/src/sources/util/framestream.rs b/src/sources/util/framestream.rs index 8ed437d9bbaed..194dd48432074 100644 --- 
a/src/sources/util/framestream.rs +++ b/src/sources/util/framestream.rs @@ -28,9 +28,7 @@ use tracing::{field, Instrument}; use crate::{ event::Event, - internal_events::{ - SocketEventsReceived, SocketMode, UnixSocketError, UnixSocketFileDeleteError, - }, + internal_events::{UnixSocketError, UnixSocketFileDeleteError}, shutdown::ShutdownSignal, sources::Source, SourceSender, @@ -157,11 +155,6 @@ impl FrameStreamReader { } else { //data frame if self.state.control_state == ControlState::ReadingData { - emit!(SocketEventsReceived { - mode: SocketMode::Unix, - byte_size: frame.len(), - count: 1 - }); Some(frame) //return data frame } else { error!( diff --git a/src/sources/util/http_client.rs b/src/sources/util/http_client.rs index 9dbffb26e4f30..aa5cdb1b8db7e 100644 --- a/src/sources/util/http_client.rs +++ b/src/sources/util/http_client.rs @@ -15,6 +15,7 @@ use hyper::{Body, Request}; use std::time::{Duration, Instant}; use std::{collections::HashMap, future::ready}; use tokio_stream::wrappers::IntervalStream; +use vector_common::json_size::JsonSize; use crate::{ http::{Auth, HttpClient}, @@ -187,7 +188,7 @@ pub(crate) async fn call< // HttpClientEventsReceived event, we should // emit 0 when there aren't any usable // metrics. - 0 + JsonSize::zero() } else { events.estimated_json_encoded_size_of() }; diff --git a/website/content/en/highlights/2023-07-04-0-31-0-upgrade-guide.md b/website/content/en/highlights/2023-07-04-0-31-0-upgrade-guide.md new file mode 100644 index 0000000000000..51eada592553a --- /dev/null +++ b/website/content/en/highlights/2023-07-04-0-31-0-upgrade-guide.md @@ -0,0 +1,31 @@ +--- +date: "2023-07-04" +title: "0.31 Upgrade Guide" +description: "An upgrade guide that addresses breaking changes in 0.31.0" +authors: ["stephenwakely"] +release: "0.31.0" +hide_on_release_notes: false +badges: + type: breaking change +--- + +Vector's 0.31.0 release includes **breaking changes**: + +1. [`component_received_event_bytes_total` and `component_sent_event_bytes_total` consistently use estimated JSON size of the event](#event_json_size) + +We cover them below to help you upgrade quickly: + +## Upgrade guide + +### Breaking changes + +#### `component_received_event_bytes_total` and `component_sent_event_bytes_total` consistently use estimated JSON size of the event {#event_json_size} + +Prior to this Version, metrics emitted by Vector were inconsistently measuring +the byte size of the events that were being sent and received. These metrics +have been updated for all components so they always emit an estimate of the size +of the event should it be serialized to JSON. + +Measuring the events like this allows a consistent measurement to be applied +across all components regardless of how the source or sink serializes the event +when connecting to the external service. 
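The pattern that repeats throughout the JsonSize patch above is the same in every sink: the in-memory measurement (`ByteSizeOf::size_of()`, a plain `usize`) keeps feeding the raw byte accounting, while `EstimatedJsonEncodedSizeOf::estimated_json_encoded_size_of()` now returns the `JsonSize` newtype from `vector_common::json_size`, and that value is what goes into `CountByteSize` for the event-byte metrics. The sketch below is a minimal, self-contained illustration of that split. It is not Vector code: the `JsonSize` defined here is a simplified stand-in for the real type, and `ExampleResponse`, the sample events, and the literal payload length are hypothetical.

```rust
// Simplified stand-in for vector_common::json_size::JsonSize (assumption: the
// real type offers at least new/zero/get and addition, as used in the patch).
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
struct JsonSize(usize);

impl JsonSize {
    const fn new(n: usize) -> Self {
        Self(n)
    }
    const fn zero() -> Self {
        Self(0)
    }
    fn get(self) -> usize {
        self.0
    }
}

impl std::ops::Add for JsonSize {
    type Output = JsonSize;
    fn add(self, rhs: JsonSize) -> JsonSize {
        JsonSize(self.0 + rhs.0)
    }
}

// Hypothetical response type: it carries both measurements so the two metrics
// are fed from different values, mirroring the shape of the responses above
// that hold a usize alongside a JsonSize.
struct ExampleResponse {
    count: usize,
    raw_byte_size: usize,       // feeds the raw bytes-sent style metric
    events_json_size: JsonSize, // feeds component_sent_event_bytes_total
}

fn main() {
    // Hypothetical events; their serialized length approximates the JSON size.
    let events = vec![r#"{"message":"hello"}"#, r#"{"message":"world"}"#];

    // Sum the estimated JSON size per event, starting from JsonSize::zero().
    let events_json_size = events
        .iter()
        .map(|e| JsonSize::new(e.len()))
        .fold(JsonSize::zero(), |acc, size| acc + size);

    let response = ExampleResponse {
        count: events.len(),
        raw_byte_size: 64, // e.g. a compressed request body length (made up)
        events_json_size,
    };

    println!(
        "events_sent: count={} json_bytes={}; bytes_sent: {} raw bytes",
        response.count,
        response.events_json_size.get(),
        response.raw_byte_size
    );
}
```

Keeping the two measurements in distinct types means a plain `usize` byte count can no longer be passed where the estimated JSON size is expected, which is presumably why the patch introduces a newtype rather than continuing to thread `usize` values through the sinks.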
From 247bb807cae195c5c987a43e3c4e6ab6b885a94b Mon Sep 17 00:00:00 2001 From: neuronull Date: Wed, 31 May 2023 09:49:54 -0600 Subject: [PATCH 071/236] chore(external docs): fix reference to supported aarch64 architecture (#17553) --- .../content/en/docs/setup/installation/package-managers/rpm.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/en/docs/setup/installation/package-managers/rpm.md b/website/content/en/docs/setup/installation/package-managers/rpm.md index ba70ae61e82bb..80a847afa0fd9 100644 --- a/website/content/en/docs/setup/installation/package-managers/rpm.md +++ b/website/content/en/docs/setup/installation/package-managers/rpm.md @@ -15,7 +15,7 @@ sudo rpm -i https://packages.timber.io/vector/{{< version >}}/vector-{{< version Make sure to replace `{arch}` with one of the following: * `x86_64` -* `arm64` +* `aarch64` * `armv7` ## Other actions From 0dfa09c4a9b7e753802a4fa0700557752e2fc945 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Wed, 31 May 2023 11:25:38 -0600 Subject: [PATCH 072/236] chore(deps): Bump chrono to 0.4.26 (#17537) Still need the hack to drop the `oldtime` feature. --- Cargo.lock | 12 +++++++++--- Cargo.toml | 4 ++-- LICENSE-3rdparty.csv | 1 + 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index efe263a0f366c..f4e4e3024e053 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -130,6 +130,12 @@ dependencies = [ "url", ] +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -1824,12 +1830,12 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.24" -source = "git+https://github.com/vectordotdev/chrono.git?tag=v0.4.24-no-default-time-1#7ec1ad93833787da5df64898fb3e6206221c6833" +version = "0.4.26" +source = "git+https://github.com/vectordotdev/chrono.git?tag=v0.4.26-no-default-time-1#d44a3b100183d68f8a3e3cb431fcc4a47152a0a3" dependencies = [ + "android-tzdata", "iana-time-zone", "js-sys", - "num-integer", "num-traits", "serde", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 3bf10945b4d06..043aabb93f135 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -238,7 +238,7 @@ bloom = { version = "0.3.2", default-features = false, optional = true } bollard = { version = "0.14.0", default-features = false, features = ["ssl", "chrono"], optional = true } bytes = { version = "1.4.0", default-features = false, features = ["serde"] } bytesize = { version = "1.2.0", default-features = false } -chrono = { version = "0.4.24", default-features = false, features = ["serde"] } +chrono = { version = "0.4.26", default-features = false, features = ["serde"] } cidr-utils = { version = "0.5.10", default-features = false } clap = { version = "4.1.14", default-features = false, features = ["derive", "error-context", "env", "help", "std", "string", "usage", "wrap_help"] } colored = { version = "2.0.0", default-features = false } @@ -363,7 +363,7 @@ zstd = { version = "0.12.3", default-features = false } [patch.crates-io] # Removes dependency on `time` v0.1 # https://github.com/chronotope/chrono/pull/578 -chrono = { git = "https://github.com/vectordotdev/chrono.git", tag = "v0.4.24-no-default-time-1" } +chrono = { git = "https://github.com/vectordotdev/chrono.git", tag = "v0.4.26-no-default-time-1" } # The upgrade for `tokio-util` >= 0.6.9 is blocked on 
https://github.com/vectordotdev/vector/issues/11257. tokio-util = { git = "https://github.com/vectordotdev/tokio", branch = "tokio-util-0.7.4-framed-read-continue-on-error" } nix = { git = "https://github.com/vectordotdev/nix.git", branch = "memfd/gnu/musl" } diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 3f5b7ca6808e8..5680b71f16e37 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -7,6 +7,7 @@ aes,https://github.com/RustCrypto/block-ciphers,MIT OR Apache-2.0,RustCrypto Dev ahash,https://github.com/tkaitchuck/ahash,MIT OR Apache-2.0,Tom Kaitchuck aho-corasick,https://github.com/BurntSushi/aho-corasick,Unlicense OR MIT,Andrew Gallant amq-protocol,https://github.com/amqp-rs/amq-protocol,BSD-2-Clause,Marc-Antoine Perennou <%arc-Antoine@Perennou.com> +android-tzdata,https://github.com/RumovZ/android-tzdata,MIT OR Apache-2.0,RumovZ android_system_properties,https://github.com/nical/android_system_properties,MIT OR Apache-2.0,Nicolas Silva ansi_term,https://github.com/ogham/rust-ansi-term,MIT,"ogham@bsago.me, Ryan Scheel (Havvy) , Josh Triplett " anyhow,https://github.com/dtolnay/anyhow,MIT OR Apache-2.0,David Tolnay From 349c7183067f0aa91b05914f34a68ee899fea88b Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Wed, 31 May 2023 13:33:08 -0400 Subject: [PATCH 073/236] chore: Remove links to roadmap (#17554) Discovered by https://github.com/vectordotdev/vector/issues/17547#issuecomment-1570289321 We dropped the public roadmap as it seemed to be doing more harm than good as it proved not to be terribly accurate as priorities shift. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- README.md | 2 -- netlify.toml | 6 ------ website/cue/reference/urls.cue | 1 - 3 files changed, 9 deletions(-) diff --git a/README.md b/README.md index cb6c089fe9084..1ab1fb21812a2 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,6 @@ Vector**][docs.installation]. * [**Community**][urls.vector_community] - [chat][urls.vector_chat], [calendar][urls.vector_calendar], [@vectordotdev][urls.vector_twitter] * [**Releases**][urls.vector_releases] -* [**Roadmap**][urls.vector_roadmap] - [vote on new features][urls.vote_feature] * **Policies** - [Code of Conduct][urls.vector_code_of_conduct], [Privacy][urls.vector_privacy_policy], [Releases][urls.vector_releases_policy], [Security][urls.vector_security_policy], [Versioning][urls.vector_versioning_policy] ## Comparisons @@ -221,7 +220,6 @@ Vector is an end-to-end, unified, open data platform. 
[urls.vector_release_policy]: https://github.com/vectordotdev/vector/blob/master/RELEASING.md [urls.vector_releases]: https://vector.dev/releases/ [urls.vector_releases_policy]: https://github.com/vectordotdev/vector/blob/master/RELEASES.md -[urls.vector_roadmap]: https://roadmap.vector.dev [urls.vector_security_policy]: https://github.com/vectordotdev/vector/security/policy [urls.vector_test_harness]: https://github.com/vectordotdev/vector-test-harness/ [urls.vector_twitter]: https://twitter.com/vectordotdev diff --git a/netlify.toml b/netlify.toml index dad74bbc15e0a..b7c6deba7a70b 100644 --- a/netlify.toml +++ b/netlify.toml @@ -52,12 +52,6 @@ to = "https://github.com/vectordotdev/vector/discussions" status = 302 force = true -[[redirects]] -from = "https://roadmap.vector.dev/*" -to = "https://airtable.com/shriTZW5LeOE4cIyJ" -status = 302 -force = true - [[redirects]] from = "https://sh.vector.dev/*" to = "http://sh.vector.dev.s3-website-us-east-1.amazonaws.com/:splat" diff --git a/website/cue/reference/urls.cue b/website/cue/reference/urls.cue index 0344812ffa7bf..fd9cafc52d6dc 100644 --- a/website/cue/reference/urls.cue +++ b/website/cue/reference/urls.cue @@ -586,7 +586,6 @@ urls: { vector_remap_transform: "/docs/reference/configuration/transforms/remap/" vector_remap_transform_multiple: "/docs/reference/configuration/transforms/remap/#emitting-multiple-log-events" vector_repo: "\(github)/vectordotdev/vector" - vector_roadmap: "https://roadmap.vector.dev" vector_roles: "/docs/setup/deployment/roles" vector_route_transform: "/docs/reference/configuration/transforms/route" vector_rpm_source_files: "\(vector_repo)/tree/master/distribution/rpm" From bcc5b6c5c883e16bd959b610890f67ffc0405860 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 31 May 2023 19:23:24 +0000 Subject: [PATCH 074/236] chore(deps): Bump csv from 1.2.1 to 1.2.2 (#17555) Bumps [csv](https://github.com/BurntSushi/rust-csv) from 1.2.1 to 1.2.2.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f4e4e3024e053..836d354b9e00c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2411,9 +2411,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b015497079b9a9d69c02ad25de6c0a6edef051ea6360a327d0bd05802ef64ad" +checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086" dependencies = [ "csv-core", "itoa", From 7a4f1f77470fbc804299e2c1be867b193052d275 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Thu, 1 Jun 2023 14:02:27 +0100 Subject: [PATCH 075/236] fix(observability): correct emitted metrics (#17562) Ref #17465 There were a couple of outstanding issues from the above PR. 1. The redis sink was counting the network bytes before actually creating the bytes, so the count was always zero. 2. The Vector sink was not counting the `JsonSize` of the event. The comment saying this was not being used was incorrect. Signed-off-by: Stephen Wakely --- src/sinks/redis.rs | 3 ++- src/sinks/vector/sink.rs | 8 ++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/sinks/redis.rs b/src/sinks/redis.rs index 084ff21ffd0e3..611127b4641d0 100644 --- a/src/sinks/redis.rs +++ b/src/sinks/redis.rs @@ -290,10 +290,11 @@ fn encode_event( transformer.transform(&mut event); let mut bytes = BytesMut::new(); - let byte_size = bytes.len(); // Errors are handled by `Encoder`. encoder.encode(event, &mut bytes).ok()?; + + let byte_size = bytes.len(); let value = bytes.freeze(); let event = EncodedEvent::new(RedisKvEntry { key, value }, byte_size, event_byte_size); diff --git a/src/sinks/vector/sink.rs b/src/sinks/vector/sink.rs index 09eede27d1844..229867194ddfd 100644 --- a/src/sinks/vector/sink.rs +++ b/src/sinks/vector/sink.rs @@ -7,7 +7,7 @@ use tower::Service; use vector_common::json_size::JsonSize; use vector_core::{ stream::{BatcherSettings, DriverResponse}, - ByteSizeOf, + ByteSizeOf, EstimatedJsonEncodedSizeOf, }; use super::service::VectorRequest; @@ -20,6 +20,7 @@ use crate::{ /// Data for a single event. 
struct EventData { byte_size: usize, + json_byte_size: JsonSize, finalizers: EventFinalizers, wrapper: EventWrapper, } @@ -30,6 +31,7 @@ struct EventCollection { pub finalizers: EventFinalizers, pub events: Vec, pub events_byte_size: usize, + pub events_json_byte_size: JsonSize, } pub struct VectorSink { @@ -48,6 +50,7 @@ where input .map(|mut event| EventData { byte_size: event.size_of(), + json_byte_size: event.estimated_json_encoded_size_of(), finalizers: event.take_finalizers(), wrapper: EventWrapper::from(event), }) @@ -57,13 +60,14 @@ where event_collection.finalizers.merge(item.finalizers); event_collection.events.push(item.wrapper); event_collection.events_byte_size += item.byte_size; + event_collection.events_json_byte_size += item.json_byte_size; }, )) .map(|event_collection| { let builder = RequestMetadataBuilder::new( event_collection.events.len(), event_collection.events_byte_size, - JsonSize::new(event_collection.events_byte_size), // this is fine as it isn't being used + event_collection.events_json_byte_size, ); let encoded_events = proto_vector::PushEventsRequest { From 23ed0e3adbffdd770a257635c3d6720a3bf072e7 Mon Sep 17 00:00:00 2001 From: Dominic Burkart Date: Thu, 1 Jun 2023 15:49:12 +0200 Subject: [PATCH 076/236] feat(configurable shutdown duration): make shutdown duration configurable (#17479) We want to make the graceful shutdown period configurable instead of hardcoding it to sixty seconds. Issues: https://github.com/vectordotdev/vector/issues/9042 Remove 60s hard cutoff period for graceful shutdowns https://github.com/vectordotdev/vector/issues/12831 Want to adjust graceful shutdown time This is my first PR in vector, not sure if this is the correct approach: - are the ergonomics (-1 for no timeout, 0+ for timeout durations) good? - any test recommendations beyond manual testing? --------- Co-authored-by: Bruce Guenter --- lib/vector-common/src/shutdown.rs | 34 +++++----- src/app.rs | 8 ++- src/cli.rs | 24 ++++++- src/config/builder.rs | 11 +++- src/config/compiler.rs | 2 + src/config/mod.rs | 2 + src/sources/journald.rs | 2 +- src/sources/statsd/mod.rs | 4 +- src/sources/syslog.rs | 6 +- src/topology/running.rs | 66 +++++++++++-------- website/cue/reference/cli.cue | 21 ++++++ .../cue/reference/components/sources/exec.cue | 3 +- 12 files changed, 130 insertions(+), 53 deletions(-) diff --git a/lib/vector-common/src/shutdown.rs b/lib/vector-common/src/shutdown.rs index 96107ba783c95..79f58978bd6e9 100644 --- a/lib/vector-common/src/shutdown.rs +++ b/lib/vector-common/src/shutdown.rs @@ -200,7 +200,7 @@ impl SourceShutdownCoordinator { /// /// Panics if this coordinator has had its triggers removed (ie /// has been taken over with `Self::takeover_source`). - pub fn shutdown_all(self, deadline: Instant) -> impl Future { + pub fn shutdown_all(self, deadline: Option) -> impl Future { let mut complete_futures = Vec::new(); let shutdown_begun_triggers = self.shutdown_begun_triggers; @@ -275,7 +275,7 @@ impl SourceShutdownCoordinator { shutdown_complete_tripwire, shutdown_force_trigger, id.clone(), - deadline, + Some(deadline), ) } @@ -297,23 +297,27 @@ impl SourceShutdownCoordinator { shutdown_complete_tripwire: Tripwire, shutdown_force_trigger: Trigger, id: ComponentKey, - deadline: Instant, + deadline: Option, ) -> impl Future { async move { - // Call `shutdown_force_trigger.disable()` on drop. 
- let shutdown_force_trigger = DisabledTrigger::new(shutdown_force_trigger); - let fut = shutdown_complete_tripwire.then(tripwire_handler); - if timeout_at(deadline, fut).await.is_ok() { - shutdown_force_trigger.into_inner().disable(); - true + if let Some(deadline) = deadline { + // Call `shutdown_force_trigger.disable()` on drop. + let shutdown_force_trigger = DisabledTrigger::new(shutdown_force_trigger); + if timeout_at(deadline, fut).await.is_ok() { + shutdown_force_trigger.into_inner().disable(); + true + } else { + error!( + "Source '{}' failed to shutdown before deadline. Forcing shutdown.", + id, + ); + shutdown_force_trigger.into_inner().cancel(); + false + } } else { - error!( - "Source '{}' failed to shutdown before deadline. Forcing shutdown.", - id, - ); - shutdown_force_trigger.into_inner().cancel(); - false + fut.await; + true } } .boxed() diff --git a/src/app.rs b/src/app.rs index c66adb2463375..e599499afe3c9 100644 --- a/src/app.rs +++ b/src/app.rs @@ -1,5 +1,5 @@ #![allow(missing_docs)] -use std::{collections::HashMap, num::NonZeroUsize, path::PathBuf}; +use std::{collections::HashMap, num::NonZeroUsize, path::PathBuf, time::Duration}; use exitcode::ExitCode; use futures::StreamExt; @@ -62,10 +62,14 @@ impl ApplicationConfig { ) -> Result { let config_paths = opts.config_paths_with_formats(); + let graceful_shutdown_duration = (!opts.no_graceful_shutdown_limit) + .then(|| Duration::from_secs(u64::from(opts.graceful_shutdown_limit_secs))); + let config = load_configs( &config_paths, opts.watch_config, opts.require_healthy, + graceful_shutdown_duration, signal_handler, ) .await?; @@ -410,6 +414,7 @@ pub async fn load_configs( config_paths: &[ConfigPath], watch_config: bool, require_healthy: Option, + graceful_shutdown_duration: Option, signal_handler: &mut SignalHandler, ) -> Result { let config_paths = config::process_paths(config_paths).ok_or(exitcode::CONFIG)?; @@ -440,6 +445,7 @@ pub async fn load_configs( info!("Health checks are disabled."); } config.healthchecks.set_require_healthy(require_healthy); + config.graceful_shutdown_duration = graceful_shutdown_duration; Ok(config) } diff --git a/src/cli.rs b/src/cli.rs index f9688a86cc133..68d49db299061 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -1,5 +1,5 @@ #![allow(missing_docs)] -use std::path::PathBuf; +use std::{num::NonZeroU64, path::PathBuf}; use clap::{ArgAction, CommandFactory, FromArgMatches, Parser}; @@ -159,6 +159,28 @@ pub struct RootOpts { )] pub internal_log_rate_limit: u64, + /// Set the duration in seconds to wait for graceful shutdown after SIGINT or SIGTERM are + /// received. After the duration has passed, Vector will force shutdown. To never force + /// shutdown, use `--no-graceful-shutdown-limit`. + #[arg( + long, + default_value = "60", + env = "VECTOR_GRACEFUL_SHUTDOWN_LIMIT_SECS", + group = "graceful-shutdown-limit" + )] + pub graceful_shutdown_limit_secs: NonZeroU64, + + /// Never time out while waiting for graceful shutdown after SIGINT or SIGTERM received. + /// This is useful when you would like for Vector to attempt to send data until terminated + /// by a SIGKILL. Overrides/cannot be set with `--graceful-shutdown-limit-secs`. 
+ #[arg( + long, + default_value = "false", + env = "VECTOR_NO_GRACEFUL_SHUTDOWN_LIMIT", + group = "graceful-shutdown-limit" + )] + pub no_graceful_shutdown_limit: bool, + /// Set runtime allocation tracing #[cfg(feature = "allocation-tracing")] #[arg(long, env = "ALLOCATION_TRACING", default_value = "false")] diff --git a/src/config/builder.rs b/src/config/builder.rs index a758bac25316b..301e239627e42 100644 --- a/src/config/builder.rs +++ b/src/config/builder.rs @@ -1,6 +1,6 @@ #[cfg(feature = "enterprise")] use std::collections::BTreeMap; -use std::path::Path; +use std::{path::Path, time::Duration}; use indexmap::IndexMap; #[cfg(feature = "enterprise")] @@ -78,6 +78,13 @@ pub struct ConfigBuilder { /// All configured secrets backends. #[serde(default)] pub secret: IndexMap, + + /// The duration in seconds to wait for graceful shutdown after SIGINT or SIGTERM are received. + /// After the duration has passed, Vector will force shutdown. Default value is 60 seconds. This + /// value can be set using a [cli arg](crate::cli::RootOpts::graceful_shutdown_limit_secs). + #[serde(default, skip)] + #[doc(hidden)] + pub graceful_shutdown_duration: Option, } #[cfg(feature = "enterprise")] @@ -195,6 +202,7 @@ impl From for ConfigBuilder { transforms, tests, secret, + graceful_shutdown_duration, hash: _, } = config; @@ -225,6 +233,7 @@ impl From for ConfigBuilder { provider: None, tests, secret, + graceful_shutdown_duration, } } } diff --git a/src/config/compiler.rs b/src/config/compiler.rs index 90ca43ddd7adf..32465a479ab44 100644 --- a/src/config/compiler.rs +++ b/src/config/compiler.rs @@ -56,6 +56,7 @@ pub fn compile(mut builder: ConfigBuilder) -> Result<(Config, Vec), Vec< tests, provider: _, secret, + graceful_shutdown_duration, } = builder; let graph = match Graph::new(&sources, &transforms, &sinks, schema) { @@ -111,6 +112,7 @@ pub fn compile(mut builder: ConfigBuilder) -> Result<(Config, Vec), Vec< transforms, tests, secret, + graceful_shutdown_duration, }; config.propagate_acknowledgements()?; diff --git a/src/config/mod.rs b/src/config/mod.rs index 1de078ae873b9..4c2729738e6f3 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -5,6 +5,7 @@ use std::{ hash::Hash, net::SocketAddr, path::PathBuf, + time::Duration, }; use indexmap::IndexMap; @@ -105,6 +106,7 @@ pub struct Config { pub enrichment_tables: IndexMap, tests: Vec, secret: IndexMap, + pub graceful_shutdown_duration: Option, } impl Config { diff --git a/src/sources/journald.rs b/src/sources/journald.rs index 461e15f0e06bb..2436702dfe49a 100644 --- a/src/sources/journald.rs +++ b/src/sources/journald.rs @@ -1108,7 +1108,7 @@ mod tests { sleep(Duration::from_millis(100)).await; shutdown - .shutdown_all(Instant::now() + Duration::from_secs(1)) + .shutdown_all(Some(Instant::now() + Duration::from_secs(1))) .await; timeout(Duration::from_secs(1), rx.collect()).await.unwrap() diff --git a/src/sources/statsd/mod.rs b/src/sources/statsd/mod.rs index 4f1a09503492c..a655d44147549 100644 --- a/src/sources/statsd/mod.rs +++ b/src/sources/statsd/mod.rs @@ -487,7 +487,7 @@ mod test { // everything that was in up without having to know the exact count. sleep(Duration::from_millis(250)).await; shutdown - .shutdown_all(Instant::now() + Duration::from_millis(100)) + .shutdown_all(Some(Instant::now() + Duration::from_millis(100))) .await; // Read all the events into a `MetricState`, which handles normalizing metrics and tracking @@ -579,7 +579,7 @@ mod test { // everything that was in up without having to know the exact count. 
sleep(Duration::from_millis(250)).await; shutdown - .shutdown_all(Instant::now() + Duration::from_millis(100)) + .shutdown_all(Some(Instant::now() + Duration::from_millis(100))) .await; } } diff --git a/src/sources/syslog.rs b/src/sources/syslog.rs index 12c8a318901a2..71c8bd7b3b728 100644 --- a/src/sources/syslog.rs +++ b/src/sources/syslog.rs @@ -1153,7 +1153,7 @@ mod test { // Shutdown the source, and make sure we've got all the messages we sent in. shutdown - .shutdown_all(Instant::now() + Duration::from_millis(100)) + .shutdown_all(Some(Instant::now() + Duration::from_millis(100))) .await; shutdown_complete.await; @@ -1230,7 +1230,7 @@ mod test { sleep(Duration::from_secs(1)).await; shutdown - .shutdown_all(Instant::now() + Duration::from_millis(100)) + .shutdown_all(Some(Instant::now() + Duration::from_millis(100))) .await; shutdown_complete.await; @@ -1307,7 +1307,7 @@ mod test { // Shutdown the source, and make sure we've got all the messages we sent in. shutdown - .shutdown_all(Instant::now() + Duration::from_millis(100)) + .shutdown_all(Some(Instant::now() + Duration::from_millis(100))) .await; shutdown_complete.await; diff --git a/src/topology/running.rs b/src/topology/running.rs index 903d36be042f4..87fe3fe276f74 100644 --- a/src/topology/running.rs +++ b/src/topology/running.rs @@ -45,6 +45,7 @@ pub struct RunningTopology { abort_tx: mpsc::UnboundedSender<()>, watch: (WatchTx, WatchRx), pub(crate) running: Arc, + graceful_shutdown_duration: Option, } impl RunningTopology { @@ -54,7 +55,6 @@ impl RunningTopology { inputs_tap_metadata: HashMap::new(), outputs: HashMap::new(), outputs_tap_metadata: HashMap::new(), - config, shutdown_coordinator: SourceShutdownCoordinator::default(), detach_triggers: HashMap::new(), source_tasks: HashMap::new(), @@ -62,6 +62,8 @@ impl RunningTopology { abort_tx, watch: watch::channel(TapResource::default()), running: Arc::new(AtomicBool::new(true)), + graceful_shutdown_duration: config.graceful_shutdown_duration, + config, } } @@ -120,30 +122,36 @@ impl RunningTopology { check_handles.entry(key).or_default().push(task); } - // If we reach this, we will forcefully shutdown the sources. - let deadline = Instant::now() + Duration::from_secs(60); - - // If we reach the deadline, this future will print out which components - // won't gracefully shutdown since we will start to forcefully shutdown - // the sources. - let mut check_handles2 = check_handles.clone(); - let timeout = async move { - sleep_until(deadline).await; - // Remove all tasks that have shutdown. - check_handles2.retain(|_key, handles| { - retain(handles, |handle| handle.peek().is_none()); - !handles.is_empty() - }); - let remaining_components = check_handles2 - .keys() - .map(|item| item.to_string()) - .collect::>() - .join(", "); + // If we reach this, we will forcefully shutdown the sources. If None, we will never force shutdown. + let deadline = self + .graceful_shutdown_duration + .map(|grace_period| Instant::now() + grace_period); - error!( - components = ?remaining_components, - "Failed to gracefully shut down in time. Killing components." - ); + let timeout = if let Some(deadline) = deadline { + // If we reach the deadline, this future will print out which components + // won't gracefully shutdown since we will start to forcefully shutdown + // the sources. + let mut check_handles2 = check_handles.clone(); + Box::pin(async move { + sleep_until(deadline).await; + // Remove all tasks that have shutdown. 
+ check_handles2.retain(|_key, handles| { + retain(handles, |handle| handle.peek().is_none()); + !handles.is_empty() + }); + let remaining_components = check_handles2 + .keys() + .map(|item| item.to_string()) + .collect::>() + .join(", "); + + error!( + components = ?remaining_components, + "Failed to gracefully shut down in time. Killing components." + ); + }) as future::BoxFuture<'static, ()> + } else { + Box::pin(future::pending()) as future::BoxFuture<'static, ()> }; // Reports in intervals which components are still running. @@ -163,10 +171,12 @@ impl RunningTopology { .collect::>() .join(", "); - let time_remaining = match deadline.checked_duration_since(Instant::now()) { - Some(remaining) => format!("{} seconds left", remaining.as_secs()), - None => "overdue".to_string(), - }; + let time_remaining = deadline + .map(|d| match d.checked_duration_since(Instant::now()) { + Some(remaining) => format!("{} seconds left", remaining.as_secs()), + None => "overdue".to_string(), + }) + .unwrap_or("no time limit".to_string()); info!( remaining_components = ?remaining_components, diff --git a/website/cue/reference/cli.cue b/website/cue/reference/cli.cue index c29abc8afd70a..b356f454de2ea 100644 --- a/website/cue/reference/cli.cue +++ b/website/cue/reference/cli.cue @@ -109,6 +109,10 @@ cli: { description: env_vars.VECTOR_WATCH_CONFIG.description env_var: "VECTOR_WATCH_CONFIG" } + "no-graceful-shutdown-limit": { + description: env_vars.VECTOR_NO_GRACEFUL_SHUTDOWN_LIMIT.description + env_var: "VECTOR_NO_GRACEFUL_SHUTDOWN_LIMIT" + } } _core_config_options: { @@ -139,6 +143,12 @@ cli: { type: "string" env_var: "VECTOR_CONFIG_YAML" } + "graceful-shutdown-limit-secs": { + description: env_vars.VECTOR_GRACEFUL_SHUTDOWN_LIMIT_SECS.description + default: env_vars.VECTOR_GRACEFUL_SHUTDOWN_LIMIT_SECS.type.uint.default + env_var: "VECTOR_GRACEFUL_SHUTDOWN_LIMIT_SECS" + type: "integer" + } } // Reusable options @@ -603,6 +613,17 @@ cli: { unit: null } } + VECTOR_GRACEFUL_SHUTDOWN_LIMIT_SECS: { + description: "Set the duration in seconds to wait for graceful shutdown after SIGINT or SIGTERM are received. After the duration has passed, Vector will force shutdown. To never force shutdown, use `--no-graceful-shutdown-limit`." + type: uint: { + default: 60 + unit: "seconds" + } + } + VECTOR_NO_GRACEFUL_SHUTDOWN_LIMIT: { + description: "Never time out while waiting for graceful shutdown after SIGINT or SIGTERM received. This is useful when you would like for Vector to attempt to send data until terminated by a SIGKILL. Overrides/cannot be set with `--graceful-shutdown-limit-secs`." + type: bool: default: false + } } // Helpers diff --git a/website/cue/reference/components/sources/exec.cue b/website/cue/reference/components/sources/exec.cue index 6c37b5b313897..4be168e338a09 100644 --- a/website/cue/reference/components/sources/exec.cue +++ b/website/cue/reference/components/sources/exec.cue @@ -117,7 +117,8 @@ components: sources: exec: { On *nix platforms, Vector will issue a SIGTERM to the child process, allowing it to gracefully shutdown, and the source will continue reading until the process exits or - Vector's shutdown grace period expires. + Vector's shutdown grace period expires. The duration of the grace period can be + configured using `--graceful-shutdown-limit-secs`. On Windows, the subprocess will be issued a SIGKILL and terminate abruptly. In the future we hope to support graceful shutdown of Windows processes as well. 
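Taken together, the new options reduce to an optional force-shutdown deadline: `--graceful-shutdown-limit-secs` (default 60) sets it, and `--no-graceful-shutdown-limit` clears it so Vector waits indefinitely. A minimal standalone sketch of that reduction, with the parsed flag values hard-coded for illustration:

```rust
use std::time::{Duration, Instant};

fn main() {
    // Stand-ins for the parsed CLI options (illustrative values only).
    let no_graceful_shutdown_limit = false; // --no-graceful-shutdown-limit
    let graceful_shutdown_limit_secs: u64 = 60; // --graceful-shutdown-limit-secs

    // `None` means "never force shutdown"; `Some(deadline)` forces shutdown once reached.
    let deadline: Option<Instant> = (!no_graceful_shutdown_limit)
        .then(|| Instant::now() + Duration::from_secs(graceful_shutdown_limit_secs));

    match deadline {
        Some(_) => println!("sources get {graceful_shutdown_limit_secs}s to shut down gracefully"),
        None => println!("waiting indefinitely for graceful shutdown"),
    }
}
```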
From f523f70d12053bd8d1d5ceee41c7c843780ded84 Mon Sep 17 00:00:00 2001 From: May Lee Date: Thu, 1 Jun 2023 10:51:53 -0400 Subject: [PATCH 077/236] chore(config): Update field labels for commonly used sources and transforms (#17517) This PR updates the commonly used sources and transforms' field labels that needs a human_name label. Similar to this [PR](https://github.com/vectordotdev/vector/pull/17475). --------- Signed-off-by: Spencer Gilbert Co-authored-by: Spencer Gilbert --- lib/vector-config-common/src/human_friendly.rs | 2 +- lib/vector-core/src/tcp.rs | 1 + src/sources/apache_metrics/mod.rs | 1 + src/sources/aws_ecs_metrics/mod.rs | 1 + src/sources/aws_kinesis_firehose/mod.rs | 4 ++-- src/sources/aws_s3/sqs.rs | 1 + src/sources/aws_sqs/config.rs | 1 + src/sources/gcp_pubsub.rs | 4 ++++ src/sources/http_client/client.rs | 1 + src/sources/kafka.rs | 4 ++++ src/sources/prometheus/scrape.rs | 1 + src/sources/splunk_hec/acknowledgements.rs | 12 +++++++++--- src/sources/util/multiline_config.rs | 1 + src/transforms/aggregate.rs | 1 + src/transforms/reduce/mod.rs | 2 ++ src/transforms/remap.rs | 3 +++ src/transforms/tag_cardinality_limit/config.rs | 1 + src/transforms/throttle.rs | 1 + .../components/sources/base/aws_kinesis_firehose.cue | 4 ++-- .../reference/components/sources/base/splunk_hec.cue | 6 +++--- 20 files changed, 41 insertions(+), 11 deletions(-) diff --git a/lib/vector-config-common/src/human_friendly.rs b/lib/vector-config-common/src/human_friendly.rs index 177e32f428ad7..fab77985ecf69 100644 --- a/lib/vector-config-common/src/human_friendly.rs +++ b/lib/vector-config-common/src/human_friendly.rs @@ -48,7 +48,7 @@ static WELL_KNOWN_ACRONYMS: Lazy> = Lazy::new(|| { "api", "amqp", "aws", "ec2", "ecs", "gcp", "hec", "http", "https", "nats", "nginx", "s3", "sqs", "tls", "ssl", "otel", "gelf", "csv", "json", "rfc3339", "lz4", "us", "eu", "bsd", "vrl", "tcp", "udp", "id", "uuid", "kms", "uri", "url", "acp", "uid", "ip", "pid", - "ndjson", "ewma", "rtt", "cpu", "acl", "imds", "acl", "alpn", + "ndjson", "ewma", "rtt", "cpu", "acl", "imds", "acl", "alpn", "sasl", ]; acronyms.iter().map(|s| s.to_lowercase()).collect() diff --git a/lib/vector-core/src/tcp.rs b/lib/vector-core/src/tcp.rs index 2dbbfafee118d..dd8f7b0e707a1 100644 --- a/lib/vector-core/src/tcp.rs +++ b/lib/vector-core/src/tcp.rs @@ -6,6 +6,7 @@ use vector_config::configurable_component; #[configurable_component] #[derive(Clone, Copy, Debug, PartialEq, Eq)] #[serde(deny_unknown_fields)] +#[configurable(metadata(docs::human_name = "Wait Time"))] pub struct TcpKeepaliveConfig { /// The time to wait before starting to send TCP keepalive probes on an idle connection. #[configurable(metadata(docs::type_unit = "seconds"))] diff --git a/src/sources/apache_metrics/mod.rs b/src/sources/apache_metrics/mod.rs index 1a207624c8cb5..7a0fec5404e0e 100644 --- a/src/sources/apache_metrics/mod.rs +++ b/src/sources/apache_metrics/mod.rs @@ -42,6 +42,7 @@ pub struct ApacheMetricsConfig { /// The interval between scrapes. #[serde(default = "default_scrape_interval_secs")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Scrape Interval"))] scrape_interval_secs: Duration, /// The namespace of the metric. 
diff --git a/src/sources/aws_ecs_metrics/mod.rs b/src/sources/aws_ecs_metrics/mod.rs index e3db0a38c7619..fe652964cf80b 100644 --- a/src/sources/aws_ecs_metrics/mod.rs +++ b/src/sources/aws_ecs_metrics/mod.rs @@ -87,6 +87,7 @@ pub struct AwsEcsMetricsSourceConfig { /// The interval between scrapes, in seconds. #[serde(default = "default_scrape_interval_secs")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Scrape Interval"))] scrape_interval_secs: Duration, /// The namespace of the metric. diff --git a/src/sources/aws_kinesis_firehose/mod.rs b/src/sources/aws_kinesis_firehose/mod.rs index d546ce175b6cd..f5f43deb19f9b 100644 --- a/src/sources/aws_kinesis_firehose/mod.rs +++ b/src/sources/aws_kinesis_firehose/mod.rs @@ -37,7 +37,7 @@ pub struct AwsKinesisFirehoseConfig { #[configurable(metadata(docs::examples = "localhost:443"))] address: SocketAddr, - /// An optional access key to authenticate requests against. + /// An access key to authenticate requests against. /// /// AWS Kinesis Firehose can be configured to pass along a user-configurable access key with each request. If /// configured, `access_key` should be set to the same value. Otherwise, all requests are allowed. @@ -45,7 +45,7 @@ pub struct AwsKinesisFirehoseConfig { #[configurable(metadata(docs::examples = "A94A8FE5CCB19BA61C4C08"))] access_key: Option, - /// An optional list of access keys to authenticate requests against. + /// A list of access keys to authenticate requests against. /// /// AWS Kinesis Firehose can be configured to pass along a user-configurable access key with each request. If /// configured, `access_keys` should be set to the same value. Otherwise, all requests are allowed. diff --git a/src/sources/aws_s3/sqs.rs b/src/sources/aws_s3/sqs.rs index 3907ecaa9241c..b206e212a3465 100644 --- a/src/sources/aws_s3/sqs.rs +++ b/src/sources/aws_s3/sqs.rs @@ -90,6 +90,7 @@ pub(super) struct Config { #[serde(default = "default_visibility_timeout_secs")] #[derivative(Default(value = "default_visibility_timeout_secs()"))] #[configurable(metadata(docs::type_unit = "seconds"))] + #[configurable(metadata(docs::human_name = "Visibility Timeout"))] pub(super) visibility_timeout_secs: u32, /// Whether to delete the message once it is processed. diff --git a/src/sources/aws_sqs/config.rs b/src/sources/aws_sqs/config.rs index 733eab84c6c0c..03de7d24d52ae 100644 --- a/src/sources/aws_sqs/config.rs +++ b/src/sources/aws_sqs/config.rs @@ -58,6 +58,7 @@ pub struct AwsSqsConfig { #[serde(default = "default_visibility_timeout_secs")] #[derivative(Default(value = "default_visibility_timeout_secs()"))] #[configurable(metadata(docs::type_unit = "seconds"))] + #[configurable(metadata(docs::human_name = "Visibility Timeout"))] pub(super) visibility_timeout_secs: u32, /// Whether to delete the message once it is processed. diff --git a/src/sources/gcp_pubsub.rs b/src/sources/gcp_pubsub.rs index ffef638f2d3a6..262693551eb45 100644 --- a/src/sources/gcp_pubsub.rs +++ b/src/sources/gcp_pubsub.rs @@ -163,6 +163,7 @@ pub struct PubsubConfig { /// are all busy and so open a new stream. #[serde(default = "default_poll_time")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Poll Time"))] pub poll_time_seconds: Duration, /// The acknowledgement deadline, in seconds, to use for this stream. @@ -170,6 +171,7 @@ pub struct PubsubConfig { /// Messages that are not acknowledged when this deadline expires may be retransmitted. 
#[serde(default = "default_ack_deadline")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Acknowledgement Deadline"))] pub ack_deadline_secs: Duration, /// The acknowledgement deadline, in seconds, to use for this stream. @@ -183,6 +185,7 @@ pub struct PubsubConfig { /// The amount of time, in seconds, to wait between retry attempts after an error. #[serde(default = "default_retry_delay")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Retry Delay"))] pub retry_delay_secs: Duration, /// The amount of time, in seconds, to wait between retry attempts after an error. @@ -196,6 +199,7 @@ pub struct PubsubConfig { /// `60`, you may see periodic errors sent from the server. #[serde(default = "default_keepalive")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Keepalive"))] pub keepalive_secs: Duration, /// The namespace to use for logs. This overrides the global setting. diff --git a/src/sources/http_client/client.rs b/src/sources/http_client/client.rs index 61c7a6c572687..6732a4975aad7 100644 --- a/src/sources/http_client/client.rs +++ b/src/sources/http_client/client.rs @@ -55,6 +55,7 @@ pub struct HttpClientConfig { #[serde(default = "default_interval")] #[serde_as(as = "serde_with::DurationSeconds")] #[serde(rename = "scrape_interval_secs")] + #[configurable(metadata(docs::human_name = "Scrape Interval"))] pub interval: Duration, /// Custom parameters for the HTTP request query string. diff --git a/src/sources/kafka.rs b/src/sources/kafka.rs index 762c13f9c6cd9..39f16074f5342 100644 --- a/src/sources/kafka.rs +++ b/src/sources/kafka.rs @@ -107,6 +107,7 @@ pub struct KafkaSourceConfig { #[configurable(metadata(docs::examples = 5000, docs::examples = 10000))] #[configurable(metadata(docs::advanced))] #[serde(default = "default_session_timeout_ms")] + #[configurable(metadata(docs::human_name = "Session Timeout"))] session_timeout_ms: Duration, /// Timeout for network requests. @@ -114,6 +115,7 @@ pub struct KafkaSourceConfig { #[configurable(metadata(docs::examples = 30000, docs::examples = 60000))] #[configurable(metadata(docs::advanced))] #[serde(default = "default_socket_timeout_ms")] + #[configurable(metadata(docs::human_name = "Socket Timeout"))] socket_timeout_ms: Duration, /// Maximum time the broker may wait to fill the response. @@ -121,12 +123,14 @@ pub struct KafkaSourceConfig { #[configurable(metadata(docs::examples = 50, docs::examples = 100))] #[configurable(metadata(docs::advanced))] #[serde(default = "default_fetch_wait_max_ms")] + #[configurable(metadata(docs::human_name = "Max Fetch Wait Time"))] fetch_wait_max_ms: Duration, /// The frequency that the consumer offsets are committed (written) to offset storage. #[serde_as(as = "serde_with::DurationMilliSeconds")] #[serde(default = "default_commit_interval_ms")] #[configurable(metadata(docs::examples = 5000, docs::examples = 10000))] + #[configurable(metadata(docs::human_name = "Commit Interval"))] commit_interval_ms: Duration, /// Overrides the name of the log field used to add the message key to each event. 
diff --git a/src/sources/prometheus/scrape.rs b/src/sources/prometheus/scrape.rs index 7deb49eb7085b..4a7c66425359e 100644 --- a/src/sources/prometheus/scrape.rs +++ b/src/sources/prometheus/scrape.rs @@ -57,6 +57,7 @@ pub struct PrometheusScrapeConfig { #[serde(default = "default_interval")] #[serde_as(as = "serde_with::DurationSeconds")] #[serde(rename = "scrape_interval_secs")] + #[configurable(metadata(docs::human_name = "Scrape Interval"))] interval: Duration, /// The tag name added to each event representing the scraped instance's `host:port`. diff --git a/src/sources/splunk_hec/acknowledgements.rs b/src/sources/splunk_hec/acknowledgements.rs index 2eee1ed78ac8e..68a4980e0969b 100644 --- a/src/sources/splunk_hec/acknowledgements.rs +++ b/src/sources/splunk_hec/acknowledgements.rs @@ -27,28 +27,34 @@ pub struct HecAcknowledgementsConfig { /// Enables end-to-end acknowledgements. pub enabled: Option, - /// The maximum number of ack statuses pending query across all channels. + /// The maximum number of acknowledgement statuses pending query across all channels. /// /// Equivalent to the `max_number_of_acked_requests_pending_query` Splunk HEC setting. /// /// Minimum of `1`. + #[configurable(metadata(docs::human_name = "Max Number of Pending Acknowledgements"))] pub max_pending_acks: NonZeroU64, /// The maximum number of Splunk HEC channels clients can use with this source. /// /// Minimum of `1`. + #[configurable(metadata(docs::human_name = "Max Number of Acknowledgement Channels"))] pub max_number_of_ack_channels: NonZeroU64, - /// The maximum number of ack statuses pending query for a single channel. + /// The maximum number of acknowledgement statuses pending query for a single channel. /// /// Equivalent to the `max_number_of_acked_requests_pending_query_per_ack_channel` Splunk HEC setting. /// /// Minimum of `1`. + #[configurable(metadata( + docs::human_name = "Max Number of Pending Acknowledgements Per Channel" + ))] pub max_pending_acks_per_channel: NonZeroU64, /// Whether or not to remove channels after idling for `max_idle_time` seconds. /// - /// A channel is idling if it is not used for sending data or querying ack statuses. + /// A channel is idling if it is not used for sending data or querying acknowledgement statuses. + #[configurable(metadata(docs::human_name = "Acknowledgement Idle Cleanup"))] pub ack_idle_cleanup: bool, /// The amount of time, in seconds, a channel is allowed to idle before removal. diff --git a/src/sources/util/multiline_config.rs b/src/sources/util/multiline_config.rs index 1a840fa8a6d67..9a8d9df734798 100644 --- a/src/sources/util/multiline_config.rs +++ b/src/sources/util/multiline_config.rs @@ -41,6 +41,7 @@ pub struct MultilineConfig { #[serde_as(as = "serde_with::DurationMilliSeconds")] #[configurable(metadata(docs::examples = 1000))] #[configurable(metadata(docs::examples = 600000))] + #[configurable(metadata(docs::human_name = "Timeout"))] pub timeout_ms: Duration, } diff --git a/src/transforms/aggregate.rs b/src/transforms/aggregate.rs index f6a60d0139a5b..de97b69eafe41 100644 --- a/src/transforms/aggregate.rs +++ b/src/transforms/aggregate.rs @@ -26,6 +26,7 @@ pub struct AggregateConfig { /// /// During this time frame, metrics with the same series data (name, namespace, tags, and so on) are aggregated. 
#[serde(default = "default_interval_ms")] + #[configurable(metadata(docs::human_name = "Flush Interval"))] pub interval_ms: u64, } diff --git a/src/transforms/reduce/mod.rs b/src/transforms/reduce/mod.rs index 658abb1b1383e..455a4b142e4d6 100644 --- a/src/transforms/reduce/mod.rs +++ b/src/transforms/reduce/mod.rs @@ -47,12 +47,14 @@ pub struct ReduceConfig { #[serde(default = "default_expire_after_ms")] #[serde_as(as = "serde_with::DurationMilliSeconds")] #[derivative(Default(value = "default_expire_after_ms()"))] + #[configurable(metadata(docs::human_name = "Expire After"))] pub expire_after_ms: Duration, /// The interval to check for and flush any expired events, in milliseconds. #[serde(default = "default_flush_period_ms")] #[serde_as(as = "serde_with::DurationMilliSeconds")] #[derivative(Default(value = "default_flush_period_ms()"))] + #[configurable(metadata(docs::human_name = "Flush Period"))] pub flush_period_ms: Duration, /// The maximum number of events to group together. diff --git a/src/transforms/remap.rs b/src/transforms/remap.rs index 877a7dde048db..49c84faad97e2 100644 --- a/src/transforms/remap.rs +++ b/src/transforms/remap.rs @@ -102,6 +102,7 @@ pub struct RemapConfig { /// Additionally, dropped events can potentially be diverted to a specially named output for /// further logging and analysis by setting `reroute_dropped`. #[serde(default = "crate::serde::default_false")] + #[configurable(metadata(docs::human_name = "Drop Event on Error"))] pub drop_on_error: bool, /// Drops any event that is manually aborted during processing. @@ -117,6 +118,7 @@ pub struct RemapConfig { /// /// [vrl_docs_abort]: https://vector.dev/docs/reference/vrl/expressions/#abort #[serde(default = "crate::serde::default_true")] + #[configurable(metadata(docs::human_name = "Drop Event on Abort"))] pub drop_on_abort: bool, /// Reroutes dropped events to a named output instead of halting processing on them. @@ -129,6 +131,7 @@ pub struct RemapConfig { /// to a specially-named output, `dropped`. The original event is annotated with additional /// fields describing why the event was dropped. #[serde(default = "crate::serde::default_false")] + #[configurable(metadata(docs::human_name = "Reroute Dropped Events"))] pub reroute_dropped: bool, #[configurable(derived, metadata(docs::hidden))] diff --git a/src/transforms/tag_cardinality_limit/config.rs b/src/transforms/tag_cardinality_limit/config.rs index d8fe74ea1bb8d..e3dbe992e0fcc 100644 --- a/src/transforms/tag_cardinality_limit/config.rs +++ b/src/transforms/tag_cardinality_limit/config.rs @@ -60,6 +60,7 @@ pub struct BloomFilterConfig { /// The larger the cache size, the less likely it is to have a false positive, or a case where /// we allow a new value for tag even after we have reached the configured limits. #[serde(default = "default_cache_size")] + #[configurable(metadata(docs::human_name = "Cache Size per Key"))] pub cache_size_per_key: usize, } diff --git a/src/transforms/throttle.rs b/src/transforms/throttle.rs index 2eba0b532910b..740e99477bc2f 100644 --- a/src/transforms/throttle.rs +++ b/src/transforms/throttle.rs @@ -31,6 +31,7 @@ pub struct ThrottleConfig { /// The time window in which the configured `threshold` is applied, in seconds. #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Time Window"))] window_secs: Duration, /// The value to group events into separate buckets to be rate limited independently. 
diff --git a/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue b/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue index 033c1c7675cc9..998223c93dc03 100644 --- a/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue @@ -5,7 +5,7 @@ base: components: sources: aws_kinesis_firehose: configuration: { deprecated: true deprecated_message: "This option has been deprecated, use `access_keys` instead." description: """ - An optional access key to authenticate requests against. + An access key to authenticate requests against. AWS Kinesis Firehose can be configured to pass along a user-configurable access key with each request. If configured, `access_key` should be set to the same value. Otherwise, all requests are allowed. @@ -15,7 +15,7 @@ base: components: sources: aws_kinesis_firehose: configuration: { } access_keys: { description: """ - An optional list of access keys to authenticate requests against. + A list of access keys to authenticate requests against. AWS Kinesis Firehose can be configured to pass along a user-configurable access key with each request. If configured, `access_keys` should be set to the same value. Otherwise, all requests are allowed. diff --git a/website/cue/reference/components/sources/base/splunk_hec.cue b/website/cue/reference/components/sources/base/splunk_hec.cue index 55e33dd0e07c1..7b5178ab639bc 100644 --- a/website/cue/reference/components/sources/base/splunk_hec.cue +++ b/website/cue/reference/components/sources/base/splunk_hec.cue @@ -9,7 +9,7 @@ base: components: sources: splunk_hec: configuration: { description: """ Whether or not to remove channels after idling for `max_idle_time` seconds. - A channel is idling if it is not used for sending data or querying ack statuses. + A channel is idling if it is not used for sending data or querying acknowledgement statuses. """ required: false type: bool: default: false @@ -41,7 +41,7 @@ base: components: sources: splunk_hec: configuration: { } max_pending_acks: { description: """ - The maximum number of ack statuses pending query across all channels. + The maximum number of acknowledgement statuses pending query across all channels. Equivalent to the `max_number_of_acked_requests_pending_query` Splunk HEC setting. @@ -52,7 +52,7 @@ base: components: sources: splunk_hec: configuration: { } max_pending_acks_per_channel: { description: """ - The maximum number of ack statuses pending query for a single channel. + The maximum number of acknowledgement statuses pending query for a single channel. Equivalent to the `max_number_of_acked_requests_pending_query_per_ack_channel` Splunk HEC setting. 
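The labels added throughout this patch use the `docs::human_name` metadata attribute understood by Vector's `vector_config` derive machinery. A minimal sketch of the pattern follows; the struct, field, and default are invented for illustration, and it only builds inside the Vector workspace where the internal `vector_config` crate is available:

```rust
use vector_config::configurable_component;

/// Example scrape configuration (illustrative only).
#[configurable_component]
#[derive(Clone, Debug)]
pub struct ExampleScrapeConfig {
    /// The interval between scrapes, in seconds.
    #[serde(default = "default_scrape_interval_secs")]
    // Overrides the label derived from the field name ("Scrape Interval Secs")
    // with a human-friendly one in the generated documentation.
    #[configurable(metadata(docs::human_name = "Scrape Interval"))]
    scrape_interval_secs: u64,
}

const fn default_scrape_interval_secs() -> u64 {
    15
}
```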
From ced219e70405c9ed9012444cc04efad8f91d3590 Mon Sep 17 00:00:00 2001 From: Andrey Koshchiy Date: Thu, 1 Jun 2023 22:22:59 +0400 Subject: [PATCH 078/236] enhancement(compression): zstd compression support (#17371) --- src/sinks/aws_s3/integration_tests.rs | 52 +++++ src/sinks/azure_blob/request_builder.rs | 1 + src/sinks/http.rs | 105 ++++++--- src/sinks/util/buffer/compression.rs | 211 +++++++++++------- src/sinks/util/buffer/mod.rs | 18 +- src/sinks/util/compressor.rs | 15 +- src/sinks/util/mod.rs | 1 + src/sinks/util/zstd.rs | 68 ++++++ .../components/sinks/base/appsignal.cue | 5 + .../sinks/base/aws_cloudwatch_logs.cue | 5 + .../sinks/base/aws_cloudwatch_metrics.cue | 5 + .../sinks/base/aws_kinesis_firehose.cue | 5 + .../sinks/base/aws_kinesis_streams.cue | 5 + .../components/sinks/base/aws_s3.cue | 5 + .../reference/components/sinks/base/axiom.cue | 5 + .../components/sinks/base/azure_blob.cue | 5 + .../components/sinks/base/clickhouse.cue | 5 + .../components/sinks/base/datadog_logs.cue | 5 + .../components/sinks/base/datadog_traces.cue | 5 + .../components/sinks/base/elasticsearch.cue | 5 + .../sinks/base/gcp_cloud_storage.cue | 5 + .../reference/components/sinks/base/http.cue | 5 + .../components/sinks/base/humio_logs.cue | 5 + .../components/sinks/base/humio_metrics.cue | 5 + .../reference/components/sinks/base/loki.cue | 5 + .../components/sinks/base/new_relic.cue | 5 + .../components/sinks/base/splunk_hec_logs.cue | 5 + .../sinks/base/splunk_hec_metrics.cue | 5 + .../components/sinks/base/webhdfs.cue | 5 + 29 files changed, 455 insertions(+), 121 deletions(-) create mode 100644 src/sinks/util/zstd.rs diff --git a/src/sinks/aws_s3/integration_tests.rs b/src/sinks/aws_s3/integration_tests.rs index 9e1efa3e24620..a86d647fb3c6c 100644 --- a/src/sinks/aws_s3/integration_tests.rs +++ b/src/sinks/aws_s3/integration_tests.rs @@ -252,6 +252,51 @@ async fn s3_gzip() { assert_eq!(lines, response_lines); } +#[tokio::test] +async fn s3_zstd() { + // Here, we're creating a bunch of events, approximately 3000, while setting our batch size + // to 1000, and using zstd compression. We test to ensure that all of the keys we end up + // writing represent the sum total of the lines: we expect 3 batches, each of which should + // have 1000 lines. 
+ let cx = SinkContext::new_test(); + + let bucket = uuid::Uuid::new_v4().to_string(); + + create_bucket(&bucket, false).await; + + let batch_size = 1_000; + let batch_multiplier = 3; + let config = S3SinkConfig { + compression: Compression::zstd_default(), + filename_time_format: "%s%f".into(), + ..config(&bucket, batch_size) + }; + + let prefix = config.key_prefix.clone(); + let service = config.create_service(&cx.globals.proxy).await.unwrap(); + let sink = config.build_processor(service).unwrap(); + + let (lines, events, receiver) = make_events_batch(100, batch_size * batch_multiplier); + run_and_assert_sink_compliance(sink, events, &AWS_SINK_TAGS).await; + assert_eq!(receiver.await, BatchStatus::Delivered); + + let keys = get_keys(&bucket, prefix).await; + assert_eq!(keys.len(), batch_multiplier); + + let mut response_lines: Vec = Vec::new(); + let mut key_stream = stream::iter(keys); + while let Some(key) = key_stream.next().await { + assert!(key.ends_with(".log.zst")); + + let obj = get_object(&bucket, key).await; + assert_eq!(obj.content_encoding, Some("zstd".to_string())); + + response_lines.append(&mut get_zstd_lines(obj).await); + } + + assert_eq!(lines, response_lines); +} + // NOTE: this test doesn't actually validate anything because localstack // doesn't enforce the required Content-MD5 header on the request for // buckets with object lock enabled @@ -481,6 +526,13 @@ async fn get_gzipped_lines(obj: GetObjectOutput) -> Vec { buf_read.lines().map(|l| l.unwrap()).collect() } +async fn get_zstd_lines(obj: GetObjectOutput) -> Vec { + let body = get_object_output_body(obj).await; + let decoder = zstd::Decoder::new(body).expect("zstd decoder initialization failed"); + let buf_read = BufReader::new(decoder); + buf_read.lines().map(|l| l.unwrap()).collect() +} + async fn get_object_output_body(obj: GetObjectOutput) -> impl std::io::Read { obj.body.collect().await.unwrap().reader() } diff --git a/src/sinks/azure_blob/request_builder.rs b/src/sinks/azure_blob/request_builder.rs index 2ec67a7c758eb..ae8f8770381a5 100644 --- a/src/sinks/azure_blob/request_builder.rs +++ b/src/sinks/azure_blob/request_builder.rs @@ -106,6 +106,7 @@ impl Compression { Self::None => "text/plain", Self::Gzip(_) => "application/gzip", Self::Zlib(_) => "application/zlib", + Self::Zstd(_) => "application/zstd", } } } diff --git a/src/sinks/http.rs b/src/sinks/http.rs index e85bf8b1f4365..a49335b77b5c3 100644 --- a/src/sinks/http.rs +++ b/src/sinks/http.rs @@ -2,7 +2,6 @@ use std::io::Write; use bytes::{BufMut, Bytes, BytesMut}; use codecs::encoding::{CharacterDelimitedEncoder, Framer, Serializer}; -use flate2::write::{GzEncoder, ZlibEncoder}; use futures::{future, FutureExt, SinkExt}; use http::{ header::{HeaderName, HeaderValue, AUTHORIZATION}, @@ -23,7 +22,7 @@ use crate::{ sinks::util::{ self, http::{BatchedHttpSink, HttpEventEncoder, RequestConfig}, - BatchConfig, Buffer, Compression, RealtimeSizeBasedDefaultBatchSettings, + BatchConfig, Buffer, Compression, Compressor, RealtimeSizeBasedDefaultBatchSettings, TowerRequestConfig, UriSerde, }, tls::{TlsConfig, TlsSettings}, @@ -375,24 +374,21 @@ impl util::http::HttpSink for HttpSink { builder = builder.header("Content-Type", content_type); } - match self.compression { - Compression::Gzip(level) => { - builder = builder.header("Content-Encoding", "gzip"); - - let buffer = BytesMut::new(); - let mut w = GzEncoder::new(buffer.writer(), level.as_flate2()); - w.write_all(&body).expect("Writing to Vec can't fail"); - body = w.finish().expect("Writing to Vec can't 
fail").into_inner(); - } - Compression::Zlib(level) => { - builder = builder.header("Content-Encoding", "deflate"); - - let buffer = BytesMut::new(); - let mut w = ZlibEncoder::new(buffer.writer(), level.as_flate2()); - w.write_all(&body).expect("Writing to Vec can't fail"); - body = w.finish().expect("Writing to Vec can't fail").into_inner(); - } - Compression::None => {} + let compression = self.compression; + + if compression.is_compressed() { + builder = builder.header( + "Content-Encoding", + compression + .content_encoding() + .expect("Encoding should be specified."), + ); + + let mut compressor = Compressor::from(compression); + compressor + .write_all(&body) + .expect("Writing to Vec can't fail."); + body = compressor.finish().expect("Writing to Vec can't fail."); } let headers = builder @@ -477,12 +473,12 @@ mod tests { encoding::FramingConfig, JsonSerializerConfig, NewlineDelimitedEncoderConfig, TextSerializerConfig, }; - use flate2::read::MultiGzDecoder; + use flate2::{read::MultiGzDecoder, read::ZlibDecoder}; use futures::{channel::mpsc, stream, StreamExt}; use headers::{Authorization, HeaderMapExt}; use http::request::Parts; use hyper::{Method, Response, StatusCode}; - use serde::Deserialize; + use serde::{de, Deserialize}; use vector_core::event::{BatchNotifier, BatchStatus, LogEvent}; use super::*; @@ -812,7 +808,36 @@ mod tests { } #[tokio::test] - async fn json_compression() { + async fn json_gzip_compression() { + json_compression("gzip").await; + } + + #[tokio::test] + async fn json_zstd_compression() { + json_compression("zstd").await; + } + + #[tokio::test] + async fn json_zlib_compression() { + json_compression("zlib").await; + } + + #[tokio::test] + async fn json_gzip_compression_with_payload_wrapper() { + json_compression_with_payload_wrapper("gzip").await; + } + + #[tokio::test] + async fn json_zlib_compression_with_payload_wrapper() { + json_compression_with_payload_wrapper("zlib").await; + } + + #[tokio::test] + async fn json_zstd_compression_with_payload_wrapper() { + json_compression_with_payload_wrapper("zstd").await; + } + + async fn json_compression(compression: &str) { components::assert_sink_compliance(&HTTP_SINK_TAGS, async { let num_lines = 1000; @@ -820,7 +845,7 @@ mod tests { let config = r#" uri = "http://$IN_ADDR/frames" - compression = "gzip" + compression = "$COMPRESSION" encoding.codec = "json" method = "post" @@ -829,7 +854,9 @@ mod tests { user = "waldo" password = "hunter2" "# - .replace("$IN_ADDR", &in_addr.to_string()); + .replace("$IN_ADDR", &in_addr.to_string()) + .replace("$COMPRESSION", compression); + let config: HttpSinkConfig = toml::from_str(&config).unwrap(); let cx = SinkContext::new_test(); @@ -856,8 +883,7 @@ mod tests { Some(Authorization::basic("waldo", "hunter2")), parts.headers.typed_get() ); - let lines: Vec = - serde_json::from_reader(MultiGzDecoder::new(body.reader())).unwrap(); + let lines: Vec = parse_compressed_json(compression, body); stream::iter(lines) }) .map(|line| line.get("message").unwrap().as_str().unwrap().to_owned()) @@ -870,8 +896,7 @@ mod tests { .await; } - #[tokio::test] - async fn json_compression_with_payload_wrapper() { + async fn json_compression_with_payload_wrapper(compression: &str) { components::assert_sink_compliance(&HTTP_SINK_TAGS, async { let num_lines = 1000; @@ -879,7 +904,7 @@ mod tests { let config = r#" uri = "http://$IN_ADDR/frames" - compression = "gzip" + compression = "$COMPRESSION" encoding.codec = "json" payload_prefix = '{"data":' payload_suffix = "}" @@ -890,7 +915,9 @@ mod tests { 
user = "waldo" password = "hunter2" "# - .replace("$IN_ADDR", &in_addr.to_string()); + .replace("$IN_ADDR", &in_addr.to_string()) + .replace("$COMPRESSION", compression); + let config: HttpSinkConfig = toml::from_str(&config).unwrap(); let cx = SinkContext::new_test(); @@ -918,8 +945,8 @@ mod tests { parts.headers.typed_get() ); - let message: serde_json::Value = - serde_json::from_reader(MultiGzDecoder::new(body.reader())).unwrap(); + let message: serde_json::Value = parse_compressed_json(compression, body); + let lines: Vec = message["data"].as_array().unwrap().to_vec(); stream::iter(lines) @@ -934,6 +961,18 @@ mod tests { .await; } + fn parse_compressed_json(compression: &str, buf: Bytes) -> T + where + T: de::DeserializeOwned, + { + match compression { + "gzip" => serde_json::from_reader(MultiGzDecoder::new(buf.reader())).unwrap(), + "zstd" => serde_json::from_reader(zstd::Decoder::new(buf.reader()).unwrap()).unwrap(), + "zlib" => serde_json::from_reader(ZlibDecoder::new(buf.reader())).unwrap(), + _ => panic!("undefined compression: {}", compression), + } + } + async fn get_received( rx: mpsc::Receiver<(Parts, Bytes)>, assert_parts: impl Fn(Parts), diff --git a/src/sinks/util/buffer/compression.rs b/src/sinks/util/buffer/compression.rs index f4e9e439f0cd6..8171e4a563238 100644 --- a/src/sinks/util/buffer/compression.rs +++ b/src/sinks/util/buffer/compression.rs @@ -13,6 +13,8 @@ use vector_config::{ }; use vector_config_common::attributes::CustomAttribute; +use crate::sinks::util::zstd::ZstdCompressionLevel; + /// Compression configuration. #[derive(Copy, Clone, Debug, Derivative, Eq, PartialEq)] #[derivative(Default)] @@ -30,6 +32,11 @@ pub enum Compression { /// /// [zlib]: https://zlib.net/ Zlib(CompressionLevel), + + /// [Zstandard][zstd] compression. 
+ /// + /// [zstd]: https://facebook.github.io/zstd/ + Zstd(CompressionLevel), } impl Compression { @@ -53,11 +60,16 @@ impl Compression { Compression::Zlib(CompressionLevel::const_default()) } + pub const fn zstd_default() -> Compression { + Compression::Zstd(CompressionLevel::const_default()) + } + pub const fn content_encoding(self) -> Option<&'static str> { match self { Self::None => None, Self::Gzip(_) => Some("gzip"), Self::Zlib(_) => Some("deflate"), + Self::Zstd(_) => Some("zstd"), } } @@ -65,6 +77,7 @@ impl Compression { match self { Self::Gzip(_) => Some("gzip"), Self::Zlib(_) => Some("deflate"), + Self::Zstd(_) => Some("zstd"), _ => None, } } @@ -74,13 +87,23 @@ impl Compression { Self::None => "log", Self::Gzip(_) => "log.gz", Self::Zlib(_) => "log.zz", + Self::Zstd(_) => "log.zst", + } + } + + pub const fn max_compression_level_val(self) -> u32 { + match self { + Compression::None => 0, + Compression::Gzip(_) => 9, + Compression::Zlib(_) => 9, + Compression::Zstd(_) => 21, } } - pub const fn level(self) -> flate2::Compression { + pub const fn compression_level(self) -> CompressionLevel { match self { - Self::None => flate2::Compression::none(), - Self::Gzip(level) | Self::Zlib(level) => level.as_flate2(), + Self::None => CompressionLevel::None, + Self::Gzip(level) | Self::Zlib(level) | Self::Zstd(level) => level, } } } @@ -91,6 +114,9 @@ impl fmt::Display for Compression { Compression::None => write!(f, "none"), Compression::Gzip(ref level) => write!(f, "gzip({})", level.as_flate2().level()), Compression::Zlib(ref level) => write!(f, "zlib({})", level.as_flate2().level()), + Compression::Zstd(ref level) => { + write!(f, "zstd({})", ZstdCompressionLevel::from(*level)) + } } } } @@ -117,9 +143,10 @@ impl<'de> de::Deserialize<'de> for Compression { "none" => Ok(Compression::None), "gzip" => Ok(Compression::gzip_default()), "zlib" => Ok(Compression::zlib_default()), + "zstd" => Ok(Compression::zstd_default()), _ => Err(de::Error::invalid_value( de::Unexpected::Str(s), - &r#""none" or "gzip" or "zlib""#, + &r#""none" or "gzip" or "zlib" or "zstd""#, )), } } @@ -149,7 +176,7 @@ impl<'de> de::Deserialize<'de> for Compression { }; } - match algorithm + let compression = match algorithm .ok_or_else(|| de::Error::missing_field("algorithm"))? 
.as_str() { @@ -159,11 +186,26 @@ impl<'de> de::Deserialize<'de> for Compression { }, "gzip" => Ok(Compression::Gzip(level.unwrap_or_default())), "zlib" => Ok(Compression::Zlib(level.unwrap_or_default())), + "zstd" => Ok(Compression::Zstd(level.unwrap_or_default())), algorithm => Err(de::Error::unknown_variant( algorithm, - &["none", "gzip", "zlib"], + &["none", "gzip", "zlib", "zstd"], )), + }?; + + if let CompressionLevel::Val(level) = compression.compression_level() { + let max_level = compression.max_compression_level_val(); + if level > max_level { + let msg = std::format!( + "invalid value `{}`, expected value in range [0, {}]", + level, + max_level + ); + return Err(de::Error::custom(msg)); + } } + + Ok(compression) } } @@ -178,12 +220,10 @@ impl ser::Serialize for Compression { { use ser::SerializeMap; - let default_level = CompressionLevel::const_default(); - match self { Compression::None => serializer.serialize_str("none"), Compression::Gzip(gzip_level) => { - if *gzip_level != default_level { + if *gzip_level != CompressionLevel::Default { let mut map = serializer.serialize_map(None)?; map.serialize_entry("algorithm", "gzip")?; map.serialize_entry("level", &gzip_level)?; @@ -193,7 +233,7 @@ impl ser::Serialize for Compression { } } Compression::Zlib(zlib_level) => { - if *zlib_level != default_level { + if *zlib_level != CompressionLevel::Default { let mut map = serializer.serialize_map(None)?; map.serialize_entry("algorithm", "zlib")?; map.serialize_entry("level", &zlib_level)?; @@ -202,6 +242,16 @@ impl ser::Serialize for Compression { serializer.serialize_str("zlib") } } + Compression::Zstd(zstd_level) => { + if *zstd_level != CompressionLevel::Default { + let mut map = serializer.serialize_map(None)?; + map.serialize_entry("algorithm", "zstd")?; + map.serialize_entry("level", &zstd_level)?; + map.end() + } else { + serializer.serialize_str("zstd") + } + } } } } @@ -258,10 +308,17 @@ impl Configurable for Compression { "[zlib]: https://zlib.net/", ); + let zstd_string_subschema = generate_string_schema( + "Zstd", + Some("[Zstandard][zstd] compression."), + "[zstd]: https://facebook.github.io/zstd/", + ); + let mut all_string_oneof_subschema = generate_one_of_schema(&[ none_string_subschema, gzip_string_subschema, zlib_string_subschema, + zstd_string_subschema, ]); apply_base_metadata(&mut all_string_oneof_subschema, string_metadata); @@ -313,32 +370,28 @@ impl ToValue for Compression { /// Compression level. 
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] -pub struct CompressionLevel(flate2::Compression); +pub enum CompressionLevel { + None, + #[default] + Default, + Best, + Fast, + Val(u32), +} impl CompressionLevel { - #[cfg(test)] - const fn new(level: u32) -> Self { - Self(flate2::Compression::new(level)) - } - - const fn const_default() -> Self { - Self(flate2::Compression::new(6)) + pub const fn const_default() -> Self { + CompressionLevel::Default } - const fn none() -> Self { - Self(flate2::Compression::none()) - } - - const fn best() -> Self { - Self(flate2::Compression::best()) - } - - const fn fast() -> Self { - Self(flate2::Compression::fast()) - } - - pub const fn as_flate2(self) -> flate2::Compression { - self.0 + pub fn as_flate2(self) -> flate2::Compression { + match self { + CompressionLevel::None => flate2::Compression::none(), + CompressionLevel::Default => flate2::Compression::default(), + CompressionLevel::Best => flate2::Compression::best(), + CompressionLevel::Fast => flate2::Compression::fast(), + CompressionLevel::Val(level) => flate2::Compression::new(level), + } } } @@ -353,7 +406,7 @@ impl<'de> de::Deserialize<'de> for CompressionLevel { type Value = CompressionLevel; fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("number or string") + f.write_str("unsigned number or string") } fn visit_str(self, s: &str) -> Result @@ -361,10 +414,10 @@ impl<'de> de::Deserialize<'de> for CompressionLevel { E: de::Error, { match s { - "none" => Ok(CompressionLevel::none()), - "fast" => Ok(CompressionLevel::fast()), - "default" => Ok(CompressionLevel::const_default()), - "best" => Ok(CompressionLevel::best()), + "none" => Ok(CompressionLevel::None), + "fast" => Ok(CompressionLevel::Fast), + "default" => Ok(CompressionLevel::Default), + "best" => Ok(CompressionLevel::Best), level => { return Err(de::Error::invalid_value( de::Unexpected::Str(level), @@ -374,28 +427,11 @@ impl<'de> de::Deserialize<'de> for CompressionLevel { } } - fn visit_i64(self, v: i64) -> Result - where - E: de::Error, - { - Err(de::Error::invalid_value( - de::Unexpected::Other(&v.to_string()), - &"0, 1, 2, 3, 4, 5, 6, 7, 8 or 9", - )) - } - fn visit_u64(self, v: u64) -> Result where E: de::Error, { - if v <= 9 { - Ok(CompressionLevel(flate2::Compression::new(v as u32))) - } else { - return Err(de::Error::invalid_value( - de::Unexpected::Unsigned(v), - &"0, 1, 2, 3, 4, 5, 6, 7, 8 or 9", - )); - } + Ok(CompressionLevel::Val(v as u32)) } } @@ -408,15 +444,12 @@ impl ser::Serialize for CompressionLevel { where S: ser::Serializer, { - const NONE: CompressionLevel = CompressionLevel::none(); - const FAST: CompressionLevel = CompressionLevel::fast(); - const BEST: CompressionLevel = CompressionLevel::best(); - match *self { - NONE => serializer.serialize_str("none"), - FAST => serializer.serialize_str("fast"), - BEST => serializer.serialize_str("best"), - level => serializer.serialize_u64(u64::from(level.0.level())), + CompressionLevel::None => serializer.serialize_str("none"), + CompressionLevel::Default => serializer.serialize_str("default"), + CompressionLevel::Best => serializer.serialize_str("best"), + CompressionLevel::Fast => serializer.serialize_str("fast"), + CompressionLevel::Val(level) => serializer.serialize_u64(u64::from(level)), } } } @@ -438,7 +471,7 @@ impl Configurable for CompressionLevel { .iter() .map(|s| serde_json::Value::from(*s)); - let level_consts = (0u32..=9).map(serde_json::Value::from); + let level_consts = (0u32..=21).map(serde_json::Value::from); let 
valid_values = string_consts.chain(level_consts).collect(); Ok(generate_enum_schema(valid_values)) @@ -460,38 +493,32 @@ mod test { fn deserialization() { let fixtures_valid = [ (r#""none""#, Compression::None), - ( - r#""gzip""#, - Compression::Gzip(CompressionLevel::const_default()), - ), - ( - r#""zlib""#, - Compression::Zlib(CompressionLevel::const_default()), - ), + (r#""gzip""#, Compression::Gzip(CompressionLevel::default())), + (r#""zlib""#, Compression::Zlib(CompressionLevel::default())), (r#"{"algorithm": "none"}"#, Compression::None), ( r#"{"algorithm": "gzip"}"#, - Compression::Gzip(CompressionLevel::const_default()), + Compression::Gzip(CompressionLevel::default()), ), ( r#"{"algorithm": "gzip", "level": "best"}"#, - Compression::Gzip(CompressionLevel::best()), + Compression::Gzip(CompressionLevel::Best), ), ( r#"{"algorithm": "gzip", "level": 8}"#, - Compression::Gzip(CompressionLevel::new(8)), + Compression::Gzip(CompressionLevel::Val(8)), ), ( r#"{"algorithm": "zlib"}"#, - Compression::Zlib(CompressionLevel::const_default()), + Compression::Zlib(CompressionLevel::default()), ), ( r#"{"algorithm": "zlib", "level": "best"}"#, - Compression::Zlib(CompressionLevel::best()), + Compression::Zlib(CompressionLevel::Best), ), ( r#"{"algorithm": "zlib", "level": 8}"#, - Compression::Zlib(CompressionLevel::new(8)), + Compression::Zlib(CompressionLevel::Val(8)), ), ]; for (sources, result) in fixtures_valid.iter() { @@ -506,11 +533,11 @@ mod test { ), ( r#""b42""#, - r#"invalid value: string "b42", expected "none" or "gzip" or "zlib" at line 1 column 5"#, + r#"invalid value: string "b42", expected "none" or "gzip" or "zlib" or "zstd" at line 1 column 5"#, ), ( r#"{"algorithm": "b42"}"#, - r#"unknown variant `b42`, expected one of `none`, `gzip`, `zlib` at line 1 column 20"#, + r#"unknown variant `b42`, expected one of `none`, `gzip`, `zlib`, `zstd` at line 1 column 20"#, ), ( r#"{"algorithm": "none", "level": "default"}"#, @@ -518,7 +545,7 @@ mod test { ), ( r#"{"algorithm": "gzip", "level": -1}"#, - r#"invalid value: -1, expected 0, 1, 2, 3, 4, 5, 6, 7, 8 or 9 at line 1 column 33"#, + r#"invalid type: integer `-1`, expected unsigned number or string at line 1 column 33"#, ), ( r#"{"algorithm": "gzip", "level": "good"}"#, @@ -526,12 +553,20 @@ mod test { ), ( r#"{"algorithm": "gzip", "level": {}}"#, - r#"invalid type: map, expected number or string at line 1 column 33"#, + r#"invalid type: map, expected unsigned number or string at line 1 column 33"#, ), ( r#"{"algorithm": "gzip", "level": "default", "key": 42}"#, r#"unknown field `key`, expected `algorithm` or `level` at line 1 column 47"#, ), + ( + r#"{"algorithm": "gzip", "level": 10}"#, + r#"invalid value `10`, expected value in range [0, 9] at line 1 column 34"#, + ), + ( + r#"{"algorithm": "zstd", "level": 22}"#, + r#"invalid value `22`, expected value in range [0, 21] at line 1 column 34"#, + ), ]; for (source, result) in fixtures_invalid.iter() { let deserialized: Result = serde_json::from_str(source); @@ -544,10 +579,14 @@ mod test { fn from_and_to_value() { let fixtures_valid = [ Compression::None, - Compression::Gzip(CompressionLevel::const_default()), - Compression::Gzip(CompressionLevel::new(7)), - Compression::Zlib(CompressionLevel::best()), - Compression::Zlib(CompressionLevel::new(7)), + Compression::Gzip(CompressionLevel::default()), + Compression::Gzip(CompressionLevel::Val(7)), + Compression::Zlib(CompressionLevel::Best), + Compression::Zlib(CompressionLevel::Val(7)), + Compression::Zstd(CompressionLevel::Val(6)), + 
Compression::Zstd(CompressionLevel::default()), + Compression::Zstd(CompressionLevel::Best), + Compression::Zstd(CompressionLevel::Fast), ]; for v in fixtures_valid { diff --git a/src/sinks/util/buffer/mod.rs b/src/sinks/util/buffer/mod.rs index 69bd67d17b2f3..296e14b02f9c8 100644 --- a/src/sinks/util/buffer/mod.rs +++ b/src/sinks/util/buffer/mod.rs @@ -3,7 +3,10 @@ use std::io::Write; use bytes::{BufMut, BytesMut}; use flate2::write::{GzEncoder, ZlibEncoder}; -use super::batch::{err_event_too_large, Batch, BatchSize, PushResult}; +use super::{ + batch::{err_event_too_large, Batch, BatchSize, PushResult}, + zstd::ZstdEncoder, +}; pub mod compression; pub mod json; @@ -28,6 +31,7 @@ pub enum InnerBuffer { Plain(bytes::buf::Writer), Gzip(GzEncoder>), Zlib(ZlibEncoder>), + Zstd(ZstdEncoder>), } impl Buffer { @@ -54,6 +58,10 @@ impl Buffer { Compression::Zlib(level) => { InnerBuffer::Zlib(ZlibEncoder::new(writer, level.as_flate2())) } + Compression::Zstd(level) => InnerBuffer::Zstd( + ZstdEncoder::new(writer, level.into()) + .expect("Zstd encoder should not fail on init."), + ), } }) } @@ -70,6 +78,9 @@ impl Buffer { InnerBuffer::Zlib(inner) => { inner.write_all(input).unwrap(); } + InnerBuffer::Zstd(inner) => { + inner.write_all(input).unwrap(); + } } } @@ -80,6 +91,7 @@ impl Buffer { InnerBuffer::Plain(inner) => inner.get_ref().is_empty(), InnerBuffer::Gzip(inner) => inner.get_ref().get_ref().is_empty(), InnerBuffer::Zlib(inner) => inner.get_ref().get_ref().is_empty(), + InnerBuffer::Zstd(inner) => inner.get_ref().get_ref().is_empty(), }) .unwrap_or(true) } @@ -126,6 +138,10 @@ impl Batch for Buffer { .finish() .expect("This can't fail because the inner writer is a Vec") .into_inner(), + Some(InnerBuffer::Zstd(inner)) => inner + .finish() + .expect("This can't fail because the inner writer is a Vec") + .into_inner(), None => BytesMut::new(), } } diff --git a/src/sinks/util/compressor.rs b/src/sinks/util/compressor.rs index c7e3310417b60..b918893a39bd2 100644 --- a/src/sinks/util/compressor.rs +++ b/src/sinks/util/compressor.rs @@ -3,12 +3,13 @@ use std::io; use bytes::{BufMut, BytesMut}; use flate2::write::{GzEncoder, ZlibEncoder}; -use super::Compression; +use super::{zstd::ZstdEncoder, Compression}; enum Writer { Plain(bytes::buf::Writer), Gzip(GzEncoder>), Zlib(ZlibEncoder>), + Zstd(ZstdEncoder>), } impl Writer { @@ -17,6 +18,7 @@ impl Writer { Writer::Plain(inner) => inner.get_ref(), Writer::Gzip(inner) => inner.get_ref().get_ref(), Writer::Zlib(inner) => inner.get_ref().get_ref(), + Writer::Zstd(inner) => inner.get_ref().get_ref(), } } } @@ -28,6 +30,11 @@ impl From for Writer { Compression::None => Writer::Plain(writer), Compression::Gzip(level) => Writer::Gzip(GzEncoder::new(writer, level.as_flate2())), Compression::Zlib(level) => Writer::Zlib(ZlibEncoder::new(writer, level.as_flate2())), + Compression::Zstd(level) => { + let encoder = ZstdEncoder::new(writer, level.into()) + .expect("Zstd encoder should not fail on init."); + Writer::Zstd(encoder) + } } } } @@ -39,6 +46,7 @@ impl io::Write for Writer { Writer::Plain(inner_buf) => inner_buf.write(buf), Writer::Gzip(writer) => writer.write(buf), Writer::Zlib(writer) => writer.write(buf), + Writer::Zstd(writer) => writer.write(buf), } } @@ -47,6 +55,7 @@ impl io::Write for Writer { Writer::Plain(writer) => writer.flush(), Writer::Gzip(writer) => writer.flush(), Writer::Zlib(writer) => writer.flush(), + Writer::Zstd(writer) => writer.flush(), } } } @@ -88,6 +97,7 @@ impl Compressor { Writer::Plain(writer) => writer, Writer::Gzip(writer) 
=> writer.finish()?, Writer::Zlib(writer) => writer.finish()?, + Writer::Zstd(writer) => writer.finish()?, } .into_inner(); @@ -112,6 +122,9 @@ impl Compressor { Writer::Zlib(writer) => writer .finish() .expect("zlib writer should not fail to finish"), + Writer::Zstd(writer) => writer + .finish() + .expect("zstd writer should not fail to finish"), } .into_inner() } diff --git a/src/sinks/util/mod.rs b/src/sinks/util/mod.rs index a83b17673ddbc..4f222063312fd 100644 --- a/src/sinks/util/mod.rs +++ b/src/sinks/util/mod.rs @@ -22,6 +22,7 @@ pub mod udp; #[cfg(all(any(feature = "sinks-socket", feature = "sinks-statsd"), unix))] pub mod unix; pub mod uri; +pub mod zstd; use std::borrow::Cow; diff --git a/src/sinks/util/zstd.rs b/src/sinks/util/zstd.rs new file mode 100644 index 0000000000000..aaaffdc1d149d --- /dev/null +++ b/src/sinks/util/zstd.rs @@ -0,0 +1,68 @@ +use std::{fmt::Display, io}; + +use super::buffer::compression::CompressionLevel; + +#[derive(Debug)] +pub struct ZstdCompressionLevel(i32); + +impl From for ZstdCompressionLevel { + fn from(value: CompressionLevel) -> Self { + let val: i32 = match value { + CompressionLevel::None => 0, + CompressionLevel::Default => zstd::DEFAULT_COMPRESSION_LEVEL, + CompressionLevel::Best => 21, + CompressionLevel::Fast => 1, + CompressionLevel::Val(v) => v.clamp(1, 21) as i32, + }; + ZstdCompressionLevel(val) + } +} + +impl Display for ZstdCompressionLevel { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +pub struct ZstdEncoder { + inner: zstd::Encoder<'static, W>, +} + +impl ZstdEncoder { + pub fn new(writer: W, level: ZstdCompressionLevel) -> io::Result { + let encoder = zstd::Encoder::new(writer, level.0)?; + Ok(Self { inner: encoder }) + } + + pub fn finish(self) -> io::Result { + self.inner.finish() + } + + pub fn get_ref(&self) -> &W { + self.inner.get_ref() + } +} + +impl io::Write for ZstdEncoder { + fn write(&mut self, buf: &[u8]) -> io::Result { + #[allow(clippy::disallowed_methods)] // Caller handles the result of `write`. + self.inner.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.inner.flush() + } +} + +impl std::fmt::Debug for ZstdEncoder { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ZstdEncoder") + .field("inner", &self.get_ref()) + .finish() + } +} + +/// Safety: +/// 1. There is no sharing references to zstd encoder. `Write` requires unique reference, and `finish` moves the instance itself. +/// 2. Sharing only internal writer, which implements `Sync` +unsafe impl Sync for ZstdEncoder {} diff --git a/website/cue/reference/components/sinks/base/appsignal.cue b/website/cue/reference/components/sinks/base/appsignal.cue index 13a72cf6da2f9..79b5a26ff04cd 100644 --- a/website/cue/reference/components/sinks/base/appsignal.cue +++ b/website/cue/reference/components/sinks/base/appsignal.cue @@ -83,6 +83,11 @@ base: components: sinks: appsignal: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. 
+ + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/aws_cloudwatch_logs.cue b/website/cue/reference/components/sinks/base/aws_cloudwatch_logs.cue index 91f6204765875..e5ea223052de1 100644 --- a/website/cue/reference/components/sinks/base/aws_cloudwatch_logs.cue +++ b/website/cue/reference/components/sinks/base/aws_cloudwatch_logs.cue @@ -176,6 +176,11 @@ base: components: sinks: aws_cloudwatch_logs: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/aws_cloudwatch_metrics.cue b/website/cue/reference/components/sinks/base/aws_cloudwatch_metrics.cue index 01beb88eb1b10..36eede120e998 100644 --- a/website/cue/reference/components/sinks/base/aws_cloudwatch_metrics.cue +++ b/website/cue/reference/components/sinks/base/aws_cloudwatch_metrics.cue @@ -173,6 +173,11 @@ base: components: sinks: aws_cloudwatch_metrics: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/aws_kinesis_firehose.cue b/website/cue/reference/components/sinks/base/aws_kinesis_firehose.cue index 966082ca84f99..2a4c55bf3e8ee 100644 --- a/website/cue/reference/components/sinks/base/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sinks/base/aws_kinesis_firehose.cue @@ -176,6 +176,11 @@ base: components: sinks: aws_kinesis_firehose: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/aws_kinesis_streams.cue b/website/cue/reference/components/sinks/base/aws_kinesis_streams.cue index 50a8a22d90a4f..180d54f1b9a96 100644 --- a/website/cue/reference/components/sinks/base/aws_kinesis_streams.cue +++ b/website/cue/reference/components/sinks/base/aws_kinesis_streams.cue @@ -176,6 +176,11 @@ base: components: sinks: aws_kinesis_streams: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/aws_s3.cue b/website/cue/reference/components/sinks/base/aws_s3.cue index 542f8c0ceab1e..a7fa8c1eb2216 100644 --- a/website/cue/reference/components/sinks/base/aws_s3.cue +++ b/website/cue/reference/components/sinks/base/aws_s3.cue @@ -260,6 +260,11 @@ base: components: sinks: aws_s3: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/axiom.cue b/website/cue/reference/components/sinks/base/axiom.cue index 27d0e1533fdb2..bb619bc886098 100644 --- a/website/cue/reference/components/sinks/base/axiom.cue +++ b/website/cue/reference/components/sinks/base/axiom.cue @@ -48,6 +48,11 @@ base: components: sinks: axiom: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. 
+ + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/azure_blob.cue b/website/cue/reference/components/sinks/base/azure_blob.cue index 9046389be1a57..5c2568145a597 100644 --- a/website/cue/reference/components/sinks/base/azure_blob.cue +++ b/website/cue/reference/components/sinks/base/azure_blob.cue @@ -132,6 +132,11 @@ base: components: sinks: azure_blob: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/clickhouse.cue b/website/cue/reference/components/sinks/base/clickhouse.cue index 7e34028aa5305..d030debc352aa 100644 --- a/website/cue/reference/components/sinks/base/clickhouse.cue +++ b/website/cue/reference/components/sinks/base/clickhouse.cue @@ -127,6 +127,11 @@ base: components: sinks: clickhouse: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/datadog_logs.cue b/website/cue/reference/components/sinks/base/datadog_logs.cue index 4211f27dd93a3..852c972024102 100644 --- a/website/cue/reference/components/sinks/base/datadog_logs.cue +++ b/website/cue/reference/components/sinks/base/datadog_logs.cue @@ -81,6 +81,11 @@ base: components: sinks: datadog_logs: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } default_api_key: { diff --git a/website/cue/reference/components/sinks/base/datadog_traces.cue b/website/cue/reference/components/sinks/base/datadog_traces.cue index c256f17bbccf5..91ea597ae84be 100644 --- a/website/cue/reference/components/sinks/base/datadog_traces.cue +++ b/website/cue/reference/components/sinks/base/datadog_traces.cue @@ -81,6 +81,11 @@ base: components: sinks: datadog_traces: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } default_api_key: { diff --git a/website/cue/reference/components/sinks/base/elasticsearch.cue b/website/cue/reference/components/sinks/base/elasticsearch.cue index 8baafc0942860..5c76cb9537ba7 100644 --- a/website/cue/reference/components/sinks/base/elasticsearch.cue +++ b/website/cue/reference/components/sinks/base/elasticsearch.cue @@ -274,6 +274,11 @@ base: components: sinks: elasticsearch: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/gcp_cloud_storage.cue b/website/cue/reference/components/sinks/base/gcp_cloud_storage.cue index 3b636da3f16ab..89b2f37613e0e 100644 --- a/website/cue/reference/components/sinks/base/gcp_cloud_storage.cue +++ b/website/cue/reference/components/sinks/base/gcp_cloud_storage.cue @@ -156,6 +156,11 @@ base: components: sinks: gcp_cloud_storage: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. 
+ + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/http.cue b/website/cue/reference/components/sinks/base/http.cue index 7eefce772c920..c737b39f58f72 100644 --- a/website/cue/reference/components/sinks/base/http.cue +++ b/website/cue/reference/components/sinks/base/http.cue @@ -127,6 +127,11 @@ base: components: sinks: http: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/humio_logs.cue b/website/cue/reference/components/sinks/base/humio_logs.cue index e7967257dc81e..8d8253417eb4b 100644 --- a/website/cue/reference/components/sinks/base/humio_logs.cue +++ b/website/cue/reference/components/sinks/base/humio_logs.cue @@ -80,6 +80,11 @@ base: components: sinks: humio_logs: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/humio_metrics.cue b/website/cue/reference/components/sinks/base/humio_metrics.cue index faabfe3de69b1..b919e8c7b8f2e 100644 --- a/website/cue/reference/components/sinks/base/humio_metrics.cue +++ b/website/cue/reference/components/sinks/base/humio_metrics.cue @@ -80,6 +80,11 @@ base: components: sinks: humio_metrics: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/loki.cue b/website/cue/reference/components/sinks/base/loki.cue index 829bad773dd74..0071c5b2ac985 100644 --- a/website/cue/reference/components/sinks/base/loki.cue +++ b/website/cue/reference/components/sinks/base/loki.cue @@ -131,6 +131,11 @@ base: components: sinks: loki: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/new_relic.cue b/website/cue/reference/components/sinks/base/new_relic.cue index bc9be906f1938..9fcf952c07d8c 100644 --- a/website/cue/reference/components/sinks/base/new_relic.cue +++ b/website/cue/reference/components/sinks/base/new_relic.cue @@ -97,6 +97,11 @@ base: components: sinks: new_relic: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/splunk_hec_logs.cue b/website/cue/reference/components/sinks/base/splunk_hec_logs.cue index 00872c584d409..184db138643cc 100644 --- a/website/cue/reference/components/sinks/base/splunk_hec_logs.cue +++ b/website/cue/reference/components/sinks/base/splunk_hec_logs.cue @@ -121,6 +121,11 @@ base: components: sinks: splunk_hec_logs: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. 
+ + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/splunk_hec_metrics.cue b/website/cue/reference/components/sinks/base/splunk_hec_metrics.cue index 61fdbb1582142..ce147a1055624 100644 --- a/website/cue/reference/components/sinks/base/splunk_hec_metrics.cue +++ b/website/cue/reference/components/sinks/base/splunk_hec_metrics.cue @@ -107,6 +107,11 @@ base: components: sinks: splunk_hec_metrics: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } diff --git a/website/cue/reference/components/sinks/base/webhdfs.cue b/website/cue/reference/components/sinks/base/webhdfs.cue index 006b33156564b..b07f00ae779ea 100644 --- a/website/cue/reference/components/sinks/base/webhdfs.cue +++ b/website/cue/reference/components/sinks/base/webhdfs.cue @@ -80,6 +80,11 @@ base: components: sinks: webhdfs: configuration: { [zlib]: https://zlib.net/ """ + zstd: """ + [Zstandard][zstd] compression. + + [zstd]: https://facebook.github.io/zstd/ + """ } } } From e1ddd0e99c0290a645a484c45cc42a391803c6c0 Mon Sep 17 00:00:00 2001 From: May Lee Date: Thu, 1 Jun 2023 14:31:32 -0400 Subject: [PATCH 079/236] chore(config): Update field labels for sinks (#17560) --- src/sinks/blackhole/config.rs | 1 + src/sinks/file/mod.rs | 1 + src/sinks/kafka/config.rs | 2 ++ src/sinks/mezmo.rs | 2 ++ src/sinks/prometheus/exporter.rs | 1 + src/sinks/util/batch.rs | 1 + src/sinks/util/service.rs | 5 +++++ src/sinks/util/service/health.rs | 2 ++ 8 files changed, 15 insertions(+) diff --git a/src/sinks/blackhole/config.rs b/src/sinks/blackhole/config.rs index e5d878a5434bf..070f32a99c0f2 100644 --- a/src/sinks/blackhole/config.rs +++ b/src/sinks/blackhole/config.rs @@ -26,6 +26,7 @@ pub struct BlackholeConfig { #[derivative(Default(value = "default_print_interval_secs()"))] #[serde(default = "default_print_interval_secs")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Print Interval"))] #[configurable(metadata(docs::examples = 10))] pub print_interval_secs: Duration, diff --git a/src/sinks/file/mod.rs b/src/sinks/file/mod.rs index d2063938d360d..c8b0784ddd435 100644 --- a/src/sinks/file/mod.rs +++ b/src/sinks/file/mod.rs @@ -61,6 +61,7 @@ pub struct FileSinkConfig { #[serde_as(as = "serde_with::DurationSeconds")] #[serde(rename = "idle_timeout_secs")] #[configurable(metadata(docs::examples = 600))] + #[configurable(metadata(docs::human_name = "Idle Timeout"))] pub idle_timeout: Duration, #[serde(flatten)] diff --git a/src/sinks/kafka/config.rs b/src/sinks/kafka/config.rs index 4bccf58a7806c..4fe8a13ee3a20 100644 --- a/src/sinks/kafka/config.rs +++ b/src/sinks/kafka/config.rs @@ -80,12 +80,14 @@ pub struct KafkaSinkConfig { #[serde(default = "default_socket_timeout_ms")] #[configurable(metadata(docs::examples = 30000, docs::examples = 60000))] #[configurable(metadata(docs::advanced))] + #[configurable(metadata(docs::human_name = "Socket Timeout"))] pub socket_timeout_ms: Duration, /// Local message timeout, in milliseconds. 
#[serde_as(as = "serde_with::DurationMilliSeconds")] #[configurable(metadata(docs::examples = 150000, docs::examples = 450000))] #[serde(default = "default_message_timeout_ms")] + #[configurable(metadata(docs::human_name = "Message Timeout"))] #[configurable(metadata(docs::advanced))] pub message_timeout_ms: Duration, diff --git a/src/sinks/mezmo.rs b/src/sinks/mezmo.rs index d7c055e701143..8f741e6e26074 100644 --- a/src/sinks/mezmo.rs +++ b/src/sinks/mezmo.rs @@ -82,10 +82,12 @@ pub struct MezmoConfig { /// The MAC address that is attached to each batch of events. #[configurable(metadata(docs::examples = "my-mac-address"))] + #[configurable(metadata(docs::human_name = "MAC Address"))] mac: Option, /// The IP address that is attached to each batch of events. #[configurable(metadata(docs::examples = "0.0.0.0"))] + #[configurable(metadata(docs::human_name = "IP Address"))] ip: Option, /// The tags that are attached to each batch of events. diff --git a/src/sinks/prometheus/exporter.rs b/src/sinks/prometheus/exporter.rs index 1134d9f47c5be..98bc9a504c014 100644 --- a/src/sinks/prometheus/exporter.rs +++ b/src/sinks/prometheus/exporter.rs @@ -126,6 +126,7 @@ pub struct PrometheusExporterConfig { #[serde(default = "default_flush_period_secs")] #[serde_as(as = "serde_with::DurationSeconds")] #[configurable(metadata(docs::advanced))] + #[configurable(metadata(docs::human_name = "Flush Interval"))] pub flush_period_secs: Duration, /// Suppresses timestamps on the Prometheus output. diff --git a/src/sinks/util/batch.rs b/src/sinks/util/batch.rs index b4bd7e70fdd60..13f911abacffd 100644 --- a/src/sinks/util/batch.rs +++ b/src/sinks/util/batch.rs @@ -114,6 +114,7 @@ where /// The maximum age of a batch before it is flushed. #[serde(default = "default_timeout::")] #[configurable(metadata(docs::type_unit = "seconds"))] + #[configurable(metadata(docs::human_name = "Timeout"))] pub timeout_secs: Option, #[serde(skip)] diff --git a/src/sinks/util/service.rs b/src/sinks/util/service.rs index c53c52b8713c4..d75030de8d79f 100644 --- a/src/sinks/util/service.rs +++ b/src/sinks/util/service.rs @@ -102,16 +102,19 @@ pub struct TowerRequestConfig { /// create orphaned requests, pile on retries, and result in duplicate data downstream. #[configurable(metadata(docs::type_unit = "seconds"))] #[serde(default = "default_timeout_secs")] + #[configurable(metadata(docs::human_name = "Timeout"))] pub timeout_secs: Option, /// The time window used for the `rate_limit_num` option. #[configurable(metadata(docs::type_unit = "seconds"))] #[serde(default = "default_rate_limit_duration_secs")] + #[configurable(metadata(docs::human_name = "Rate Limit Duration"))] pub rate_limit_duration_secs: Option, /// The maximum number of requests allowed within the `rate_limit_duration_secs` time window. #[configurable(metadata(docs::type_unit = "requests"))] #[serde(default = "default_rate_limit_num")] + #[configurable(metadata(docs::human_name = "Rate Limit Number"))] pub rate_limit_num: Option, /// The maximum number of retries to make for failed requests. @@ -124,6 +127,7 @@ pub struct TowerRequestConfig { /// The maximum amount of time to wait between retries. #[configurable(metadata(docs::type_unit = "seconds"))] #[serde(default = "default_retry_max_duration_secs")] + #[configurable(metadata(docs::human_name = "Max Retry Duration"))] pub retry_max_duration_secs: Option, /// The amount of time to wait before attempting the first retry for a failed request. 
@@ -131,6 +135,7 @@ pub struct TowerRequestConfig { /// After the first retry has failed, the fibonacci sequence is used to select future backoffs. #[configurable(metadata(docs::type_unit = "seconds"))] #[serde(default = "default_retry_initial_backoff_secs")] + #[configurable(metadata(docs::human_name = "Retry Initial Backoff"))] pub retry_initial_backoff_secs: Option, #[configurable(derived)] diff --git a/src/sinks/util/service/health.rs b/src/sinks/util/service/health.rs index 2c7f0baff600b..6fdb202afd846 100644 --- a/src/sinks/util/service/health.rs +++ b/src/sinks/util/service/health.rs @@ -37,11 +37,13 @@ pub struct HealthConfig { #[serde(default = "default_retry_initial_backoff_secs")] #[configurable(metadata(docs::type_unit = "seconds"))] // not using Duration type because the value is only used as a u64. + #[configurable(metadata(docs::human_name = "Retry Initial Backoff"))] pub retry_initial_backoff_secs: u64, /// Maximum delay between attempts to reactivate endpoints once they become unhealthy. #[serde_as(as = "serde_with::DurationSeconds")] #[serde(default = "default_retry_max_duration_secs")] + #[configurable(metadata(docs::human_name = "Max Retry Duration"))] pub retry_max_duration_secs: Duration, } From 8a741d55b8bfe361d6c5449cab4fd3728e1dae8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Jun 2023 12:42:54 -0600 Subject: [PATCH 080/236] chore(ci): Bump aws-actions/configure-aws-credentials from 2.0.0 to 2.1.0 (#17565) Bumps [aws-actions/configure-aws-credentials](https://github.com/aws-actions/configure-aws-credentials) from 2.0.0 to 2.1.0.
Release notes

Sourced from aws-actions/configure-aws-credentials's releases.

v2.1.0

See the changelog for details about the changes included in this release.

Changelog

Sourced from aws-actions/configure-aws-credentials's changelog.

2.1.0 (2023-05-31)

Features

  • role-chaining prop enables role chaining use case (6fbd316)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=aws-actions/configure-aws-credentials&package-manager=github_actions&previous-version=2.0.0&new-version=2.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/regression.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 2811a0042237c..4e69413384302 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -332,7 +332,7 @@ jobs: - compute-metadata steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 + uses: aws-actions/configure-aws-credentials@v2.1.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -364,7 +364,7 @@ jobs: docker load --input baseline-image.tar - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 + uses: aws-actions/configure-aws-credentials@v2.1.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -402,7 +402,7 @@ jobs: docker load --input comparison-image.tar - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 + uses: aws-actions/configure-aws-credentials@v2.1.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -448,7 +448,7 @@ jobs: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 + uses: aws-actions/configure-aws-credentials@v2.1.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -567,7 +567,7 @@ jobs: - uses: actions/checkout@v3 - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 + uses: aws-actions/configure-aws-credentials@v2.1.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -658,7 +658,7 @@ jobs: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.0.0 + uses: aws-actions/configure-aws-credentials@v2.1.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} From d7df52055152d9f85a6e48082d385e84c45f1501 Mon Sep 17 00:00:00 2001 From: neuronull Date: Fri, 2 Jun 2023 12:15:58 -0600 Subject: [PATCH 081/236] fix(http_client source): adapt int test to use breaking change of dep (#17583) - `dufs` a dep of the integration test, had a breaking change to the auth syntax - pegged the version to a specific release since this component isn't tied to a specific service --- scripts/integration/http-client/compose.yaml | 2 +- scripts/integration/http-client/test.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/integration/http-client/compose.yaml b/scripts/integration/http-client/compose.yaml index 69ad1c6ef3d94..a1723c93d8d1a 100644 --- a/scripts/integration/http-client/compose.yaml +++ b/scripts/integration/http-client/compose.yaml @@ -11,7 +11,7 @@ services: image: 
docker.io/sigoden/dufs:${CONFIG_VERSION} command: - -a - - /@user:pass + - "user:pass@/" - --auth-method - basic - /data diff --git a/scripts/integration/http-client/test.yaml b/scripts/integration/http-client/test.yaml index aa867b5b23e39..edc88113b7b6b 100644 --- a/scripts/integration/http-client/test.yaml +++ b/scripts/integration/http-client/test.yaml @@ -9,4 +9,4 @@ env: DUFS_HTTPS_ADDRESS: https://dufs-https:5000 matrix: - version: [latest] + version: ["v0.34.1"] From 4af5e6d8886cfc326209f8d6aa65d27f86f6e579 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Jun 2023 13:07:14 -0600 Subject: [PATCH 082/236] chore(deps): Bump openssl from 0.10.53 to 0.10.54 (#17573) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.53 to 0.10.54.
Release notes

Sourced from openssl's releases.

openssl-v0.10.54

What's Changed

Full Changelog: https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.53...openssl-v0.10.54

Commits
  • 4b4a344 Merge pull request #1942 from alex/openssl-release
  • 68ff80a Version bump for openssl v0.10.54 release
  • b811d71 Merge pull request #1941 from alex/pkcs8-passphrase
  • b83aec7 Remove converting PKCS#8 passphrase to CString
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=openssl&package-manager=cargo&previous-version=0.10.53&new-version=0.10.54)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 836d354b9e00c..b75de5a1e3151 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5587,9 +5587,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.53" +version = "0.10.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12df40a956736488b7b44fe79fe12d4f245bb5b3f5a1f6095e499760015be392" +checksum = "69b3f656a17a6cbc115b5c7a40c616947d213ba182135b014d6051b73ab6f019" dependencies = [ "bitflags", "cfg-if", diff --git a/Cargo.toml b/Cargo.toml index 043aabb93f135..d42c2efdf5055 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -281,7 +281,7 @@ nkeys = { version = "0.3.0", default-features = false, optional = true } nom = { version = "7.1.3", default-features = false, optional = true } notify = { version = "6.0.0", default-features = false, features = ["macos_fsevent"] } once_cell = { version = "1.17", default-features = false } -openssl = { version = "0.10.53", default-features = false, features = ["vendored"] } +openssl = { version = "0.10.54", default-features = false, features = ["vendored"] } openssl-probe = { version = "0.1.5", default-features = false } ordered-float = { version = "3.7.0", default-features = false } paste = "1.0.12" diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 9ef187e61bc68..9b627141968e0 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -31,7 +31,7 @@ mlua = { version = "0.8.9", default-features = false, features = ["lua54", "send no-proxy = { version = "0.3.2", default-features = false, features = ["serialize"] } once_cell = { version = "1.17", default-features = false } ordered-float = { version = "3.7.0", default-features = false } -openssl = { version = "0.10.53", default-features = false, features = ["vendored"] } +openssl = { version = "0.10.54", default-features = false, features = ["vendored"] } parking_lot = { version = "0.12.1", default-features = false } pin-project = { version = "1.1.0", default-features = false } proptest = { version = "1.2", optional = true } From 8823561a8ad544b4acd29273b466b1a5bd606cc2 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 2 Jun 2023 15:48:01 -0400 Subject: [PATCH 083/236] chore: Codify the use of abbreviate time units in config option names (#17582) Codifying the status quo since there wasn't a strong motivator to change it and we want to maintain consistency. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- docs/specs/configuration.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/specs/configuration.md b/docs/specs/configuration.md index 104da9e2506ae..7831f9389cb83 100644 --- a/docs/specs/configuration.md +++ b/docs/specs/configuration.md @@ -64,7 +64,8 @@ under entities and also used to define global Vector behavior. - MUST only contain ASCII alphanumeric, lowercase, and underscores - MUST be in snake case format when multiple words are used (e.g., `timeout_seconds`) - SHOULD use nouns, not verbs, as names (e.g., `fingerprint` instead of `fingerprinting`) -- MUST suffix options with their _full_ unit name (e.g., `_seconds`, `_bytes`, etc.) +- MUST suffix options with their _full_ unit name (e.g., `_megabytes` rather than `_mb`) or the + following abbreviations for time units: `_secs`, `_ms`, `_ns`. 
- SHOULD consistent with units within the same scope. (e.g., don't mix seconds and milliseconds) - MUST NOT repeat the name space in the option name (e.g., `fingerprint.bytes` instead of `fingerprint.fingerprint_bytes`) From 134578db2165b4b522013d0e7d6ac974f9e4e744 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 2 Jun 2023 15:48:10 -0400 Subject: [PATCH 084/236] chore: Codify flag naming including sentinel values (#17569) Starting with `SHOULD` while we feel this out some more. Might change to `MUST` once we build up more confidence. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- docs/specs/configuration.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docs/specs/configuration.md b/docs/specs/configuration.md index 7831f9389cb83..9e9a06e16760b 100644 --- a/docs/specs/configuration.md +++ b/docs/specs/configuration.md @@ -9,10 +9,12 @@ interpreted as described in [RFC 2119]. - [Introduction](#introduction) - [Scope](#scope) - [Terminology](#terminology) + - [Flag](#flag) - [Entity](#entity) - [Option](#option) - [Schema](#schema) - [Naming](#naming) + - [Flag naming](#flag-naming) - [Entity naming](#entity-naming) - [Option naming](#option-naming) - [Types](#types) @@ -37,6 +39,10 @@ relevant specifications, such as the [component specification]. ## Terminology +### Flag + +"Flag" refers to a CLI flag provided when running Vector. + ### Entity "Entity" refers to a Vector concept used to model Vector's processing graph. @@ -53,6 +59,16 @@ under entities and also used to define global Vector behavior. ### Naming +#### Flag naming + +- MUST only contain ASCII alphanumeric, lowercase, and hyphens +- MUST be in kebab-case format when multiple words are used (e.g., `config-dir`) +- For flags that take a value, but are also able to be "disabled", they SHOULD NOT use a sentinel + value. Instead they SHOULD have a second flag added prefixed with `no-` and SHOULD leave off any + unit suffixes. For example, to disable `--graceful-shutdown-limit-secs`, + a `--no-graceful-shutdown` flag was added. Vector MUST NOT allow both the flag and its negative to + be specified at the same time. + #### Entity naming - MUST only contain ASCII alphanumeric, lowercase, and underscores From 6e45477ddc27147887346c8d09dd077225ea2ef3 Mon Sep 17 00:00:00 2001 From: May Lee Date: Fri, 2 Jun 2023 16:06:18 -0400 Subject: [PATCH 085/236] chore(config): Update field labels for the rest of the sources and transforms fields (#17564) This PR updates the rest of the sources and transforms' field labels that needs a human_name label. Related to https://github.com/vectordotdev/vector/pull/17517. 
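For reference, the change itself is mechanical: each affected field gains an additional `#[configurable(...)]` attribute carrying `docs::human_name` metadata, which supplies the human-readable label shown for that field in the generated configuration documentation. A minimal, illustrative fragment of the pattern (the field name, default function, and label below are invented for the example and are not taken from any specific component):

```rust
/// The interval between scrapes, in seconds.
#[serde(default = "default_example_interval_secs")]
// `docs::human_name` is intended to override the label otherwise derived from the
// field name (for example, "Example Interval Secs") in the generated docs.
#[configurable(metadata(docs::human_name = "Example Interval"))]
pub example_interval_secs: u64,
```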
--------- Signed-off-by: Spencer Gilbert Co-authored-by: Spencer Gilbert --- lib/vector-core/src/config/proxy.rs | 2 +- src/sources/aws_sqs/config.rs | 1 + src/sources/docker_logs/mod.rs | 1 + src/sources/eventstoredb_metrics/mod.rs | 1 + src/sources/exec/mod.rs | 3 ++- src/sources/file.rs | 8 ++++++-- src/sources/file_descriptors/file_descriptor.rs | 1 + src/sources/host_metrics/mod.rs | 4 +++- src/sources/http_server.rs | 2 +- src/sources/internal_metrics.rs | 1 + src/sources/journald.rs | 1 + src/sources/kubernetes_logs/mod.rs | 4 ++++ src/sources/mongodb_metrics/mod.rs | 1 + src/sources/nginx_metrics/mod.rs | 1 + src/sources/postgresql_metrics.rs | 1 + src/sources/socket/tcp.rs | 1 + src/sources/splunk_hec/mod.rs | 2 +- src/sources/statsd/mod.rs | 1 + src/sources/syslog.rs | 2 +- src/sources/vector/mod.rs | 2 +- src/transforms/lua/mod.rs | 2 +- src/transforms/lua/v1/mod.rs | 2 +- src/transforms/lua/v2/mod.rs | 2 ++ website/cue/reference/components/base/sinks.cue | 2 +- website/cue/reference/components/base/sources.cue | 2 +- website/cue/reference/components/sources/base/exec.cue | 2 +- website/cue/reference/components/sources/base/file.cue | 4 ++-- .../reference/components/sources/base/host_metrics.cue | 2 +- website/cue/reference/components/sources/base/http.cue | 2 +- .../cue/reference/components/sources/base/http_server.cue | 2 +- .../cue/reference/components/sources/base/splunk_hec.cue | 2 +- website/cue/reference/components/sources/base/syslog.cue | 2 +- .../components/transforms/base/aws_ec2_metadata.cue | 2 +- 33 files changed, 46 insertions(+), 22 deletions(-) diff --git a/lib/vector-core/src/config/proxy.rs b/lib/vector-core/src/config/proxy.rs index 266ffed72d167..4f107db571960 100644 --- a/lib/vector-core/src/config/proxy.rs +++ b/lib/vector-core/src/config/proxy.rs @@ -41,7 +41,7 @@ impl NoProxyInterceptor { /// /// Configure to proxy traffic through an HTTP(S) proxy when making external requests. /// -/// Similar to common proxy configuration convention, users can set different proxies +/// Similar to common proxy configuration convention, you can set different proxies /// to use based on the type of traffic being proxied, as well as set specific hosts that /// should not be proxied. #[configurable_component] diff --git a/src/sources/aws_sqs/config.rs b/src/sources/aws_sqs/config.rs index 03de7d24d52ae..4f156cd47a3db 100644 --- a/src/sources/aws_sqs/config.rs +++ b/src/sources/aws_sqs/config.rs @@ -45,6 +45,7 @@ pub struct AwsSqsConfig { #[serde(default = "default_poll_secs")] #[derivative(Default(value = "default_poll_secs()"))] #[configurable(metadata(docs::type_unit = "seconds"))] + #[configurable(metadata(docs::human_name = "Poll Wait Time"))] pub poll_secs: u32, /// The visibility timeout to use for messages, in seconds. diff --git a/src/sources/docker_logs/mod.rs b/src/sources/docker_logs/mod.rs index 55c24e77a3c90..4c93b20abe12a 100644 --- a/src/sources/docker_logs/mod.rs +++ b/src/sources/docker_logs/mod.rs @@ -151,6 +151,7 @@ pub struct DockerLogsConfig { /// The amount of time to wait before retrying after an error. #[serde_as(as = "serde_with::DurationSeconds")] #[serde(default = "default_retry_backoff_secs")] + #[configurable(metadata(docs::human_name = "Retry Backoff"))] retry_backoff_secs: Duration, /// Multiline aggregation configuration. 
diff --git a/src/sources/eventstoredb_metrics/mod.rs b/src/sources/eventstoredb_metrics/mod.rs index 57017950f56be..1d205ae6799e2 100644 --- a/src/sources/eventstoredb_metrics/mod.rs +++ b/src/sources/eventstoredb_metrics/mod.rs @@ -41,6 +41,7 @@ pub struct EventStoreDbConfig { /// The interval between scrapes, in seconds. #[serde(default = "default_scrape_interval_secs")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Scrape Interval"))] scrape_interval_secs: Duration, /// Overrides the default namespace for the metrics emitted by the source. diff --git a/src/sources/exec/mod.rs b/src/sources/exec/mod.rs index 1f8d755e77e3f..5b163db025772 100644 --- a/src/sources/exec/mod.rs +++ b/src/sources/exec/mod.rs @@ -56,7 +56,7 @@ pub struct ExecConfig { #[configurable(derived)] pub streaming: Option, - /// The command to be run, plus any arguments required. + /// The command to run, plus any arguments required. #[configurable(metadata(docs::examples = "echo", docs::examples = "Hello World!"))] pub command: Vec, @@ -119,6 +119,7 @@ pub struct StreamingConfig { /// The amount of time, in seconds, before rerunning a streaming command that exited. #[serde(default = "default_respawn_interval_secs")] + #[configurable(metadata(docs::human_name = "Respawn Interval"))] respawn_interval_secs: u64, } diff --git a/src/sources/file.rs b/src/sources/file.rs index 3e68e57a2b04e..5e72bb7676917 100644 --- a/src/sources/file.rs +++ b/src/sources/file.rs @@ -121,6 +121,7 @@ pub struct FileConfig { #[serde(alias = "ignore_older", default)] #[configurable(metadata(docs::type_unit = "seconds"))] #[configurable(metadata(docs::examples = 600))] + #[configurable(metadata(docs::human_name = "Ignore Older Files"))] pub ignore_older_secs: Option, /// The maximum size of a line before it is discarded. @@ -149,6 +150,7 @@ pub struct FileConfig { /// [global_data_dir]: https://vector.dev/docs/reference/configuration/global-options/#data_dir #[serde(default)] #[configurable(metadata(docs::examples = "/var/local/lib/vector/"))] + #[configurable(metadata(docs::human_name = "Data Directory"))] pub data_dir: Option, /// Enables adding the file offset to each event and sets the name of the log field used. @@ -160,7 +162,7 @@ pub struct FileConfig { #[configurable(metadata(docs::examples = "offset"))] pub offset_key: Option, - /// Delay between file discovery calls. + /// The delay between file discovery calls. /// /// This controls the interval at which files are searched. A higher value results in greater /// chances of some short-lived files being missed between searches, but a lower value increases @@ -171,6 +173,7 @@ pub struct FileConfig { )] #[serde_as(as = "serde_with::DurationMilliSeconds")] #[configurable(metadata(docs::type_unit = "milliseconds"))] + #[configurable(metadata(docs::human_name = "Glob Minimum Cooldown"))] pub glob_minimum_cooldown_ms: Duration, #[configurable(derived)] @@ -211,7 +214,7 @@ pub struct FileConfig { #[serde(default)] pub oldest_first: bool, - /// Timeout from reaching `EOF` after which the file is removed from the filesystem, unless new data is written in the meantime. + /// After reaching EOF, the number of seconds to wait before removing the file, unless new data is written. /// /// If not specified, files are not removed. 
#[serde(alias = "remove_after", default)] @@ -219,6 +222,7 @@ pub struct FileConfig { #[configurable(metadata(docs::examples = 0))] #[configurable(metadata(docs::examples = 5))] #[configurable(metadata(docs::examples = 60))] + #[configurable(metadata(docs::human_name = "Wait Time Before Removing File"))] pub remove_after_secs: Option, /// String sequence used to separate one file line from another. diff --git a/src/sources/file_descriptors/file_descriptor.rs b/src/sources/file_descriptors/file_descriptor.rs index 0740674742e81..4770811ad401b 100644 --- a/src/sources/file_descriptors/file_descriptor.rs +++ b/src/sources/file_descriptors/file_descriptor.rs @@ -38,6 +38,7 @@ pub struct FileDescriptorSourceConfig { /// The file descriptor number to read from. #[configurable(metadata(docs::examples = 10))] + #[configurable(metadata(docs::human_name = "File Descriptor Number"))] pub fd: u32, /// The namespace to use for logs. This overrides the global setting. diff --git a/src/sources/host_metrics/mod.rs b/src/sources/host_metrics/mod.rs index d75ab4d0420f9..9f577b3b85a1a 100644 --- a/src/sources/host_metrics/mod.rs +++ b/src/sources/host_metrics/mod.rs @@ -93,6 +93,7 @@ pub struct HostMetricsConfig { /// The interval between metric gathering, in seconds. #[serde_as(as = "serde_with::DurationSeconds")] #[serde(default = "default_scrape_interval")] + #[configurable(metadata(docs::human_name = "Scrape Interval"))] pub scrape_interval_secs: Duration, /// The list of host metric collector services to use. @@ -136,7 +137,7 @@ pub struct HostMetricsConfig { pub struct CGroupsConfig { /// The number of levels of the cgroups hierarchy for which to report metrics. /// - /// A value of `1` means just the root or named cgroup. + /// A value of `1` means the root or named cgroup. #[derivative(Default(value = "default_levels()"))] #[serde(default = "default_levels")] #[configurable(metadata(docs::examples = 1))] @@ -157,6 +158,7 @@ pub struct CGroupsConfig { /// Base cgroup directory, for testing use only #[serde(skip_serializing)] #[configurable(metadata(docs::hidden))] + #[configurable(metadata(docs::human_name = "Base Directory"))] base_dir: Option, } diff --git a/src/sources/http_server.rs b/src/sources/http_server.rs index e082f5e1642b8..c4730fe7fee5b 100644 --- a/src/sources/http_server.rs +++ b/src/sources/http_server.rs @@ -81,7 +81,7 @@ pub struct SimpleHttpConfig { /// The expected encoding of received data. /// - /// Note: For `json` and `ndjson` encodings, the fields of the JSON objects are output as separate fields. + /// For `json` and `ndjson` encodings, the fields of the JSON objects are output as separate fields. #[serde(default)] encoding: Option, diff --git a/src/sources/internal_metrics.rs b/src/sources/internal_metrics.rs index 03f447c7488d9..908daab49e1f7 100644 --- a/src/sources/internal_metrics.rs +++ b/src/sources/internal_metrics.rs @@ -28,6 +28,7 @@ pub struct InternalMetricsConfig { /// The interval between metric gathering, in seconds. #[serde_as(as = "serde_with::DurationSeconds")] #[serde(default = "default_scrape_interval")] + #[configurable(metadata(docs::human_name = "Scrape Interval"))] pub scrape_interval_secs: Duration, #[configurable(derived)] diff --git a/src/sources/journald.rs b/src/sources/journald.rs index 2436702dfe49a..652e3577aeae0 100644 --- a/src/sources/journald.rs +++ b/src/sources/journald.rs @@ -152,6 +152,7 @@ pub struct JournaldConfig { /// permissions to this directory. 
#[serde(default)] #[configurable(metadata(docs::examples = "/var/lib/vector"))] + #[configurable(metadata(docs::human_name = "Data Directory"))] pub data_dir: Option, /// The systemd journal is read in batches, and a checkpoint is set at the end of each batch. diff --git a/src/sources/kubernetes_logs/mod.rs b/src/sources/kubernetes_logs/mod.rs index 0366c51921380..de609ff055758 100644 --- a/src/sources/kubernetes_logs/mod.rs +++ b/src/sources/kubernetes_logs/mod.rs @@ -143,6 +143,7 @@ pub struct Config { /// By default, the global `data_dir` option is used. Make sure the running user has write /// permissions to this directory. #[configurable(metadata(docs::examples = "/var/local/lib/vector/"))] + #[configurable(metadata(docs::human_name = "Data Directory"))] data_dir: Option, #[configurable(derived)] @@ -167,6 +168,7 @@ pub struct Config { #[serde(default)] #[configurable(metadata(docs::type_unit = "seconds"))] #[configurable(metadata(docs::examples = 600))] + #[configurable(metadata(docs::human_name = "Ignore Files Older Than"))] ignore_older_secs: Option, /// Max amount of bytes to read from a single file before switching over @@ -198,6 +200,7 @@ pub struct Config { /// in the underlying file server, so setting it too low may introduce /// a significant overhead. #[serde_as(as = "serde_with::DurationMilliSeconds")] + #[configurable(metadata(docs::human_name = "Glob Minimum Cooldown"))] glob_minimum_cooldown_ms: Duration, /// Overrides the name of the log field used to add the ingestion timestamp to each event. @@ -229,6 +232,7 @@ pub struct Config { /// removed. If relevant metadata has been removed, the log is forwarded un-enriched and a /// warning is emitted. #[serde_as(as = "serde_with::DurationMilliSeconds")] + #[configurable(metadata(docs::human_name = "Delay Deletion"))] delay_deletion_ms: Duration, /// The namespace to use for logs. This overrides the global setting. diff --git a/src/sources/mongodb_metrics/mod.rs b/src/sources/mongodb_metrics/mod.rs index b68356444d5dd..a144b5f09c2a9 100644 --- a/src/sources/mongodb_metrics/mod.rs +++ b/src/sources/mongodb_metrics/mod.rs @@ -89,6 +89,7 @@ pub struct MongoDbMetricsConfig { /// The interval between scrapes, in seconds. #[serde(default = "default_scrape_interval_secs")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Scrape Interval"))] scrape_interval_secs: Duration, /// Overrides the default namespace for the metrics emitted by the source. diff --git a/src/sources/nginx_metrics/mod.rs b/src/sources/nginx_metrics/mod.rs index 0ea02fffd83c1..0e6bb83049a12 100644 --- a/src/sources/nginx_metrics/mod.rs +++ b/src/sources/nginx_metrics/mod.rs @@ -74,6 +74,7 @@ pub struct NginxMetricsConfig { /// The interval between scrapes. #[serde(default = "default_scrape_interval_secs")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Scrape Interval"))] scrape_interval_secs: Duration, /// Overrides the default namespace for the metrics emitted by the source. diff --git a/src/sources/postgresql_metrics.rs b/src/sources/postgresql_metrics.rs index 808028539e6f3..6c6d8505c6b9d 100644 --- a/src/sources/postgresql_metrics.rs +++ b/src/sources/postgresql_metrics.rs @@ -159,6 +159,7 @@ pub struct PostgresqlMetricsConfig { /// The interval between scrapes. 
#[serde(default = "default_scrape_interval_secs")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Scrape Interval"))] scrape_interval_secs: Duration, /// Overrides the default namespace for the metrics emitted by the source. diff --git a/src/sources/socket/tcp.rs b/src/sources/socket/tcp.rs index 6135ca0e3cf68..1c0fb4574b101 100644 --- a/src/sources/socket/tcp.rs +++ b/src/sources/socket/tcp.rs @@ -33,6 +33,7 @@ pub struct TcpConfig { /// The timeout before a connection is forcefully closed during shutdown. #[serde(default = "default_shutdown_timeout_secs")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Shutdown Timeout"))] shutdown_timeout_secs: Duration, /// Overrides the name of the log field used to add the peer host to each event. diff --git a/src/sources/splunk_hec/mod.rs b/src/sources/splunk_hec/mod.rs index 0aae6053ac402..0c465ffaef97e 100644 --- a/src/sources/splunk_hec/mod.rs +++ b/src/sources/splunk_hec/mod.rs @@ -74,7 +74,7 @@ pub struct SplunkConfig { #[configurable(deprecated = "This option has been deprecated, use `valid_tokens` instead.")] token: Option, - /// Optional list of valid authorization tokens. + /// A list of valid authorization tokens. /// /// If supplied, incoming requests must supply one of these tokens in the `Authorization` header, just as a client /// would if it was communicating with the Splunk HEC endpoint directly. diff --git a/src/sources/statsd/mod.rs b/src/sources/statsd/mod.rs index a655d44147549..4f18e1d14be1b 100644 --- a/src/sources/statsd/mod.rs +++ b/src/sources/statsd/mod.rs @@ -99,6 +99,7 @@ pub struct TcpConfig { /// The timeout before a connection is forcefully closed during shutdown. #[serde(default = "default_shutdown_timeout_secs")] #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Shutdown Timeout"))] shutdown_timeout_secs: Duration, /// The size of the receive buffer used for each connection. diff --git a/src/sources/syslog.rs b/src/sources/syslog.rs index 71c8bd7b3b728..05e4394a9cd96 100644 --- a/src/sources/syslog.rs +++ b/src/sources/syslog.rs @@ -117,7 +117,7 @@ pub enum Mode { /// Unix file mode bits to be applied to the unix socket file as its designated file permissions. /// - /// Note: The file mode value can be specified in any numeric format supported by your configuration + /// The file mode value can be specified in any numeric format supported by your configuration /// language, but it is most intuitive to use an octal number. socket_file_mode: Option, }, diff --git a/src/sources/vector/mod.rs b/src/sources/vector/mod.rs index 7cdaa4633a9e9..66aef5f7b4b19 100644 --- a/src/sources/vector/mod.rs +++ b/src/sources/vector/mod.rs @@ -26,7 +26,7 @@ use crate::{ SourceSender, }; -/// Marker type for the version two of the configuration for the `vector` source. +/// Marker type for version two of the configuration for the `vector` source. #[configurable_component] #[derive(Clone, Debug)] enum VectorConfigVersion { diff --git a/src/transforms/lua/mod.rs b/src/transforms/lua/mod.rs index febcf58928ff5..24052542ac8b5 100644 --- a/src/transforms/lua/mod.rs +++ b/src/transforms/lua/mod.rs @@ -39,7 +39,7 @@ pub struct LuaConfigV1 { config: v1::LuaConfig, } -/// Marker type for the version two of the configuration for the `lua` transform. +/// Marker type for version two of the configuration for the `lua` transform. 
#[configurable_component] #[derive(Clone, Debug)] enum V2 { diff --git a/src/transforms/lua/v1/mod.rs b/src/transforms/lua/v1/mod.rs index 4aab930ede76b..4daae33000530 100644 --- a/src/transforms/lua/v1/mod.rs +++ b/src/transforms/lua/v1/mod.rs @@ -21,7 +21,7 @@ enum BuildError { InvalidLua { source: mlua::Error }, } -/// Configuration for the version one of the `lua` transform. +/// Configuration for version one of the `lua` transform. #[configurable_component] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] diff --git a/src/transforms/lua/v2/mod.rs b/src/transforms/lua/v2/mod.rs index f68454cdc6ea8..ca0790546b390 100644 --- a/src/transforms/lua/v2/mod.rs +++ b/src/transforms/lua/v2/mod.rs @@ -68,6 +68,7 @@ pub struct LuaConfig { /// If not specified, the modules are looked up in the configuration directories. #[serde(default = "default_config_paths")] #[configurable(metadata(docs::examples = "/etc/vector/lua"))] + #[configurable(metadata(docs::human_name = "Search Directories"))] search_dirs: Vec, #[configurable(derived)] @@ -156,6 +157,7 @@ struct HooksConfig { struct TimerConfig { /// The interval to execute the handler, in seconds. #[serde_as(as = "serde_with::DurationSeconds")] + #[configurable(metadata(docs::human_name = "Interval"))] interval_seconds: Duration, /// The handler function which is called when the timer ticks. diff --git a/website/cue/reference/components/base/sinks.cue b/website/cue/reference/components/base/sinks.cue index abb7e9c6ff801..4c71d4549e778 100644 --- a/website/cue/reference/components/base/sinks.cue +++ b/website/cue/reference/components/base/sinks.cue @@ -118,7 +118,7 @@ base: components: sinks: configuration: { Configure to proxy traffic through an HTTP(S) proxy when making external requests. - Similar to common proxy configuration convention, users can set different proxies + Similar to common proxy configuration convention, you can set different proxies to use based on the type of traffic being proxied, as well as set specific hosts that should not be proxied. """ diff --git a/website/cue/reference/components/base/sources.cue b/website/cue/reference/components/base/sources.cue index 9755857f5ea01..f7f48b24c9eb7 100644 --- a/website/cue/reference/components/base/sources.cue +++ b/website/cue/reference/components/base/sources.cue @@ -6,7 +6,7 @@ base: components: sources: configuration: proxy: { Configure to proxy traffic through an HTTP(S) proxy when making external requests. - Similar to common proxy configuration convention, users can set different proxies + Similar to common proxy configuration convention, you can set different proxies to use based on the type of traffic being proxied, as well as set specific hosts that should not be proxied. """ diff --git a/website/cue/reference/components/sources/base/exec.cue b/website/cue/reference/components/sources/base/exec.cue index d9d1745d5c649..06c7c09fadb96 100644 --- a/website/cue/reference/components/sources/base/exec.cue +++ b/website/cue/reference/components/sources/base/exec.cue @@ -2,7 +2,7 @@ package metadata base: components: sources: exec: configuration: { command: { - description: "The command to be run, plus any arguments required." + description: "The command to run, plus any arguments required." 
required: true type: array: items: type: string: examples: ["echo", "Hello World!"] } diff --git a/website/cue/reference/components/sources/base/file.cue b/website/cue/reference/components/sources/base/file.cue index e47f4e45077eb..17b32bb7648e0 100644 --- a/website/cue/reference/components/sources/base/file.cue +++ b/website/cue/reference/components/sources/base/file.cue @@ -144,7 +144,7 @@ base: components: sources: file: configuration: { } glob_minimum_cooldown_ms: { description: """ - Delay between file discovery calls. + The delay between file discovery calls. This controls the interval at which files are searched. A higher value results in greater chances of some short-lived files being missed between searches, but a lower value increases @@ -336,7 +336,7 @@ base: components: sources: file: configuration: { } remove_after_secs: { description: """ - Timeout from reaching `EOF` after which the file is removed from the filesystem, unless new data is written in the meantime. + After reaching EOF, the number of seconds to wait before removing the file, unless new data is written. If not specified, files are not removed. """ diff --git a/website/cue/reference/components/sources/base/host_metrics.cue b/website/cue/reference/components/sources/base/host_metrics.cue index a38bcc8746cb7..f76224493a7c0 100644 --- a/website/cue/reference/components/sources/base/host_metrics.cue +++ b/website/cue/reference/components/sources/base/host_metrics.cue @@ -54,7 +54,7 @@ base: components: sources: host_metrics: configuration: { description: """ The number of levels of the cgroups hierarchy for which to report metrics. - A value of `1` means just the root or named cgroup. + A value of `1` means the root or named cgroup. """ required: false type: uint: { diff --git a/website/cue/reference/components/sources/base/http.cue b/website/cue/reference/components/sources/base/http.cue index e562985c48955..c48d01eb2a4a9 100644 --- a/website/cue/reference/components/sources/base/http.cue +++ b/website/cue/reference/components/sources/base/http.cue @@ -97,7 +97,7 @@ base: components: sources: http: configuration: { description: """ The expected encoding of received data. - Note: For `json` and `ndjson` encodings, the fields of the JSON objects are output as separate fields. + For `json` and `ndjson` encodings, the fields of the JSON objects are output as separate fields. """ required: false type: string: enum: { diff --git a/website/cue/reference/components/sources/base/http_server.cue b/website/cue/reference/components/sources/base/http_server.cue index e7e66b4845c7c..6558067f7c8f6 100644 --- a/website/cue/reference/components/sources/base/http_server.cue +++ b/website/cue/reference/components/sources/base/http_server.cue @@ -97,7 +97,7 @@ base: components: sources: http_server: configuration: { description: """ The expected encoding of received data. - Note: For `json` and `ndjson` encodings, the fields of the JSON objects are output as separate fields. + For `json` and `ndjson` encodings, the fields of the JSON objects are output as separate fields. 
""" required: false type: string: enum: { diff --git a/website/cue/reference/components/sources/base/splunk_hec.cue b/website/cue/reference/components/sources/base/splunk_hec.cue index 7b5178ab639bc..36cf3a36655e2 100644 --- a/website/cue/reference/components/sources/base/splunk_hec.cue +++ b/website/cue/reference/components/sources/base/splunk_hec.cue @@ -193,7 +193,7 @@ base: components: sources: splunk_hec: configuration: { } valid_tokens: { description: """ - Optional list of valid authorization tokens. + A list of valid authorization tokens. If supplied, incoming requests must supply one of these tokens in the `Authorization` header, just as a client would if it was communicating with the Splunk HEC endpoint directly. diff --git a/website/cue/reference/components/sources/base/syslog.cue b/website/cue/reference/components/sources/base/syslog.cue index dd22384324fed..679a0fc6d2518 100644 --- a/website/cue/reference/components/sources/base/syslog.cue +++ b/website/cue/reference/components/sources/base/syslog.cue @@ -87,7 +87,7 @@ base: components: sources: syslog: configuration: { description: """ Unix file mode bits to be applied to the unix socket file as its designated file permissions. - Note: The file mode value can be specified in any numeric format supported by your configuration + The file mode value can be specified in any numeric format supported by your configuration language, but it is most intuitive to use an octal number. """ relevant_when: "mode = \"unix\"" diff --git a/website/cue/reference/components/transforms/base/aws_ec2_metadata.cue b/website/cue/reference/components/transforms/base/aws_ec2_metadata.cue index ed78aa4b1373c..de6895f194a1d 100644 --- a/website/cue/reference/components/transforms/base/aws_ec2_metadata.cue +++ b/website/cue/reference/components/transforms/base/aws_ec2_metadata.cue @@ -25,7 +25,7 @@ base: components: transforms: aws_ec2_metadata: configuration: { Configure to proxy traffic through an HTTP(S) proxy when making external requests. - Similar to common proxy configuration convention, users can set different proxies + Similar to common proxy configuration convention, you can set different proxies to use based on the type of traffic being proxied, as well as set specific hosts that should not be proxied. """ From 1c1beb8123e1b0c82537ae3c2e26235bc6c0c43b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Jun 2023 20:10:13 +0000 Subject: [PATCH 086/236] chore(deps): Bump mock_instant from 0.3.0 to 0.3.1 (#17574) Bumps [mock_instant](https://github.com/museun/mock_instant) from 0.3.0 to 0.3.1.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b75de5a1e3151..4b703a0e8cebc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5004,9 +5004,9 @@ dependencies = [ [[package]] name = "mock_instant" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c734e0ceadb79b49feb5a39b038035c7881bfd163e999916dc79b57f4996b6f" +checksum = "6c1a54de846c4006b88b1516731cc1f6026eb5dc4bcb186aa071ef66d40524ec" [[package]] name = "mongodb" From 854980945e685485388bda2dd8f9cd9ad040029e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Jun 2023 20:53:45 +0000 Subject: [PATCH 087/236] chore(deps): Bump clap_complete from 4.3.0 to 4.3.1 (#17586) Bumps [clap_complete](https://github.com/clap-rs/clap) from 4.3.0 to 4.3.1.
Changelog

Sourced from clap_complete's changelog.

[4.3.1] - 2023-06-02

Performance

  • (derive) Reduce the amount of generated code
Commits
  • 50f0e6b chore: Release
  • 1471457 docs: Update changelog
  • 7ead9ab Merge pull request #4947 from klensy/formatless-error
  • df5d901 perf(derive): Reduce amount of generated code
  • e8a3568 Merge pull request #4944 from clap-rs/renovate/criterion-0.x
  • a4f8391 Merge pull request #4943 from clap-rs/renovate/compatible-(dev)
  • 4eb03ea chore(deps): update rust crate criterion to 0.5.1
  • 534be34 chore(deps): update compatible (dev)
  • 78bb48b chore: Release
  • 3430d62 Merge pull request #4935 from epage/nu
  • Additional commits viewable in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- vdev/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b703a0e8cebc..18176b52a3474 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1967,9 +1967,9 @@ dependencies = [ [[package]] name = "clap_complete" -version = "4.3.0" +version = "4.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a04ddfaacc3bc9e6ea67d024575fafc2a813027cf374b8f24f7bc233c6b6be12" +checksum = "7f6b5c519bab3ea61843a7923d074b04245624bb84a64a8c150f5deb014e388b" dependencies = [ "clap 4.1.14", ] diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 8b7b3c676e12d..f498d000fcf5b 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -14,7 +14,7 @@ cached = "0.43.0" chrono = { version = "0.4.22", default-features = false, features = ["serde", "clock"] } clap = { version = "4.1.14", features = ["derive"] } clap-verbosity-flag = "2.0.1" -clap_complete = "4.3.0" +clap_complete = "4.3.1" confy = "0.5.1" directories = "5.0.1" # remove this when stabilized https://doc.rust-lang.org/stable/std/path/fn.absolute.html From 3395cfdb90b165653dda7e9014057aac1dba2d28 Mon Sep 17 00:00:00 2001 From: neuronull Date: Fri, 2 Jun 2023 15:19:13 -0600 Subject: [PATCH 088/236] chore(deps): bump pulsar from 5.1.1 to 6.0.0 (#17587) - Bumps [pulsar](https://github.com/streamnative/pulsar-rs) from 5.1.1 to 6.0.0. - As part of this, a new config option for batch `max_bytes` is exposed for the sink. --- Cargo.lock | 4 +-- Cargo.toml | 2 +- src/sinks/pulsar/config.rs | 7 ++++- .../components/sinks/base/pulsar.cue | 27 ++++++++++++------- 4 files changed, 26 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18176b52a3474..07f0a845e9e39 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6340,9 +6340,9 @@ dependencies = [ [[package]] name = "pulsar" -version = "5.1.1" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20f237570b5665b38c7d5228f9a1d2990e369c00e635704528996bcd5219f540" +checksum = "06fbacec81fe6fb82f076279c3aaeb05324478f62c3074f13ecd0452cbec27b2" dependencies = [ "async-trait", "bit-vec 0.6.3", diff --git a/Cargo.toml b/Cargo.toml index d42c2efdf5055..aed442756ae8b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -288,7 +288,7 @@ paste = "1.0.12" percent-encoding = { version = "2.2.0", default-features = false } pin-project = { version = "1.1.0", default-features = false } postgres-openssl = { version = "0.5.0", default-features = false, features = ["runtime"], optional = true } -pulsar = { version = "5.1.1", default-features = false, features = ["tokio-runtime", "auth-oauth2", "flate2", "lz4", "snap", "zstd"], optional = true } +pulsar = { version = "6.0.0", default-features = false, features = ["tokio-runtime", "auth-oauth2", "flate2", "lz4", "snap", "zstd"], optional = true } rand = { version = "0.8.5", default-features = false, features = ["small_rng"] } rand_distr = { version = "0.4.3", default-features = false } rdkafka = { version = "0.31.0", default-features = false, features = ["tokio", "libz", "ssl", "zstd"], optional = true } diff --git a/src/sinks/pulsar/config.rs b/src/sinks/pulsar/config.rs index eba7eba4694d8..d7b9f505175b6 100644 --- a/src/sinks/pulsar/config.rs +++ b/src/sinks/pulsar/config.rs @@ -87,13 +87,17 @@ pub struct PulsarSinkConfig { #[configurable_component] #[derive(Clone, Copy, Debug, Default)] pub(crate) struct PulsarBatchConfig { - /// 
The maximum size of a batch before it is flushed. + /// The maximum amount of events in a batch before it is flushed. /// /// Note this is an unsigned 32 bit integer which is a smaller capacity than /// many of the other sink batch settings. #[configurable(metadata(docs::type_unit = "events"))] #[configurable(metadata(docs::examples = 1000))] pub max_events: Option, + + /// The maximum size of a batch before it is flushed. + #[configurable(metadata(docs::type_unit = "bytes"))] + pub max_bytes: Option, } /// Authentication configuration. @@ -235,6 +239,7 @@ impl PulsarSinkConfig { metadata: Default::default(), schema: None, batch_size: self.batch.max_events, + batch_byte_size: self.batch.max_bytes, compression: None, }; diff --git a/website/cue/reference/components/sinks/base/pulsar.cue b/website/cue/reference/components/sinks/base/pulsar.cue index cd55deaf6093c..cc62959b42f8a 100644 --- a/website/cue/reference/components/sinks/base/pulsar.cue +++ b/website/cue/reference/components/sinks/base/pulsar.cue @@ -86,17 +86,24 @@ base: components: sinks: pulsar: configuration: { batch: { description: "Event batching behavior." required: false - type: object: options: max_events: { - description: """ - The maximum size of a batch before it is flushed. + type: object: options: { + max_bytes: { + description: "The maximum size of a batch before it is flushed." + required: false + type: uint: unit: "bytes" + } + max_events: { + description: """ + The maximum amount of events in a batch before it is flushed. - Note this is an unsigned 32 bit integer which is a smaller capacity than - many of the other sink batch settings. - """ - required: false - type: uint: { - examples: [1000] - unit: "events" + Note this is an unsigned 32 bit integer which is a smaller capacity than + many of the other sink batch settings. + """ + required: false + type: uint: { + examples: [1000] + unit: "events" + } } } } From 25e7699bb505e1856d04634ed6571eb22631b140 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Fri, 2 Jun 2023 23:32:07 +0100 Subject: [PATCH 089/236] fix(loki sink): use json size of unencoded event (#17572) Currently the `loki` sink is encoding the event and then taking the estimated json size of that encoded event. This is wrong. All sinks should take the estimated json size of the unencoded event. There is an open question around whether we should be taking the json size before or after the `only_fields` and `except_fields` are applied. I'm currently trying to get an answer to that. Currently everything is before. The `loki` sink works a little bit different to the other stream based sinks. Most sinks pass the event unencoded to the request builder. It is at this point that the json size of the metrics are calculated. However, `loki` encodes the event and passes the encoded value to the request builder. This PR changes it so it also passes the json size to the request builder so it can use that value to calculate the metrics. Signed-off-by: Stephen Wakely --- src/sinks/loki/event.rs | 18 ++---------------- src/sinks/loki/sink.rs | 4 +++- 2 files changed, 5 insertions(+), 17 deletions(-) diff --git a/src/sinks/loki/event.rs b/src/sinks/loki/event.rs index 76389e2c64fb9..1aede42e35954 100644 --- a/src/sinks/loki/event.rs +++ b/src/sinks/loki/event.rs @@ -139,21 +139,6 @@ impl ByteSizeOf for LokiEvent { } } -/// This implementation approximates the `Serialize` implementation below, without any allocations. 
-impl EstimatedJsonEncodedSizeOf for LokiEvent { - fn estimated_json_encoded_size_of(&self) -> JsonSize { - static BRACKETS_SIZE: JsonSize = JsonSize::new(2); - static COLON_SIZE: JsonSize = JsonSize::new(1); - static QUOTES_SIZE: JsonSize = JsonSize::new(2); - - BRACKETS_SIZE - + QUOTES_SIZE - + self.timestamp.estimated_json_encoded_size_of() - + COLON_SIZE - + self.event.estimated_json_encoded_size_of() - } -} - impl Serialize for LokiEvent { fn serialize(&self, serializer: S) -> Result where @@ -172,6 +157,7 @@ pub struct LokiRecord { pub partition: PartitionKey, pub labels: Labels, pub event: LokiEvent, + pub json_byte_size: JsonSize, pub finalizers: EventFinalizers, } @@ -187,7 +173,7 @@ impl ByteSizeOf for LokiRecord { impl EstimatedJsonEncodedSizeOf for LokiRecord { fn estimated_json_encoded_size_of(&self) -> JsonSize { - self.event.estimated_json_encoded_size_of() + self.json_byte_size } } diff --git a/src/sinks/loki/sink.rs b/src/sinks/loki/sink.rs index 766526bb12bff..b5871d4e876e4 100644 --- a/src/sinks/loki/sink.rs +++ b/src/sinks/loki/sink.rs @@ -12,7 +12,7 @@ use vector_core::{ partition::Partitioner, sink::StreamSink, stream::BatcherSettings, - ByteSizeOf, + ByteSizeOf, EstimatedJsonEncodedSizeOf, }; use super::{ @@ -268,6 +268,7 @@ impl EventEncoder { pub(super) fn encode_event(&mut self, mut event: Event) -> Option { let tenant_id = self.key_partitioner.partition(&event); let finalizers = event.take_finalizers(); + let json_byte_size = event.estimated_json_encoded_size_of(); let mut labels = self.build_labels(&event); self.remove_label_fields(&mut event); @@ -302,6 +303,7 @@ impl EventEncoder { }, partition, finalizers, + json_byte_size, }) } } From fa8a55385dd391aa2429c3f2e9821198c364c6a0 Mon Sep 17 00:00:00 2001 From: neuronull Date: Sun, 4 Jun 2023 12:21:55 -0600 Subject: [PATCH 090/236] chore(ci): int test yaml file detection (#17590) - Adds file detection for changes to the yaml files used in the creation of the int test containers. 
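For context, the added entries follow the shape sketched here: a paths-filter style map from component name to the globs that mark its integration tests as affected. This sketch is illustrative only — the action version, step id, and surrounding workflow keys are assumptions, and only the per-component path lists mirror the actual change below.

```yaml
# Illustrative sketch, not a verbatim excerpt of .github/workflows/changes.yml.
# Each filter key names a component; a change to any listed glob flags that
# component's integration tests. The new "scripts/integration/<name>/**" globs
# pick up edits to the yaml files used to build the int test containers.
- uses: dorny/paths-filter@v2   # assumed action/version
  id: int_tests
  with:
    filters: |
      amqp:
        - "src/sources/amqp.rs"
        - "src/sinks/util/**"
        - "scripts/integration/amqp/**"
```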
--- .github/workflows/changes.yml | 38 +++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index a4bae7367aea3..bc230f3fadf59 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -207,9 +207,11 @@ jobs: - "src/sources/amqp.rs" - "src/sources/util/**" - "src/sinks/util/**" + - "scripts/integration/amqp/**" appsignal: - "src/sinks/appsignal/**" - "src/sinks/util/**" + - "scripts/integration/appsignal/**" aws: - "src/aws/**" - "src/internal_events/aws*" @@ -225,18 +227,23 @@ jobs: - "src/sinks/aws_sqs/**" - "src/sinks/util/**" - "src/transforms/aws*" + - "scripts/integration/aws/**" axiom: - "src/sinks/axiom.rs" - "src/sinks/util/**" + - "scripts/integration/axiom/**" azure: - "src/sinks/azure_**" - "src/sinks/util/**" + - "scripts/integration/azure/**" clickhouse: - "src/sinks/clickhouse/**" - "src/sinks/util/**" + - "scripts/integration/clickhouse/**" databend: - "src/sinks/databend/**" - "src/sinks/util/**" + - "scripts/integration/databend/**" datadog: - "src/common/datadog.rs" - "src/internal_events/datadog_*" @@ -244,22 +251,34 @@ jobs: - "src/sinks/datadog/**" - "src/sinks/datadog_archives.rs" - "src/sinks/util/**" + - "scripts/integration/datadog-agent/**" + - "scripts/integration/datadog-logs/**" + - "scripts/integration/datadog-metrics/**" + - "scripts/integration/datadog-traces/**" + dnstap: + - "src/internal_events/dnstap.rs" + - "src/sources/dnstap/**" + - "scripts/integration/dnstap/**" docker-logs: - "src/docker.rs" - "src/internal_events/docker_logs.rs" - "src/sources/docker_logs/**" - "src/sources/util/**" + - "scripts/integration/docker-logs/**" elasticsearch: - "src/sinks/elasticsearch/**" - "src/sinks/util/**" + - "scripts/integration/elasticsearch/**" eventstoredb: - "src/internal_events/eventstoredb_metrics.rs" - "src/sources/eventstoredb_metrics/**" - "src/sources/util/**" + - "scripts/integration/eventstoredb/**" fluent: - "src/internal_events/fluent.rs" - "src/sources/fluent/**" - "src/sources/util/**" + - "scripts/integration/fluent/**" gcp: - "src/internal_events/gcp_pubsub.rs" - "src/sources/gcp_pubsub.rs" @@ -267,15 +286,21 @@ jobs: - "src/sinks/gcp/**" - "src/sinks/util/**" - "src/gcp.rs" + - "scripts/integration/gcp/**" + - "scripts/integration/chronicle/**" humio: - "src/sinks/humio/**" - "src/sinks/util/**" + - "scripts/integration/humio/**" http-client: - "src/sinks/http-client/**" + - "src/sinks/util/**" + - "scripts/integration/http-client/**" influxdb: - "src/internal_events/influxdb.rs" - "src/sinks/influxdb/**" - "src/sinks/util/**" + - "scripts/integration/influxdb/**" kafka: - "src/internal_events/kafka.rs" - "src/sinks/kafka/**" @@ -283,17 +308,21 @@ jobs: - "src/sources/kafka.rs" - "src/sources/util/**" - "src/kafka.rs" + - "scripts/integration/kafka/**" logstash: - "src/sources/logstash.rs" - "src/sources/util/**" + - "scripts/integration/logstash/**" loki: - "src/internal_events/loki.rs" - "src/sinks/loki/**" - "src/sinks/util/**" + - "scripts/integration/loki/**" mongodb: - "src/internal_events/mongodb_metrics.rs" - "src/sources/mongodb_metrics/**" - "src/sources/util/**" + - "scripts/integration/mongodb/**" nats: - "src/internal_events/nats.rs" - "src/sources/nats.rs" @@ -301,40 +330,49 @@ jobs: - "src/sinks/nats.rs" - "src/sinks/util/**" - "src/nats.rs" + - "scripts/integration/nats/**" nginx: - "src/internal_events/nginx_metrics.rs" - "src/sources/nginx_metrics/**" - "src/sources/util/**" + - "scripts/integration/nginx/**" 
opentelemetry: - "src/sources/opentelemetry/**" - "src/sources/util/**" + - "scripts/integration/opentelemetry/**" postgres: - "src/internal_events/postgresql_metrics.rs" - "src/sources/postgresql_metrics.rs" - "src/sources/util/**" + - "scripts/integration/postgres/**" prometheus: - "src/internal_events/prometheus.rs" - "src/sources/prometheus/**" - "src/sources/util/**" - "src/sinks/prometheus/**" - "src/sinks/util/**" + - "scripts/integration/prometheus/**" pulsar: - "src/internal_events/pulsar.rs" - "src/sinks/pulsar/**" - "src/sinks/util/**" + - "scripts/integration/pulsar/**" redis: - "src/internal_events/redis.rs" - "src/sources/redis/**" - "src/sources/util/**" - "src/sinks/redis.rs" - "src/sinks/util/**" + - "scripts/integration/redis/**" splunk: - "src/internal_events/splunk_hec.rs" - "src/sources/splunk_hec/**" - "src/sources/util/**" - "src/sinks/splunk_hec/**" - "src/sinks/util/**" + - "scripts/integration/splunk/**" webhdfs: - "src/sinks/webhdfs/**" - "src/sinks/util/**" + - "scripts/integration/webhdfs/**" From a164952a145109d95c465645bf08b387a61e408a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 13:10:16 +0000 Subject: [PATCH 091/236] chore(deps): Bump indicatif from 0.17.4 to 0.17.5 (#17597) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [indicatif](https://github.com/console-rs/indicatif) from 0.17.4 to 0.17.5.
Release notes

Sourced from indicatif's releases.

0.17.5

Another smaller release. Fixes a regression from 0.17.4 and should drastically improve ETA and speed estimation thanks to great work by @afontenot in #539.

On behalf of @chris-laplante and @djc, thanks to all contributors!

Commits
  • 4ffc40e bump version to 0.17.5
  • 011c998 Switch Estimator to use an double exponential time-based weighting
  • 2845b7f Refactor estimator's prev tuple into separate elements
  • 36d11e8 refactor estimator to use steps/sec instead of secs/step
  • f88ec3b Fix subtract with overflow when measuring terminal line length
  • bd320a1 Add builder-like with_finish method to ProgressBarIter. (#548)
  • a81fd6c tests: suppress clippy redundant clone check in test code
  • See full diff in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- vdev/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07f0a845e9e39..c30cf2e39e80f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4099,9 +4099,9 @@ dependencies = [ [[package]] name = "indicatif" -version = "0.17.4" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db45317f37ef454e6519b6c3ed7d377e5f23346f0823f86e65ca36912d1d0ef8" +checksum = "8ff8cc23a7393a397ed1d7f56e6365cba772aba9f9912ab968b03043c395d057" dependencies = [ "console", "instant", diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index f498d000fcf5b..29e315b426872 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -22,7 +22,7 @@ dunce = "1.0.4" glob = { version = "0.3.1", default-features = false } hashlink = { version = "0.8.2", features = ["serde_impl"] } hex = "0.4.3" -indicatif = { version = "0.17.4", features = ["improved_unicode"] } +indicatif = { version = "0.17.5", features = ["improved_unicode"] } itertools = "0.10.5" log = "0.4.18" once_cell = "1.17" From da939ca645e49cd02cbd739cddcdfe00dcb88a55 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Mon, 5 Jun 2023 10:27:39 -0400 Subject: [PATCH 092/236] chore: add sink prelude (#17595) Generally our stream based sinks will share a lot of imports. This adds a `crate::sinks::prelude` module that can be imported to bring in the commonly used imports. The advantage is not only reducing the size of our imports and making it easier to maintain (often changes to the framework result in having to change the imports of all the sinks modules which is tedious) but it also encourages and guides using the shared components available. This also updates the following sinks to use the prelude: - amqp - kafka - aws_kinesis - loki - pulsar --------- Signed-off-by: Stephen Wakely --- docs/tutorials/sinks/1_basic_sink.md | 11 +----- docs/tutorials/sinks/2_http_sink.md | 23 +----------- src/internal_events/mod.rs | 2 +- src/sinks/amqp/config.rs | 11 +----- src/sinks/amqp/encoder.rs | 7 ++-- src/sinks/amqp/request_builder.rs | 14 +------- src/sinks/amqp/service.rs | 13 +++---- src/sinks/amqp/sink.rs | 12 +------ src/sinks/aws_kinesis/config.rs | 18 ++++------ src/sinks/aws_kinesis/firehose/config.rs | 3 +- src/sinks/aws_kinesis/service.rs | 7 ++-- src/sinks/aws_kinesis/sink.rs | 17 +++------ src/sinks/aws_kinesis/streams/config.rs | 3 +- src/sinks/kafka/config.rs | 7 +--- src/sinks/kafka/service.rs | 18 ++-------- src/sinks/kafka/sink.rs | 15 +++----- src/sinks/kafka/tests.rs | 4 +-- src/sinks/loki/config.rs | 11 +----- src/sinks/loki/event.rs | 6 +--- src/sinks/loki/service.rs | 11 ++---- src/sinks/loki/sink.rs | 25 ++----------- src/sinks/loki/tests.rs | 6 ++-- src/sinks/mod.rs | 1 + src/sinks/prelude.rs | 45 ++++++++++++++++++++++++ src/sinks/pulsar/config.rs | 6 +--- src/sinks/pulsar/request_builder.rs | 13 ++----- src/sinks/pulsar/service.rs | 10 +----- src/sinks/pulsar/sink.rs | 15 +------- src/sinks/util/encoding.rs | 2 +- 29 files changed, 97 insertions(+), 239 deletions(-) create mode 100644 src/sinks/prelude.rs diff --git a/docs/tutorials/sinks/1_basic_sink.md b/docs/tutorials/sinks/1_basic_sink.md index 4e999d227e822..ca91266925e80 100644 --- a/docs/tutorials/sinks/1_basic_sink.md +++ b/docs/tutorials/sinks/1_basic_sink.md @@ -22,16 +22,7 @@ Provide some module level comments to explain what the sink does. 
Let's setup all the imports we will need for the tutorial: ```rust -use super::Healthcheck; -use crate::config::{GenerateConfig, SinkConfig, SinkContext}; -use futures::{stream::BoxStream, StreamExt}; -use vector_common::finalization::{EventStatus, Finalizable}; -use vector_config::configurable_component; -use vector_core::{ - config::{AcknowledgementsConfig, Input}, - event::Event, - sink::{StreamSink, VectorSink}, -}; +use crate::prelude::*; ``` # Configuration diff --git a/docs/tutorials/sinks/2_http_sink.md b/docs/tutorials/sinks/2_http_sink.md index ed99ca4105d4b..7090ef41a88d1 100644 --- a/docs/tutorials/sinks/2_http_sink.md +++ b/docs/tutorials/sinks/2_http_sink.md @@ -12,32 +12,11 @@ To start, update our imports to the following: use std::task::Poll; use crate::{ - config::{GenerateConfig, SinkConfig, SinkContext}, + sinks::prelude::*, http::HttpClient, internal_events::SinkRequestBuildError, - sinks::util::{ - encoding::{write_all, Encoder}, - metadata::RequestMetadataBuilder, - request_builder::EncodeResult, - Compression, RequestBuilder, SinkBuilderExt, - }, - sinks::Healthcheck, }; use bytes::Bytes; -use futures::{future::BoxFuture, stream::BoxStream, StreamExt}; -use vector_common::{ - finalization::{EventFinalizers, EventStatus, Finalizable}, - internal_event::CountByteSize, - request_metadata::{MetaDescriptive, RequestMetadata}, -}; -use vector_config::configurable_component; -use vector_core::{ - config::{AcknowledgementsConfig, Input}, - event::Event, - sink::{StreamSink, VectorSink}, - stream::DriverResponse, - tls::TlsSettings, -}; ``` # Configuration diff --git a/src/internal_events/mod.rs b/src/internal_events/mod.rs index f357cbb0f469b..7d30daba29d97 100644 --- a/src/internal_events/mod.rs +++ b/src/internal_events/mod.rs @@ -259,7 +259,7 @@ pub(crate) use self::unix::*; pub(crate) use self::websocket::*; #[cfg(windows)] pub(crate) use self::windows::*; -pub(crate) use self::{ +pub use self::{ adaptive_concurrency::*, batch::*, common::*, conditions::*, encoding_transcode::*, heartbeat::*, open::*, process::*, socket::*, tcp::*, template::*, udp::*, }; diff --git a/src/sinks/amqp/config.rs b/src/sinks/amqp/config.rs index 3af34e943d7b2..a83cb7c1f2aad 100644 --- a/src/sinks/amqp/config.rs +++ b/src/sinks/amqp/config.rs @@ -1,17 +1,8 @@ //! Configuration functionality for the `AMQP` sink. -use crate::{ - amqp::AmqpConfig, - codecs::EncodingConfig, - config::{DataType, GenerateConfig, Input, SinkConfig, SinkContext}, - sinks::{Healthcheck, VectorSink}, - template::Template, -}; +use crate::{amqp::AmqpConfig, sinks::prelude::*}; use codecs::TextSerializerConfig; -use futures::FutureExt; use lapin::{types::ShortString, BasicProperties}; use std::sync::Arc; -use vector_config::configurable_component; -use vector_core::config::AcknowledgementsConfig; use super::sink::AmqpSink; diff --git a/src/sinks/amqp/encoder.rs b/src/sinks/amqp/encoder.rs index 6e86828c82923..d3d449811372f 100644 --- a/src/sinks/amqp/encoder.rs +++ b/src/sinks/amqp/encoder.rs @@ -1,8 +1,5 @@ //! Encoding for the `AMQP` sink. 
-use crate::{ - event::Event, - sinks::util::encoding::{write_all, Encoder}, -}; +use crate::sinks::prelude::*; use bytes::BytesMut; use std::io; use tokio_util::codec::Encoder as _; @@ -13,7 +10,7 @@ pub(super) struct AmqpEncoder { pub(super) transformer: crate::codecs::Transformer, } -impl Encoder for AmqpEncoder { +impl encoding::Encoder for AmqpEncoder { fn encode_input(&self, mut input: Event, writer: &mut dyn io::Write) -> io::Result { let mut body = BytesMut::new(); self.transformer.transform(&mut input); diff --git a/src/sinks/amqp/request_builder.rs b/src/sinks/amqp/request_builder.rs index ad8fe36565453..ace1af1f66fe4 100644 --- a/src/sinks/amqp/request_builder.rs +++ b/src/sinks/amqp/request_builder.rs @@ -1,22 +1,10 @@ //! Request builder for the `AMQP` sink. //! Responsible for taking the event (which includes rendered template values) and turning //! it into the raw bytes and other data needed to send the request to `AMQP`. -use crate::{ - event::Event, - sinks::util::{ - metadata::RequestMetadataBuilder, request_builder::EncodeResult, Compression, - RequestBuilder, - }, -}; +use crate::sinks::prelude::*; use bytes::Bytes; use lapin::BasicProperties; use std::io; -use vector_common::{ - finalization::{EventFinalizers, Finalizable}, - json_size::JsonSize, - request_metadata::RequestMetadata, -}; -use vector_core::EstimatedJsonEncodedSizeOf; use super::{encoder::AmqpEncoder, service::AmqpRequest, sink::AmqpEvent}; diff --git a/src/sinks/amqp/service.rs b/src/sinks/amqp/service.rs index ff1e71487298a..20b16b99e6e39 100644 --- a/src/sinks/amqp/service.rs +++ b/src/sinks/amqp/service.rs @@ -1,6 +1,9 @@ //! The main tower service that takes the request created by the request builder //! and sends it to `AMQP`. -use crate::internal_events::sink::{AmqpAcknowledgementError, AmqpDeliveryError}; +use crate::{ + internal_events::sink::{AmqpAcknowledgementError, AmqpDeliveryError}, + sinks::prelude::*, +}; use bytes::Bytes; use futures::future::BoxFuture; use lapin::{options::BasicPublishOptions, BasicProperties}; @@ -9,14 +12,6 @@ use std::{ sync::Arc, task::{Context, Poll}, }; -use tower::Service; -use vector_common::{ - finalization::{EventFinalizers, EventStatus, Finalizable}, - internal_event::CountByteSize, - json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, -}; -use vector_core::stream::DriverResponse; /// The request contains the data to send to `AMQP` together /// with the information need to route the message. diff --git a/src/sinks/amqp/sink.rs b/src/sinks/amqp/sink.rs index ff0bdeb9d0042..f1da0b8d944f0 100644 --- a/src/sinks/amqp/sink.rs +++ b/src/sinks/amqp/sink.rs @@ -1,19 +1,9 @@ //! The sink for the `AMQP` sink that wires together the main stream that takes the //! event and sends it to `AMQP`. 
-use crate::{ - codecs::Transformer, event::Event, internal_events::TemplateRenderingError, - sinks::util::builder::SinkBuilderExt, template::Template, -}; -use async_trait::async_trait; -use futures::StreamExt; -use futures_util::stream::BoxStream; +use crate::sinks::prelude::*; use lapin::{options::ConfirmSelectOptions, BasicProperties}; use serde::Serialize; use std::sync::Arc; -use tower::ServiceBuilder; -use vector_buffers::EventCount; -use vector_common::json_size::JsonSize; -use vector_core::{sink::StreamSink, ByteSizeOf, EstimatedJsonEncodedSizeOf}; use super::{ config::{AmqpPropertiesConfig, AmqpSinkConfig}, diff --git a/src/sinks/aws_kinesis/config.rs b/src/sinks/aws_kinesis/config.rs index e817d98881985..4e2d136a054ff 100644 --- a/src/sinks/aws_kinesis/config.rs +++ b/src/sinks/aws_kinesis/config.rs @@ -1,19 +1,13 @@ use std::marker::PhantomData; -use tower::ServiceBuilder; -use vector_config::configurable_component; -use vector_core::{ - config::{DataType, Input}, - sink::VectorSink, - stream::BatcherSettings, -}; +use vector_core::stream::BatcherSettings; use crate::{ aws::{AwsAuthentication, RegionOrEndpoint}, - codecs::{Encoder, EncodingConfig}, - config::AcknowledgementsConfig, - sinks::util::{retries::RetryLogic, Compression, ServiceBuilderExt, TowerRequestConfig}, - tls::TlsConfig, + sinks::{ + prelude::*, + util::{retries::RetryLogic, TowerRequestConfig}, + }, }; use super::{ @@ -78,7 +72,7 @@ impl KinesisSinkBaseConfig { } /// Builds an aws_kinesis sink. -pub async fn build_sink( +pub fn build_sink( config: &KinesisSinkBaseConfig, partition_key_field: Option, batch_settings: BatcherSettings, diff --git a/src/sinks/aws_kinesis/firehose/config.rs b/src/sinks/aws_kinesis/firehose/config.rs index 8255c6bf220bd..c8080e0711da3 100644 --- a/src/sinks/aws_kinesis/firehose/config.rs +++ b/src/sinks/aws_kinesis/firehose/config.rs @@ -141,8 +141,7 @@ impl SinkConfig for KinesisFirehoseSinkConfig { None, batch_settings, KinesisFirehoseClient { client }, - ) - .await?; + )?; Ok((sink, healthcheck)) } diff --git a/src/sinks/aws_kinesis/service.rs b/src/sinks/aws_kinesis/service.rs index 9ceeb8c8d4938..3539fee4e2eab 100644 --- a/src/sinks/aws_kinesis/service.rs +++ b/src/sinks/aws_kinesis/service.rs @@ -5,16 +5,13 @@ use std::{ use aws_smithy_client::SdkError; use aws_types::region::Region; -use futures::future::BoxFuture; -use tower::Service; -use vector_common::{json_size::JsonSize, request_metadata::MetaDescriptive}; -use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; +use vector_core::internal_event::CountByteSize; use super::{ record::{Record, SendRecord}, sink::BatchKinesisRequest, }; -use crate::event::EventStatus; +use crate::{event::EventStatus, sinks::prelude::*}; pub struct KinesisService { pub client: C, diff --git a/src/sinks/aws_kinesis/sink.rs b/src/sinks/aws_kinesis/sink.rs index 0f74320ad4be1..bc3d53947c338 100644 --- a/src/sinks/aws_kinesis/sink.rs +++ b/src/sinks/aws_kinesis/sink.rs @@ -1,22 +1,13 @@ use std::{borrow::Cow, fmt::Debug, marker::PhantomData, num::NonZeroUsize}; -use async_trait::async_trait; -use futures::{future, stream::BoxStream, StreamExt}; use rand::random; -use tower::Service; -use vector_common::{ - finalization::{EventFinalizers, Finalizable}, - request_metadata::{MetaDescriptive, RequestMetadata}, -}; -use vector_core::{ - partition::Partitioner, - stream::{BatcherSettings, DriverResponse}, -}; use crate::{ - event::{Event, LogEvent}, internal_events::{AwsKinesisStreamNoPartitionKeyError, SinkRequestBuildError}, - 
sinks::util::{processed_event::ProcessedEvent, SinkBuilderExt, StreamSink}, + sinks::{ + prelude::*, + util::{processed_event::ProcessedEvent, StreamSink}, + }, }; use super::{ diff --git a/src/sinks/aws_kinesis/streams/config.rs b/src/sinks/aws_kinesis/streams/config.rs index 9d3461a6d14c8..673ab7b4d212a 100644 --- a/src/sinks/aws_kinesis/streams/config.rs +++ b/src/sinks/aws_kinesis/streams/config.rs @@ -148,8 +148,7 @@ impl SinkConfig for KinesisStreamsSinkConfig { self.partition_key_field.clone(), batch_settings, KinesisStreamClient { client }, - ) - .await?; + )?; Ok((sink, healthcheck)) } diff --git a/src/sinks/kafka/config.rs b/src/sinks/kafka/config.rs index 4fe8a13ee3a20..9615c22ec7280 100644 --- a/src/sinks/kafka/config.rs +++ b/src/sinks/kafka/config.rs @@ -5,20 +5,15 @@ use futures::FutureExt; use rdkafka::ClientConfig; use serde_with::serde_as; use vector_config::configurable_component; -use vector_core::schema::Requirement; use vrl::value::Kind; use crate::{ - codecs::EncodingConfig, - config::{AcknowledgementsConfig, DataType, GenerateConfig, Input, SinkConfig, SinkContext}, kafka::{KafkaAuthConfig, KafkaCompression}, serde::json::to_string, sinks::{ kafka::sink::{healthcheck, KafkaSink}, - util::{BatchConfig, NoDefaultsBatchSettings}, - Healthcheck, VectorSink, + prelude::*, }, - template::Template, }; pub(crate) const QUEUED_MIN_MESSAGES: u64 = 100000; diff --git a/src/sinks/kafka/service.rs b/src/sinks/kafka/service.rs index 89a1fb5ce6827..f271a7a580e53 100644 --- a/src/sinks/kafka/service.rs +++ b/src/sinks/kafka/service.rs @@ -1,29 +1,17 @@ use std::task::{Context, Poll}; use bytes::Bytes; -use futures::future::BoxFuture; use rdkafka::{ error::KafkaError, message::OwnedHeaders, producer::{FutureProducer, FutureRecord}, util::Timeout, }; -use tower::Service; -use vector_common::{ - json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, -}; -use vector_core::{ - internal_event::{ - ByteSize, BytesSent, CountByteSize, InternalEventHandle as _, Protocol, Registered, - }, - stream::DriverResponse, +use vector_core::internal_event::{ + ByteSize, BytesSent, CountByteSize, InternalEventHandle as _, Protocol, Registered, }; -use crate::{ - event::{EventFinalizers, EventStatus, Finalizable}, - kafka::KafkaStatisticsContext, -}; +use crate::{kafka::KafkaStatisticsContext, sinks::prelude::*}; pub struct KafkaRequest { pub body: Bytes, diff --git a/src/sinks/kafka/sink.rs b/src/sinks/kafka/sink.rs index 80fe87d835744..e3060900f71c2 100644 --- a/src/sinks/kafka/sink.rs +++ b/src/sinks/kafka/sink.rs @@ -1,5 +1,4 @@ -use async_trait::async_trait; -use futures::{future, stream::BoxStream, StreamExt}; +use futures::future; use rdkafka::{ consumer::{BaseConsumer, Consumer}, error::KafkaError, @@ -12,17 +11,11 @@ use tower::limit::ConcurrencyLimit; use super::config::{KafkaRole, KafkaSinkConfig}; use crate::{ - codecs::{Encoder, Transformer}, - event::{Event, LogEvent}, kafka::KafkaStatisticsContext, - sinks::{ - kafka::{ - config::QUEUED_MIN_MESSAGES, request_builder::KafkaRequestBuilder, - service::KafkaService, - }, - util::{builder::SinkBuilderExt, StreamSink}, + sinks::kafka::{ + config::QUEUED_MIN_MESSAGES, request_builder::KafkaRequestBuilder, service::KafkaService, }, - template::{Template, TemplateParseError}, + sinks::prelude::*, }; #[derive(Debug, Snafu)] diff --git a/src/sinks/kafka/tests.rs b/src/sinks/kafka/tests.rs index 4efa9e4d313e3..ba1e62e1eeb9e 100644 --- a/src/sinks/kafka/tests.rs +++ b/src/sinks/kafka/tests.rs @@ -29,10 +29,8 @@ mod 
integration_test { sink::KafkaSink, *, }, - util::{BatchConfig, NoDefaultsBatchSettings}, - VectorSink, + prelude::*, }, - template::Template, test_util::{ components::{assert_sink_compliance, SINK_TAGS}, random_lines_with_stream, random_string, wait_for, diff --git a/src/sinks/loki/config.rs b/src/sinks/loki/config.rs index 108cfa0aa3f99..6cb74c426ec08 100644 --- a/src/sinks/loki/config.rs +++ b/src/sinks/loki/config.rs @@ -1,21 +1,12 @@ use std::collections::HashMap; -use futures::future::FutureExt; -use vector_config::configurable_component; use vrl::value::Kind; use super::{healthcheck::healthcheck, sink::LokiSink}; use crate::{ - codecs::EncodingConfig, - config::{AcknowledgementsConfig, DataType, GenerateConfig, Input, SinkConfig, SinkContext}, http::{Auth, HttpClient, MaybeAuth}, schema, - sinks::{ - util::{BatchConfig, Compression, SinkBatchSettings, TowerRequestConfig, UriSerde}, - VectorSink, - }, - template::Template, - tls::{TlsConfig, TlsSettings}, + sinks::{prelude::*, util::UriSerde}, }; /// Loki-specific compression. diff --git a/src/sinks/loki/event.rs b/src/sinks/loki/event.rs index 1aede42e35954..6b85153c0655b 100644 --- a/src/sinks/loki/event.rs +++ b/src/sinks/loki/event.rs @@ -1,13 +1,9 @@ use std::{collections::HashMap, io}; +use crate::sinks::prelude::*; use bytes::Bytes; use serde::{ser::SerializeSeq, Serialize}; use vector_buffers::EventCount; -use vector_common::json_size::JsonSize; -use vector_core::{ - event::{EventFinalizers, Finalizable}, - ByteSizeOf, EstimatedJsonEncodedSizeOf, -}; use crate::sinks::util::encoding::{write_all, Encoder}; diff --git a/src/sinks/loki/service.rs b/src/sinks/loki/service.rs index ec62cb690e432..1ac3c871631cb 100644 --- a/src/sinks/loki/service.rs +++ b/src/sinks/loki/service.rs @@ -1,22 +1,15 @@ use std::task::{Context, Poll}; use bytes::Bytes; -use futures::future::BoxFuture; use http::StatusCode; use snafu::Snafu; -use tower::Service; use tracing::Instrument; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; -use vector_core::{ - event::{EventFinalizers, EventStatus, Finalizable}, - internal_event::CountByteSize, - stream::DriverResponse, -}; +use vector_core::internal_event::CountByteSize; use crate::sinks::loki::config::{CompressionConfigAdapter, ExtendedCompression}; use crate::{ http::{Auth, HttpClient}, - sinks::util::{retries::RetryLogic, UriSerde}, + sinks::{prelude::*, util::UriSerde}, }; #[derive(Clone)] diff --git a/src/sinks/loki/sink.rs b/src/sinks/loki/sink.rs index b5871d4e876e4..1ba3cbee6268a 100644 --- a/src/sinks/loki/sink.rs +++ b/src/sinks/loki/sink.rs @@ -1,44 +1,25 @@ use std::{collections::HashMap, num::NonZeroUsize}; use bytes::{Bytes, BytesMut}; -use futures::{stream::BoxStream, StreamExt}; use once_cell::sync::Lazy; use regex::Regex; use snafu::Snafu; use tokio_util::codec::Encoder as _; -use vector_common::request_metadata::RequestMetadata; -use vector_core::{ - event::{Event, EventFinalizers, Finalizable, Value}, - partition::Partitioner, - sink::StreamSink, - stream::BatcherSettings, - ByteSizeOf, EstimatedJsonEncodedSizeOf, -}; use super::{ config::{LokiConfig, OutOfOrderAction}, event::{LokiBatchEncoder, LokiEvent, LokiRecord, PartitionKey}, service::{LokiRequest, LokiRetryLogic, LokiService}, }; +use crate::sinks::loki::config::{CompressionConfigAdapter, ExtendedCompression}; use crate::sinks::loki::event::LokiBatchEncoding; -use crate::sinks::{ - loki::config::{CompressionConfigAdapter, ExtendedCompression}, - util::metadata::RequestMetadataBuilder, -}; use crate::{ - 
codecs::{Encoder, Transformer}, http::{get_http_scheme_from_uri, HttpClient}, internal_events::{ LokiEventUnlabeledError, LokiOutOfOrderEventDroppedError, LokiOutOfOrderEventRewritten, - SinkRequestBuildError, TemplateRenderingError, - }, - sinks::util::{ - builder::SinkBuilderExt, - request_builder::EncodeResult, - service::{ServiceBuilderExt, Svc}, - Compression, RequestBuilder, + SinkRequestBuildError, }, - template::Template, + sinks::prelude::*, }; #[derive(Clone)] diff --git a/src/sinks/loki/tests.rs b/src/sinks/loki/tests.rs index 5661b0c6ec8b8..cf34b729684c1 100644 --- a/src/sinks/loki/tests.rs +++ b/src/sinks/loki/tests.rs @@ -1,13 +1,11 @@ -use futures::StreamExt; +use vector_core::config::proxy::ProxyConfig; use super::{config::LokiConfig, healthcheck::healthcheck, sink::LokiSink}; use crate::{ - config::ProxyConfig, - event::{Event, LogEvent}, http::HttpClient, + sinks::prelude::*, sinks::util::test::{build_test_server, load_sink}, test_util, - tls::TlsSettings, }; #[test] diff --git a/src/sinks/mod.rs b/src/sinks/mod.rs index bce42f6769ba6..b21c5749841c6 100644 --- a/src/sinks/mod.rs +++ b/src/sinks/mod.rs @@ -3,6 +3,7 @@ use enum_dispatch::enum_dispatch; use futures::future::BoxFuture; use snafu::Snafu; +pub mod prelude; pub mod util; #[cfg(feature = "sinks-amqp")] diff --git a/src/sinks/prelude.rs b/src/sinks/prelude.rs new file mode 100644 index 0000000000000..15f5d99376a0f --- /dev/null +++ b/src/sinks/prelude.rs @@ -0,0 +1,45 @@ +//! Prelude module for sinks which will re-export the symbols that most +//! stream based sinks are likely to use. + +pub use crate::{ + codecs::{Encoder, EncodingConfig, Transformer}, + config::{DataType, GenerateConfig, SinkConfig, SinkContext}, + event::{Event, LogEvent}, + internal_events::TemplateRenderingError, + sinks::util::retries::RetryLogic, + sinks::{ + util::{ + builder::SinkBuilderExt, + encoding::{self, write_all}, + metadata::RequestMetadataBuilder, + request_builder::EncodeResult, + service::{ServiceBuilderExt, Svc}, + BatchConfig, Compression, NoDefaultsBatchSettings, RequestBuilder, SinkBatchSettings, + TowerRequestConfig, + }, + Healthcheck, + }, + template::{Template, TemplateParseError}, + tls::TlsConfig, +}; +pub use async_trait::async_trait; +pub use futures::{future, future::BoxFuture, stream::BoxStream, FutureExt, StreamExt}; +pub use tower::{Service, ServiceBuilder}; +pub use vector_buffers::EventCount; +pub use vector_common::{ + finalization::{EventFinalizers, EventStatus, Finalizable}, + internal_event::CountByteSize, + json_size::JsonSize, + request_metadata::{MetaDescriptive, RequestMetadata}, +}; +pub use vector_config::configurable_component; +pub use vector_core::{ + config::{AcknowledgementsConfig, Input}, + event::Value, + partition::Partitioner, + schema::Requirement, + sink::{StreamSink, VectorSink}, + stream::{BatcherSettings, DriverResponse}, + tls::TlsSettings, + ByteSizeOf, EstimatedJsonEncodedSizeOf, +}; diff --git a/src/sinks/pulsar/config.rs b/src/sinks/pulsar/config.rs index d7b9f505175b6..7ec5ef601f32e 100644 --- a/src/sinks/pulsar/config.rs +++ b/src/sinks/pulsar/config.rs @@ -1,12 +1,9 @@ use crate::{ - codecs::EncodingConfig, - config::{AcknowledgementsConfig, GenerateConfig, Input, SinkConfig, SinkContext}, schema, sinks::{ + prelude::*, pulsar::sink::{healthcheck, PulsarSink}, - Healthcheck, VectorSink, }, - template::Template, }; use codecs::{encoding::SerializerConfig, TextSerializerConfig}; use futures_util::FutureExt; @@ -21,7 +18,6 @@ use pulsar::{ use pulsar::{error::AuthenticationError, 
OperationRetryOptions}; use snafu::ResultExt; use vector_common::sensitive_string::SensitiveString; -use vector_config::configurable_component; use vector_core::config::DataType; use vrl::value::Kind; diff --git a/src/sinks/pulsar/request_builder.rs b/src/sinks/pulsar/request_builder.rs index ec104d0ebf508..b284ffef1ab26 100644 --- a/src/sinks/pulsar/request_builder.rs +++ b/src/sinks/pulsar/request_builder.rs @@ -1,17 +1,10 @@ use bytes::Bytes; use std::collections::HashMap; use std::io; -use vector_common::finalization::EventFinalizers; -use vector_common::request_metadata::RequestMetadata; -use crate::sinks::pulsar::encoder::PulsarEncoder; -use crate::sinks::pulsar::sink::PulsarEvent; -use crate::sinks::util::metadata::RequestMetadataBuilder; -use crate::sinks::util::request_builder::EncodeResult; -use crate::sinks::util::{Compression, RequestBuilder}; -use crate::{ - event::{Event, Finalizable}, - sinks::pulsar::service::PulsarRequest, +use crate::sinks::{ + prelude::*, + pulsar::{encoder::PulsarEncoder, service::PulsarRequest, sink::PulsarEvent}, }; #[derive(Clone)] diff --git a/src/sinks/pulsar/service.rs b/src/sinks/pulsar/service.rs index bb61dcee92ed3..b04d2eb0d13e5 100644 --- a/src/sinks/pulsar/service.rs +++ b/src/sinks/pulsar/service.rs @@ -3,21 +3,13 @@ use std::sync::Arc; use std::task::{Context, Poll}; use bytes::Bytes; -use futures::future::BoxFuture; use pulsar::producer::Message; use pulsar::{Error as PulsarError, Executor, MultiTopicProducer, ProducerOptions, Pulsar}; use tokio::sync::Mutex; -use tower::Service; use vector_common::internal_event::CountByteSize; -use vector_core::stream::DriverResponse; -use crate::event::{EventFinalizers, EventStatus, Finalizable}; use crate::internal_events::PulsarSendingError; -use crate::sinks::pulsar::request_builder::PulsarMetadata; -use vector_common::{ - json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, -}; +use crate::sinks::{prelude::*, pulsar::request_builder::PulsarMetadata}; #[derive(Clone)] pub(super) struct PulsarRequest { diff --git a/src/sinks/pulsar/sink.rs b/src/sinks/pulsar/sink.rs index c8ab0bcae256c..8644aa561dc58 100644 --- a/src/sinks/pulsar/sink.rs +++ b/src/sinks/pulsar/sink.rs @@ -1,24 +1,11 @@ use async_trait::async_trait; use bytes::Bytes; -use futures::{stream::BoxStream, StreamExt}; use pulsar::{Error as PulsarError, Pulsar, TokioExecutor}; use serde::Serialize; use snafu::Snafu; use std::collections::HashMap; -use tower::ServiceBuilder; -use crate::{ - codecs::{Encoder, Transformer}, - event::Event, - sinks::util::SinkBuilderExt, - template::Template, -}; -use vector_buffers::EventCount; -use vector_common::{byte_size_of::ByteSizeOf, json_size::JsonSize}; -use vector_core::{ - event::{EstimatedJsonEncodedSizeOf, LogEvent}, - sink::StreamSink, -}; +use crate::sinks::prelude::*; use super::{ config::PulsarSinkConfig, encoder::PulsarEncoder, request_builder::PulsarRequestBuilder, diff --git a/src/sinks/util/encoding.rs b/src/sinks/util/encoding.rs index c2705f5deff5a..00dc6944bdbad 100644 --- a/src/sinks/util/encoding.rs +++ b/src/sinks/util/encoding.rs @@ -78,7 +78,7 @@ impl Encoder for (Transformer, crate::codecs::Encoder<()>) { /// * `writer` - The object implementing io::Write to write data to. /// * `n_events_pending` - The number of events that are dropped if this write fails. /// * `buf` - The buffer to write. 
-pub(crate) fn write_all( +pub fn write_all( writer: &mut dyn io::Write, n_events_pending: usize, buf: &[u8], From 6b34868e285a4608914405b7701ae1ee82deb536 Mon Sep 17 00:00:00 2001 From: neuronull Date: Mon, 5 Jun 2023 11:11:04 -0600 Subject: [PATCH 093/236] enhancement(dev): move blocked/waiting gardener issues to triage on comment (#17588) Reverts https://github.com/vectordotdev/vector/pull/16950 --- .github/workflows/gardener_issue_comment.yml | 89 ++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 .github/workflows/gardener_issue_comment.yml diff --git a/.github/workflows/gardener_issue_comment.yml b/.github/workflows/gardener_issue_comment.yml new file mode 100644 index 0000000000000..355482633d445 --- /dev/null +++ b/.github/workflows/gardener_issue_comment.yml @@ -0,0 +1,89 @@ +# Gardener Issue Comment +# +# This workflow moves GH issues from the Gardener board's "Blocked / Waiting" column +# to the "Triage", so that the Gardener can assess the issue in light of new information. + +name: Gardener Issue Comment + +on: + issue_comment: + types: [created] + +jobs: + move-to-backlog: + name: Move issues back to Gardener project board Triage + runs-on: ubuntu-latest + if: contains(github.event.issue.url, 'issues') + steps: + - name: Move issue back to Triage if status is Blocked/Waiting + env: + GH_TOKEN: ${{ secrets.GH_PAT_PROJECTS }} + run: | + issue_id=${{ github.event.issue.node_id }} + + # IDs fetched from https://docs.github.com/en/graphql/overview/explorer + project_id="PVT_kwDOAQFeYs4AAsTr" # Gardener + status_field_id="PVTF_lADOAQFeYs4AAsTrzgAXRuU" # Status + triage_option_id="f75ad846" + + # ensures that the issue is already on board but also seems to be the only way to fetch + # the item id + item_id="$(gh api graphql -f query=' + mutation($project_id: ID!, $content_id: ID!) { + addProjectV2ItemById(input: {projectId: $project_id, contentId: $content_id}) { + item { + id + } + } + }' -f project_id="$project_id" -f content_id="$issue_id" -q '.data.addProjectV2ItemById.item.id' + )" + + echo "item_id: $item_id" + + if [ -z "$item_id" ] ; then + echo "Issue not found in Gardener board" + exit 0 + else + echo "Found issue on Gardener board" + fi + + current_status="$(gh api graphql -f query=' + query($item_id: ID!) { + node(id: $item_id) { + ... on ProjectV2Item { + fieldValueByName(name: "Status") { + ... on ProjectV2ItemFieldSingleSelectValue { + name + } + } + } + } + }' -f item_id="$item_id" + )" + + current_status=$(echo $current_status | jq -c -r '.["data"]["node"]["fieldValueByName"]["name"]') + + echo "Current issue status is: '${current_status}'" + + if [ "$current_status" = "Blocked / Waiting" ] ; then + echo "Moving issue from 'Blocked / Waiting' to 'Triage'" + gh api graphql -f query=' + mutation($project_id: ID!, $item_id: ID!, $field_id: ID!, $option_id: String) { + updateProjectV2ItemFieldValue( + input: { + projectId: $project_id + itemId: $item_id + fieldId: $field_id + value: { + singleSelectOptionId: $option_id + } + } + ) { + projectV2Item { + id + } + } + }' -f project_id="$project_id" -f item_id="$item_id" -f field_id="$status_field_id" -f option_id="$triage_option_id" + else + echo "Issue is in '${current_status}', not moving." 
+ fi From dc6bef2a2e6c47e145c776b4fd91042b112a0890 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 17:23:59 +0000 Subject: [PATCH 094/236] chore(deps): Bump once_cell from 1.17.2 to 1.18.0 (#17596) Bumps [once_cell](https://github.com/matklad/once_cell) from 1.17.2 to 1.18.0.
Changelog

Sourced from once_cell's changelog.

1.18.0

  • MSRV is updated to 1.60.0 to take advantage of dep: syntax for cargo features, removing "implementation details" from publicly visible surface.
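For illustration only (not part of the upstream changelog): a minimal, hypothetical Cargo.toml sketch of the `dep:` feature syntax that the entry above refers to. The `parking_lot` dependency and the `fast-locks` feature name are invented for the example.

    [dependencies]
    # Optional dependency; with `dep:` it no longer creates an implicit feature.
    parking_lot = { version = "0.12", optional = true }

    [features]
    # The public feature enables the dependency without exposing its name
    # as part of the crate's own feature surface.
    fast-locks = ["dep:parking_lot"]
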
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=once_cell&package-manager=cargo&previous-version=1.17.2&new-version=1.18.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- lib/codecs/Cargo.toml | 2 +- lib/vector-buffers/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- vdev/Cargo.toml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c30cf2e39e80f..5a8a164c4cded 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5492,9 +5492,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.17.2" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "onig" diff --git a/Cargo.toml b/Cargo.toml index aed442756ae8b..39029b16b7008 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -280,7 +280,7 @@ nats = { version = "0.24.0", default-features = false, optional = true } nkeys = { version = "0.3.0", default-features = false, optional = true } nom = { version = "7.1.3", default-features = false, optional = true } notify = { version = "6.0.0", default-features = false, features = ["macos_fsevent"] } -once_cell = { version = "1.17", default-features = false } +once_cell = { version = "1.18", default-features = false } openssl = { version = "0.10.54", default-features = false, features = ["vendored"] } openssl-probe = { version = "0.1.5", default-features = false } ordered-float = { version = "3.7.0", default-features = false } diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 16463c94e33a1..4bc98d62ff0b2 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -14,7 +14,7 @@ derivative = { version = "2", default-features = false } dyn-clone = { version = "1", default-features = false } lookup = { package = "vector-lookup", path = "../vector-lookup", default-features = false } memchr = { version = "2", default-features = false } -once_cell = { version = "1.17", default-features = false } +once_cell = { version = "1.18", default-features = false } ordered-float = { version = "3.7.0", default-features = false } prost = { version = "0.11.8", default-features = false, features = ["std"] } regex = { version = "1.8.3", default-features = false, features = ["std", "perf"] } diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index 28f767129c9a1..30fd200f326f7 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -38,7 +38,7 @@ crossbeam-queue = "0.3.8" hdrhistogram = "7.5.2" metrics-tracing-context = { version = "0.14.0", default-features = false } metrics-util = { version = "0.15.0", default-features = false, features = ["debugging"] } -once_cell = "1.17" +once_cell = "1.18" proptest = "1.2" quickcheck = "1.0" rand = "0.8.5" diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 9b627141968e0..456d93ac4b8e4 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -29,7 +29,7 @@ metrics-tracing-context = { version = "0.14.0", default-features = false } metrics-util = { version = "0.15.0", default-features = false, features = ["registry"] } mlua = { version = "0.8.9", default-features = false, features = ["lua54", "send", "vendored"], optional = true } no-proxy = { version = "0.3.2", default-features = false, features = ["serialize"] } -once_cell = { version = "1.17", default-features = false } +once_cell = { version = "1.18", default-features = false } ordered-float = 
{ version = "3.7.0", default-features = false } openssl = { version = "0.10.54", default-features = false, features = ["vendored"] } parking_lot = { version = "0.12.1", default-features = false } diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 29e315b426872..a8025605c2a15 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -25,7 +25,7 @@ hex = "0.4.3" indicatif = { version = "0.17.5", features = ["improved_unicode"] } itertools = "0.10.5" log = "0.4.18" -once_cell = "1.17" +once_cell = "1.18" os_info = { version = "3.7.0", default-features = false } # watch https://github.com/epage/anstyle for official interop with Clap owo-colors = { version = "3.5.0", features = ["supports-colors"] } From 8e042590117989394f8bc246dc6d7de61d00123a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 17:24:54 +0000 Subject: [PATCH 095/236] chore(deps): Bump percent-encoding from 2.2.0 to 2.3.0 (#17602) Bumps [percent-encoding](https://github.com/servo/rust-url) from 2.2.0 to 2.3.0.
Commits
  • 35abc32 Merge pull request #791 from servo/ver_2.3.0
  • 25266fc Bump url version to 2.3.0
  • 587e962 Merge pull request #790 from crowlKats/remove_idna
  • eaa23e5 remove IDNA feature
  • a3df365 feat(url): add quirks::internal_components (#788)
  • 868719d Merge pull request #734 from andrewbanchich/fix-set-host
  • eb3f20d Fix setting file hosts to None
  • 6e5df8f perf(idna): fast-path simple/ascii domains (#761)
  • 1d307ae Merge pull request #787 from jiftoo/query-pairs-patch
  • 4c5e4ee fix unclosed doctest
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=percent-encoding&package-manager=cargo&previous-version=2.2.0&new-version=2.3.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5a8a164c4cded..e31cfb6552069 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5809,9 +5809,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" diff --git a/Cargo.toml b/Cargo.toml index 39029b16b7008..246c963bca96e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -285,7 +285,7 @@ openssl = { version = "0.10.54", default-features = false, features = ["vendored openssl-probe = { version = "0.1.5", default-features = false } ordered-float = { version = "3.7.0", default-features = false } paste = "1.0.12" -percent-encoding = { version = "2.2.0", default-features = false } +percent-encoding = { version = "2.3.0", default-features = false } pin-project = { version = "1.1.0", default-features = false } postgres-openssl = { version = "0.5.0", default-features = false, features = ["runtime"], optional = true } pulsar = { version = "6.0.0", default-features = false, features = ["tokio-runtime", "auth-oauth2", "flate2", "lz4", "snap", "zstd"], optional = true } From 7a55210ed814e0c47618905a299eba0d896a0646 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 17:50:36 +0000 Subject: [PATCH 096/236] chore(deps): Bump cached from 0.43.0 to 0.44.0 (#17599) Bumps [cached](https://github.com/jaemk/cached) from 0.43.0 to 0.44.0.
Changelog

Sourced from cached's changelog.

[0.44.0] / [cached_proc_macro[0.17.0]]

Added

  • Option to enable redis multiplex-connection manager on AsyncRedisCache

Changed

  • Show proc-macro documentation on docs.rs

  • Document needed feature flags

  • Hide implementation details in documentation

  • Relax Cached trait's cache_get, cache_get_mut and cache_remove key parameter. Allow K: Borrow<Q> like std::collections::HashMap and friends. Avoids copies particularly on Cached<String, _> where now you can do cache.cache_get("key") and before you had to cache.cache_get("key".to_string()).

    Note: This is a minor breaking change for anyone manually implementing the Cached trait. The signatures of cache_get, cache_get_mut, and cache_remove must be updated to include the additional trait bound on the key type:

     fn cache_get<Q>(&mut self, key: &Q) -> Option<&V>
     where
         K: std::borrow::Borrow<Q>,
         Q: std::hash::Hash + Eq + ?Sized,
     {

Removed

  • Dependency to lazy_static and async_once are removed.
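For illustration only (not from this repository): a minimal sketch of the relaxed cache_get bound described in the Changed section above, assuming the cached 0.44 API and its SizedCache store.

    use cached::{Cached, SizedCache};

    fn main() {
        let mut cache: SizedCache<String, u32> = SizedCache::with_size(10);
        cache.cache_set("key".to_string(), 1);
        // A borrowed &str key now works because String: Borrow<str>,
        // so no temporary String has to be allocated for the lookup.
        assert_eq!(cache.cache_get("key"), Some(&1));
    }
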
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=cached&package-manager=cargo&previous-version=0.43.0&new-version=0.44.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 16 ++++------------ vdev/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e31cfb6552069..df2562bc789a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -616,12 +616,6 @@ dependencies = [ "syn 2.0.10", ] -[[package]] -name = "async_once" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ce4f10ea3abcd6617873bae9f91d1c5332b4a778bd9ce34d0cd517474c1de82" - [[package]] name = "atomic-waker" version = "1.0.0" @@ -1731,18 +1725,16 @@ checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" [[package]] name = "cached" -version = "0.43.0" +version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc2fafddf188d13788e7099295a59b99e99b2148ab2195cae454e754cc099925" +checksum = "b195e4fbc4b6862bbd065b991a34750399c119797efff72492f28a5864de8700" dependencies = [ "async-trait", - "async_once", "cached_proc_macro", "cached_proc_macro_types", "futures 0.3.28", "hashbrown 0.13.2", "instant", - "lazy_static", "once_cell", "thiserror", "tokio", @@ -1750,9 +1742,9 @@ dependencies = [ [[package]] name = "cached_proc_macro" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10ca87c81aaa3a949dbbe2b5e6c2c45dbc94ba4897e45ea31ff9ec5087be3dc" +checksum = "b48814962d2fd604c50d2b9433c2a41a0ab567779ee2c02f7fba6eca1221f082" dependencies = [ "cached_proc_macro_types", "darling 0.14.2", diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index a8025605c2a15..6f1e16d848ef5 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -10,7 +10,7 @@ publish = false [dependencies] anyhow = "1.0.71" atty = "0.2.14" -cached = "0.43.0" +cached = "0.44.0" chrono = { version = "0.4.22", default-features = false, features = ["serde", "clock"] } clap = { version = "4.1.14", features = ["derive"] } clap-verbosity-flag = "2.0.1" From 657758db74496ec9adede09fc8f132bd8bed3bc3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 18:54:46 +0000 Subject: [PATCH 097/236] chore(deps): Bump regex from 1.8.3 to 1.8.4 (#17601) Bumps [regex](https://github.com/rust-lang/regex) from 1.8.3 to 1.8.4.
Changelog

Sourced from regex's changelog.

1.8.4 (2023-06-05)

This is a patch release that fixes a bug where (?-u:\B) was allowed in Unicode regexes, despite the fact that the current matching engines can report match offsets between the code units of a single UTF-8 encoded codepoint. That in turn means that match offsets that split a codepoint could be reported, which in turn results in panicking when one uses them to slice a &str.

This bug occurred in the transition to regex 1.8 because the underlying syntactical error that prevented this regex from compiling was intentionally removed. That's because (?-u:\B) will be permitted in Unicode regexes in regex 1.9, but the matching engines will guarantee to never report match offsets that split a codepoint. When the underlying syntactical error was removed, no code was added to ensure that (?-u:\B) didn't compile in the regex 1.8 transition release. This release, regex 1.8.4, adds that code such that Regex::new(r"(?-u:\B)") returns to the regex <1.8 behavior of not compiling. (A bytes::Regex can still of course compile it.)

Bug fixes:

  • [BUG #1006](rust-lang/regex#1006): Fix a bug where (?-u:\B) was allowed in Unicode regexes, and in turn could lead to match offsets that split a codepoint in &str.
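For illustration only (not part of the upstream changelog): a minimal sketch of the restored behavior, assuming regex 1.8.4 as described above.

    fn main() {
        // A Unicode Regex once again refuses to compile (?-u:\B)...
        assert!(regex::Regex::new(r"(?-u:\B)").is_err());
        // ...while a bytes::Regex, which may report offsets that are not
        // aligned to codepoint boundaries, still accepts it.
        assert!(regex::bytes::Regex::new(r"(?-u:\B)").is_ok());
    }
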
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=regex&package-manager=cargo&previous-version=1.8.3&new-version=1.8.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- lib/codecs/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- vdev/Cargo.toml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df2562bc789a1..ae649b5ceba0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6705,9 +6705,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.8.3" +version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ca098a9821bd52d6b24fd8b10bd081f47d39c22778cafaa75a2857a62c6390" +checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f" dependencies = [ "aho-corasick 1.0.1", "memchr", diff --git a/Cargo.toml b/Cargo.toml index 246c963bca96e..0082226603050 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,7 +293,7 @@ rand = { version = "0.8.5", default-features = false, features = ["small_rng"] } rand_distr = { version = "0.4.3", default-features = false } rdkafka = { version = "0.31.0", default-features = false, features = ["tokio", "libz", "ssl", "zstd"], optional = true } redis = { version = "0.23.0", default-features = false, features = ["connection-manager", "tokio-comp", "tokio-native-tls-comp"], optional = true } -regex = { version = "1.8.3", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.4", default-features = false, features = ["std", "perf"] } roaring = { version = "0.10.1", default-features = false, optional = true } seahash = { version = "4.1.0", default-features = false } semver = { version = "1.0.17", default-features = false, features = ["serde", "std"], optional = true } diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 4bc98d62ff0b2..93aa9b6e2980f 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -17,7 +17,7 @@ memchr = { version = "2", default-features = false } once_cell = { version = "1.18", default-features = false } ordered-float = { version = "3.7.0", default-features = false } prost = { version = "0.11.8", default-features = false, features = ["std"] } -regex = { version = "1.8.3", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.4", default-features = false, features = ["std", "perf"] } serde = { version = "1", default-features = false, features = ["derive"] } serde_json = { version = "1", default-features = false } smallvec = { version = "1", default-features = false, features = ["union"] } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 456d93ac4b8e4..a4c5d089146ac 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -38,7 +38,7 @@ proptest = { version = "1.2", optional = true } prost-types = { version = "0.11", default-features = false } prost = { version = "0.11", default-features = false, features = ["std"] } quanta = { version = "0.11.1", default-features = false } -regex = { version = "1.8.3", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.4", default-features = false, features = ["std", "perf"] } ryu = { version = "1", default-features = false } serde = { version = "1.0.163", default-features = false, features = ["derive", "rc"] } serde_json = { version = "1.0.96", default-features = false } diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 6f1e16d848ef5..5d8730c83ffdf 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -30,7 +30,7 @@ os_info = { version = "3.7.0", default-features = false } 
# watch https://github.com/epage/anstyle for official interop with Clap owo-colors = { version = "3.5.0", features = ["supports-colors"] } paste = "1.0.12" -regex = { version = "1.8.3", default-features = false, features = ["std", "perf"] } +regex = { version = "1.8.4", default-features = false, features = ["std", "perf"] } reqwest = { version = "0.11", features = ["json", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.96" From 9395eba89ed10488914ac042aabba068356bb84b Mon Sep 17 00:00:00 2001 From: neuronull Date: Tue, 6 Jun 2023 06:59:56 -0600 Subject: [PATCH 098/236] fix(ci): use correct secret for gardener board comment (#17605) --- .github/workflows/gardener_issue_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gardener_issue_comment.yml b/.github/workflows/gardener_issue_comment.yml index 355482633d445..795de5c3a0517 100644 --- a/.github/workflows/gardener_issue_comment.yml +++ b/.github/workflows/gardener_issue_comment.yml @@ -17,7 +17,7 @@ jobs: steps: - name: Move issue back to Triage if status is Blocked/Waiting env: - GH_TOKEN: ${{ secrets.GH_PAT_PROJECTS }} + GH_TOKEN: ${{ secrets.GH_PROJECT_PAT }} run: | issue_id=${{ github.event.issue.node_id }} From baa04e59d9b234c4e71f8545a6ad8fdb2517f805 Mon Sep 17 00:00:00 2001 From: neuronull Date: Tue, 6 Jun 2023 07:05:53 -0600 Subject: [PATCH 099/236] fix(ci): checkout a greater depth in regression workflow (#17604) - Forces a greater depth when checking out the PR branch. - This can later be improved to try a smaller depth, and keep increasing that until the base sha is found. --- .github/workflows/regression.yml | 51 +++++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 4e69413384302..e2f8f43e31ae0 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -129,12 +129,31 @@ jobs: - uses: actions/checkout@v3 - - name: Checkout PR branch (issue_comment) + - name: Checkout PR (issue_comment) if: github.event_name == 'issue_comment' env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: gh pr checkout ${{ github.event.issue.number }} + - name: Get PR branch name (issue_comment) + id: get-pr-branch-name + if: github.event_name == 'issue_comment' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + export BRANCH=$(git branch --show-current) + echo "BRANCH=${BRANCH}" + echo "BRANCH=${BRANCH}" >> $GITHUB_OUTPUT + + - name: Checkout PR branch (issue_comment) + if: github.event_name == 'issue_comment' + uses: actions/checkout@v3 + with: + # TODO: this can be done more elegantly in a follow-up by using a depth value and + # increasing it until the merge-base is found. + fetch-depth: 500 + ref: "${{ steps.get-pr-branch-name.outputs.BRANCH }}" + # If triggered by issue comment, the event payload doesn't directly contain the head and base sha from the PR. # But, we can retrieve this info from some commands. - name: Get PR metadata (issue_comment) @@ -166,6 +185,21 @@ jobs: echo "comparison sha is: ${COMPARISON_SHA}" echo "comparison tag is: ${COMPARISON_TAG}" + if [ "${BASELINE_SHA}" = "" ] ; then + echo "BASELINE_SHA not found, exiting." + exit 1 + fi + + if [ "${COMPARISON_SHA}" = "" ] ; then + echo "COMPARISON_SHA not found, exiting." + exit 1 + fi + + if [ "${PR_NUMBER}" = "" ] ; then + echo "PR_NUMBER not found, exiting." + exit 1 + fi + # If triggered by merge queue, the PR number is not available in the payload. 
While we restrict the number of PRs in the # queue to 1, we can get the PR number by parsing the merge queue temp branch's ref. - name: Get PR metadata (merge queue) @@ -195,6 +229,21 @@ jobs: echo "comparison sha is: ${COMPARISON_SHA}" echo "comparison tag is: ${COMPARISON_TAG}" + if [ "${BASELINE_SHA}" = "" ] ; then + echo "BASELINE_SHA not found, exiting." + exit 1 + fi + + if [ "${COMPARISON_SHA}" = "" ] ; then + echo "COMPARISON_SHA not found, exiting." + exit 1 + fi + + if [ "${PR_NUMBER}" = "" ] ; then + echo "PR_NUMBER not found, exiting." + exit 1 + fi + - name: Setup experimental metadata id: experimental-meta run: | From 154e39382f4e80998814a693f9d6bb5c89ebebf7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 13:10:22 +0000 Subject: [PATCH 100/236] chore(deps): Bump hashbrown from 0.13.2 to 0.14.0 (#17609) Bumps [hashbrown](https://github.com/rust-lang/hashbrown) from 0.13.2 to 0.14.0.
Changelog

Sourced from hashbrown's changelog.

[v0.14.0] - 2023-06-01

Added

  • Support for allocator-api2 crate for interfacing with custom allocators on stable. (#417)
  • Optimized implementation for ARM using NEON instructions. (#430)
  • Support for rkyv serialization. (#432)
  • Equivalent trait to look up values without Borrow. (#345)
  • Hash{Map,Set}::raw_table_mut is added which returns a mutable reference. (#404)
  • Fast path for clear on empty tables. (#428)

Changed

  • Optimized insertion to only perform a single lookup. (#277)
  • DrainFilter has been renamed to ExtractIf and no longer drops remaining elements when the iterator is dropped. #(374)
  • Bumped MSRV to 1.64.0. (#431)
  • {Map,Set}::raw_table now returns an immutable reference. (#404)
  • VacantEntry and OccupiedEntry now use the default hasher if none is specified in generics. (#389)
  • RawTable::data_start now returns a NonNull to match RawTable::data_end. (#387)
  • RawIter::{reflect_insert, reflect_remove} are now unsafe. (#429)
  • RawTable::find_potential is renamed to find_or_find_insert_slot and returns an InsertSlot. (#429)
  • RawTable::remove now also returns an InsertSlot. (#429)
  • InsertSlot can be used to insert an element with RawTable::insert_in_slot. (#429)
  • RawIterHash no longer has a lifetime tied to that of the RawTable. (#427)
  • The trait bounds of HashSet::raw_table have been relaxed to not require Eq + Hash. (#423)
  • EntryRef::and_replace_entry_with and OccupiedEntryRef::replace_entry_with were changed to give a &K instead of a &Q to the closure.

Removed

  • Support for bumpalo as an allocator with custom wrapper. Use allocator-api2 feature in bumpalo to use it as an allocator for hashbrown collections. (#417)
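For illustration only (not from this repository): a minimal sketch of the DrainFilter -> ExtractIf behavior change, assuming the hashbrown 0.14 API described above.

    use hashbrown::HashMap;

    fn main() {
        let mut map: HashMap<i32, i32> = (0..8).map(|i| (i, i)).collect();

        // Remove two even-keyed entries, then drop the iterator early.
        let taken: Vec<(i32, i32)> = map.extract_if(|k, _| k % 2 == 0).take(2).collect();
        assert_eq!(taken.len(), 2);

        // Unlike the old drain_filter, entries the iterator never visited
        // stay in the map once it is dropped.
        assert_eq!(map.len(), 6);
    }
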
Commits
  • 3056ee9 Auto merge of #434 - Amanieu:release-0.14.0, r=Amanieu
  • 32b125e Update CHANGELOG.md
  • c5e0388 Prepare release of v0.14.0
  • 3784c2f Auto merge of #433 - Amanieu:internal_cleanups, r=Amanieu
  • 9f20bd0 Replace intrinsics::cttz_nonzero with NonZero::trailing_zeros
  • d677fd4 Remove backup implementation of likely/unlikely that didn't work
  • 4d8c059 Enable bumpalo/allocator-api2 in dev-dependencies for doc-tests
  • bceae1e Remove redundant make_insert_hash internal function
  • f552bdb Auto merge of #432 - Amanieu:rkyv, r=Amanieu
  • 33afe8f Auto merge of #431 - Amanieu:msrv-1.64, r=Amanieu
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=hashbrown&package-manager=cargo&previous-version=0.13.2&new-version=0.14.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 13 +++++++++++-- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae649b5ceba0a..4c5b522aaa8e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1561,7 +1561,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "822462c1e7b17b31961798a6874b36daea6818e99e0cb7d3b7b0fa3c477751c3" dependencies = [ "borsh-derive", - "hashbrown 0.13.2", + "hashbrown 0.12.3", ] [[package]] @@ -3606,6 +3606,15 @@ dependencies = [ "ahash 0.8.2", ] +[[package]] +name = "hashbrown" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +dependencies = [ + "ahash 0.8.2", +] + [[package]] name = "hashlink" version = "0.8.2" @@ -9154,7 +9163,7 @@ dependencies = [ "grok", "h2", "hash_hasher", - "hashbrown 0.13.2", + "hashbrown 0.14.0", "headers", "heim", "hex", diff --git a/Cargo.toml b/Cargo.toml index 0082226603050..d98b848387200 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -256,7 +256,7 @@ governor = { version = "0.5.1", default-features = false, features = ["dashmap", grok = { version = "2.0.0", default-features = false, optional = true } h2 = { version = "0.3.19", default-features = false, optional = true } hash_hasher = { version = "2.0.0", default-features = false } -hashbrown = { version = "0.13.2", default-features = false, optional = true, features = ["ahash"] } +hashbrown = { version = "0.14.0", default-features = false, optional = true, features = ["ahash"] } headers = { version = "0.3.8", default-features = false } hostname = { version = "0.3.1", default-features = false } http = { version = "0.2.9", default-features = false } From d956092efdcc4ccea718365d9e9ef7bd537563a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 13:11:46 +0000 Subject: [PATCH 101/236] chore(deps): Bump url from 2.3.1 to 2.4.0 (#17608) Bumps [url](https://github.com/servo/rust-url) from 2.3.1 to 2.4.0.
Commits
  • a3e07c7 Merge pull request #840 from servo/update-ver
  • 1317d9d update dependencies
  • a25f3a8 Update url to v2.4.0
  • 2a12745 Update idna to 0.4.0
  • 1e6fd5d Update form_urlencoded to 1.2.0
  • 90833ff Update percent-encoding to 2.3.0
  • f5b961c Update data-url to 0.3.0
  • 0e25146 Merge pull request #839 from servo/fix-838
  • 21f32d6 Fix lint
  • df88a29 Also fix issue where path segment could be confused with drive letter because...
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=url&package-manager=cargo&previous-version=2.3.1&new-version=2.4.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 18 +++++++++--------- Cargo.toml | 2 +- lib/vector-api-client/Cargo.toml | 2 +- lib/vector-config/Cargo.toml | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4c5b522aaa8e7..303238ac1d0c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3233,9 +3233,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -4079,9 +4079,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -8921,9 +8921,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.8" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" @@ -8998,12 +8998,12 @@ dependencies = [ [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.3.0", + "idna 0.4.0", "percent-encoding", "serde", ] diff --git a/Cargo.toml b/Cargo.toml index d98b848387200..50b1f8cdfee4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -311,7 +311,7 @@ toml = { version = "0.7.4", default-features = false, features = ["parse", "disp tonic = { version = "0.9", optional = true, default-features = false, features = ["transport", "codegen", "prost", "tls", "tls-roots", "gzip"] } trust-dns-proto = { version = "0.22.0", default-features = false, features = ["dnssec"], optional = true } typetag = { version = "0.2.8", default-features = false } -url = { version = "2.3.1", default-features = false, features = ["serde"] } +url = { version = "2.4.0", default-features = false, features = ["serde"] } uuid = { version = "1", default-features = false, features = ["serde", "v4"] } warp = { version = "0.3.5", default-features = false } zstd = { version = "0.12.3", default-features = false } diff --git a/lib/vector-api-client/Cargo.toml b/lib/vector-api-client/Cargo.toml index ec2763cd1e49a..ad455736c62e6 100644 --- a/lib/vector-api-client/Cargo.toml +++ b/lib/vector-api-client/Cargo.toml @@ -31,6 +31,6 @@ tokio-tungstenite = { version = "0.19.0", default-features = false, features = [ # External libs chrono = { version = "0.4.6", default-features = false, features = ["serde"] } clap = { version = "4.1.14", default-features = false, features = ["derive"] } -url = { version = "2.3.1", default-features = false } +url = { version = "2.4.0", default-features = false } uuid = { version = "1", default-features = false, features = ["serde", 
"v4"] } indoc = { version = "2.0.1", default-features = false } diff --git a/lib/vector-config/Cargo.toml b/lib/vector-config/Cargo.toml index 67f99178aa472..bae87581cc42f 100644 --- a/lib/vector-config/Cargo.toml +++ b/lib/vector-config/Cargo.toml @@ -25,7 +25,7 @@ serde_with = { version = "2.3.2", default-features = false, features = ["std"] } snafu = { version = "0.7.4", default-features = false } toml = { version = "0.7.4", default-features = false } tracing = { version = "0.1.34", default-features = false } -url = { version = "2.3.1", default-features = false, features = ["serde"] } +url = { version = "2.4.0", default-features = false, features = ["serde"] } vrl = { version = "0.4.0", default-features = false, features = ["compiler"] } vector-config-common = { path = "../vector-config-common" } vector-config-macros = { path = "../vector-config-macros" } From a9324892a289e94214707f1e09ea2931ae27d5e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 13:58:40 +0000 Subject: [PATCH 102/236] chore(deps): Bump xml-rs from 0.8.4 to 0.8.14 (#17607) Bumps [xml-rs](https://github.com/kornelski/xml-rs) from 0.8.4 to 0.8.14.
Changelog

Sourced from xml-rs's changelog.

Version 0.8.7

  • Basic parsing of DTD internal subset
  • Speed improvements

Version 0.8.6

  • Fixed parsing of incorrectly nested comments and processing instructions

Version 0.8.5

  • Updated source code to edition 2018 and fixed/updated some Rust idioms.
Commits
  • 2776563 chore: Release xml-rs version 0.8.14
  • ca962a9 Explain next_pos wonkiness
  • 6c13445 Balance pos pushes when ignoring comments
  • 129ab96 Merge pull request #226 from 00xc/unreachable_token
  • c09549a Avoid panic when displaying unexpected token error
  • 563f975 Fix change to public fields
  • 9179e79 Bump
  • 3562bff Require spaces between attributes
  • 1eefdc9 Require spaces between prolog attrs
  • 535914e Forbid whitespace before XML prolog
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=xml-rs&package-manager=cargo&previous-version=0.8.4&new-version=0.8.14)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: neuronull --- Cargo.lock | 4 ++-- LICENSE-3rdparty.csv | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 303238ac1d0c2..67ebb872565c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10203,9 +10203,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.4" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" +checksum = "52839dc911083a8ef63efa4d039d1f58b5e409f923e44c80828f206f66e5541c" [[package]] name = "xmlparser" diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 5680b71f16e37..2af031a4db1ea 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -610,7 +610,7 @@ winnow,https://github.com/winnow-rs/winnow,MIT,The winnow Authors winreg,https://github.com/gentoo90/winreg-rs,MIT,Igor Shaula woothee,https://github.com/woothee/woothee-rust,Apache-2.0,hhatto wyz,https://github.com/myrrlyn/wyz,MIT,myrrlyn -xml-rs,https://github.com/netvl/xml-rs,MIT,Vladimir Matveev +xml-rs,https://github.com/kornelski/xml-rs,MIT,Vladimir Matveev xmlparser,https://github.com/RazrFalcon/xmlparser,MIT OR Apache-2.0,Evgeniy Reizner yaml-rust,https://github.com/chyh1990/yaml-rust,MIT OR Apache-2.0,Yuheng Chen zerocopy,https://fuchsia.googlesource.com/fuchsia/+/HEAD/src/lib/zerocopy,BSD-2-Clause,Joshua Liebow-Feeser From b5bd85f87e39389a2ea3bb9a3d588fcbdfd0e29d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 18:04:28 +0000 Subject: [PATCH 103/236] chore(deps): Bump opendal from 0.36.0 to 0.37.0 (#17614) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [opendal](https://github.com/apache/incubator-opendal) from 0.36.0 to 0.37.0.
Release notes

Sourced from opendal's releases.

v0.37.0

Upgrade to v0.37

In v0.37.0, OpenDAL bumps the version of reqsign to v0.13.0.

There are no public API and raw API changes.


Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=opendal&package-manager=cargo&previous-version=0.36.0&new-version=0.37.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 67ebb872565c9..13643e87d10f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1561,7 +1561,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "822462c1e7b17b31961798a6874b36daea6818e99e0cb7d3b7b0fa3c477751c3" dependencies = [ "borsh-derive", - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -5533,9 +5533,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "opendal" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3555168d4cc9a83c332e1416ff00e3be36a6d78447dff472829962afbc91bb3d" +checksum = "6a37de9fe637d53550bf3f76d5c731f69cb6f9685ada6afd390ada98994a3f91" dependencies = [ "anyhow", "async-compat", diff --git a/Cargo.toml b/Cargo.toml index 50b1f8cdfee4c..a5c27c090cbb8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -181,7 +181,7 @@ azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = azure_storage_blobs = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, optional = true } # OpenDAL -opendal = {version = "0.36", default-features = false, features = ["native-tls", "services-webhdfs"], optional = true} +opendal = {version = "0.37", default-features = false, features = ["native-tls", "services-webhdfs"], optional = true} # Tower tower = { version = "0.4.13", default-features = false, features = ["buffer", "limit", "retry", "timeout", "util", "balance", "discover"] } From bd880f55d2d8605733297acb4f96a8100a60dad4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 18:22:12 +0000 Subject: [PATCH 104/236] chore(deps): Bump getrandom from 0.2.9 to 0.2.10 (#17613) Bumps [getrandom](https://github.com/rust-random/getrandom) from 0.2.9 to 0.2.10.
Changelog

Sourced from getrandom's changelog.

[0.2.10] - 2023-06-06

Added

  • Support for PS Vita (armv7-sony-vita-newlibeabihf) #359

Changed

  • Use getentropy from libc on Emscripten targets #362

#359: rust-random/getrandom#359 #362: rust-random/getrandom#362
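Both entries are additive, so 0.2-series call sites keep the same shape. As a rough, hypothetical refresher (not code from this repository), the crate's single entry point fills a caller-provided buffer:

```rust
// Fill a buffer from the operating system's entropy source.
fn random_token() -> Result<[u8; 16], getrandom::Error> {
    let mut buf = [0u8; 16];
    getrandom::getrandom(&mut buf)?;
    Ok(buf)
}

fn main() {
    println!("{:02x?}", random_token().expect("OS RNG unavailable"));
}
```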

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=getrandom&package-manager=cargo&previous-version=0.2.9&new-version=0.2.10)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 13643e87d10f1..41fe1fd431085 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -47,7 +47,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -59,7 +59,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107" dependencies = [ "cfg-if", - "getrandom 0.2.9", + "getrandom 0.2.10", "once_cell", "version_check", ] @@ -1253,7 +1253,7 @@ dependencies = [ "bytes 1.4.0", "dyn-clone", "futures 0.3.28", - "getrandom 0.2.9", + "getrandom 0.2.10", "http-types", "log", "paste", @@ -1341,7 +1341,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "instant", "rand 0.8.5", ] @@ -3412,9 +3412,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if", "js-sys", @@ -5217,7 +5217,7 @@ dependencies = [ "byteorder", "data-encoding", "ed25519-dalek", - "getrandom 0.2.9", + "getrandom 0.2.10", "log", "rand 0.8.5", "signatory", @@ -5232,7 +5232,7 @@ dependencies = [ "byteorder", "data-encoding", "ed25519-dalek", - "getrandom 0.2.9", + "getrandom 0.2.10", "log", "rand 0.8.5", "signatory", @@ -5461,7 +5461,7 @@ checksum = "eeaf26a72311c087f8c5ba617c96fac67a5c04f430e716ac8d8ab2de62e23368" dependencies = [ "base64 0.13.1", "chrono", - "getrandom 0.2.9", + "getrandom 0.2.10", "http", "rand 0.8.5", "reqwest", @@ -6537,7 +6537,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", ] [[package]] @@ -6707,7 +6707,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "redox_syscall 0.2.16", "thiserror", ] @@ -9038,7 +9038,7 @@ version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" dependencies = [ - "getrandom 0.2.9", + "getrandom 0.2.10", "rand 0.8.5", "serde", ] @@ -9574,7 +9574,7 @@ name = "vector-vrl-web-playground" version = "0.1.0" dependencies = [ "enrichment", - "getrandom 0.2.9", + "getrandom 0.2.10", "gloo-utils", "serde", "serde-wasm-bindgen", @@ -9623,7 +9623,7 @@ dependencies = [ "dyn-clone", "exitcode", "flate2", - "getrandom 0.2.9", + "getrandom 0.2.10", "grok", "hex", "hmac", From b400acced6bd61d5927ab75bb82643b5927c0cbd Mon Sep 17 00:00:00 2001 From: neuronull Date: Tue, 6 Jun 2023 12:34:00 -0600 Subject: [PATCH 105/236] fix(docs): fix copy-paste issue in component spec (#17616) 
--- docs/specs/component.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/specs/component.md b/docs/specs/component.md index 8a8856995dea5..20e43b4bf5ec3 100644 --- a/docs/specs/component.md +++ b/docs/specs/component.md @@ -158,7 +158,7 @@ the reception of Vector events from an upstream component. #### ComponentBytesSent -*Sinks* MUST emit a `ComponentBytesReceived` event that represent the transmission of bytes. +*Sinks* MUST emit a `ComponentBytesSent` event that represent the transmission of bytes. - Emission - MUST emit a `ComponentBytesSent` event immediately after sending bytes to the downstream target, From c55c9ecbf904d9166c88af65a9a3f76f18289f58 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Jun 2023 05:45:36 -0600 Subject: [PATCH 106/236] chore(deps): Bump tempfile from 3.5.0 to 3.6.0 (#17617) Bumps [tempfile](https://github.com/Stebalien/tempfile) from 3.5.0 to 3.6.0.
Changelog

Sourced from tempfile's changelog.

3.6.0

  • Update windows-sys to 0.48.
  • Update rustix min version to 0.37.11
  • Forward some NamedTempFile and SpooledTempFile methods to the underlying File object for better performance (especially vectorized writes, etc.).
  • Implement AsFd and AsHandle.
  • Misc documentation fixes and code cleanups.
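The forwarded methods mainly matter for code that writes through a NamedTempFile handle directly. A small, hypothetical sketch of that pattern (not taken from this repository):

```rust
use std::io::Write;
use tempfile::NamedTempFile;

fn main() -> std::io::Result<()> {
    let mut file = NamedTempFile::new()?;
    // Per the changelog, 3.6.0 forwards more of the default Write methods
    // (e.g. vectorized writes) to the underlying File.
    file.write_all(b"scratch data")?;
    println!("wrote to {}", file.path().display());
    Ok(())
    // The temporary file is deleted when `file` is dropped.
}
```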
Commits
  • 86b3136 chore: release 3.6.0
  • a2b45b3 fix(docs): Change dir and file variable names to match existing comments ...
  • f474e6a Remove comment on SetFileInformationByHandle (#236)
  • 141c773 feat: implement default methods for SpooledTempFile Read/Write (#232)
  • 3590dbf chore: modern rust (#231)
  • c76b783 feat: implement AsFd/AsHandle to mirror the AsRaw* variants (#230)
  • ae4f4c8 chore: bump rustix min version to 0.37.11 (#229)
  • 9488362 Forward default NamedTempFile methods (#226)
  • c41ee48 build(deps): update windows-sys requirement from 0.45 to 0.48 (#227)
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tempfile&package-manager=cargo&previous-version=3.5.0&new-version=3.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 42 ++++++++++++++++++++------------------ Cargo.toml | 2 +- lib/file-source/Cargo.toml | 2 +- vdev/Cargo.toml | 2 +- 4 files changed, 25 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 41fe1fd431085..d9f416f55d96e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3015,13 +3015,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -4186,12 +4186,13 @@ checksum = "59ce5ef949d49ee85593fc4d3f3f95ad61657076395cbbce23e2121fc5542074" [[package]] name = "io-lifetimes" -version = "1.0.3" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ + "hermit-abi 0.3.1", "libc", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -4237,8 +4238,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi 0.3.1", - "io-lifetimes 1.0.3", - "rustix 0.37.5", + "io-lifetimes 1.0.11", + "rustix 0.37.19", "windows-sys 0.48.0", ] @@ -4650,9 +4651,9 @@ checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d" [[package]] name = "linux-raw-sys" -version = "0.3.0" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd550e73688e6d578f0ac2119e32b797a327631a42f9433e59d02e139c8df60d" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "listenfd" @@ -6981,16 +6982,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.5" +version = "0.37.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e78cc525325c06b4a7ff02db283472f3c042b7ff0c391f96c6d5ac6f4f91b75" +checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" dependencies = [ "bitflags", - "errno 0.3.0", - "io-lifetimes 1.0.3", + "errno 0.3.1", + "io-lifetimes 1.0.11", "libc", - "linux-raw-sys 0.3.0", - "windows-sys 0.45.0", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", ] [[package]] @@ -8015,15 +8016,16 @@ checksum = "af547b166dd1ea4b472165569fc456cfb6818116f854690b0ff205e636523dab" [[package]] name = "tempfile" -version = "3.5.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg", "cfg-if", "fastrand", "redox_syscall 0.3.5", - "rustix 0.37.5", - "windows-sys 0.45.0", + "rustix 0.37.19", + "windows-sys 0.48.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index a5c27c090cbb8..af71d877e60cd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -351,7 +351,7 @@ similar-asserts = "1.4.2" proptest = "1.2" quickcheck = "1.0.3" reqwest = { version = "0.11", features = ["json"] } -tempfile = "3.5.0" +tempfile = "3.6.0" test-generator = "0.3.1" tokio-test 
= "0.4.2" tokio = { version = "1.28.2", features = ["test-util"] } diff --git a/lib/file-source/Cargo.toml b/lib/file-source/Cargo.toml index aa249a7e2c913..a9203b9f21acb 100644 --- a/lib/file-source/Cargo.toml +++ b/lib/file-source/Cargo.toml @@ -76,7 +76,7 @@ features = ["full"] [dev-dependencies] criterion = "0.5" quickcheck = "1" -tempfile = "3.5.0" +tempfile = "3.6.0" similar-asserts = "1.4.2" [[bench]] diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 5d8730c83ffdf..23ab8ab123fda 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -36,5 +36,5 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.96" serde_yaml = "0.9.21" sha2 = "0.10.6" -tempfile = "3.5.0" +tempfile = "3.6.0" toml = { version = "0.7.4", default-features = false, features = ["parse"] } From 6c4856595410ee77d52d62ceb2cd808b1cdff04e Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Wed, 7 Jun 2023 08:33:35 -0600 Subject: [PATCH 107/236] chore(deps): Upgrade rust to 1.70.0 (#17585) --- Tiltfile | 2 +- .../src/variants/disk_v2/tests/model/sequencer.rs | 4 +--- rust-toolchain.toml | 2 +- src/sinks/util/service.rs | 9 +++------ 4 files changed, 6 insertions(+), 11 deletions(-) diff --git a/Tiltfile b/Tiltfile index 1766beef83e69..6c0c9246042b4 100644 --- a/Tiltfile +++ b/Tiltfile @@ -7,7 +7,7 @@ load('ext://helm_resource', 'helm_resource', 'helm_repo') docker_build( ref='timberio/vector', context='.', - build_args={'RUST_VERSION': '1.69.0'}, + build_args={'RUST_VERSION': '1.70.0'}, dockerfile='tilt/Dockerfile' ) diff --git a/lib/vector-buffers/src/variants/disk_v2/tests/model/sequencer.rs b/lib/vector-buffers/src/variants/disk_v2/tests/model/sequencer.rs index d00e95ea1e406..270cf7c84e9d6 100644 --- a/lib/vector-buffers/src/variants/disk_v2/tests/model/sequencer.rs +++ b/lib/vector-buffers/src/variants/disk_v2/tests/model/sequencer.rs @@ -25,11 +25,9 @@ impl TrackedFuture { where F: Future + Send + 'static, { - let wrapped = async move { fut.await }; - Self { polled_once: false, - fut: spawn(wrapped.boxed()), + fut: spawn(fut.boxed()), } } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index f238a797e8764..008def46a7a20 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "1.69.0" +channel = "1.70.0" profile = "default" diff --git a/src/sinks/util/service.rs b/src/sinks/util/service.rs index d75030de8d79f..d8970c5dbbe58 100644 --- a/src/sinks/util/service.rs +++ b/src/sinks/util/service.rs @@ -357,7 +357,6 @@ impl TowerRequestSettings { S::Future: Send + 'static, { let policy = self.retry_policy(retry_logic.clone()); - let settings = self.clone(); // Build services let open = OpenGauge::new(); @@ -368,16 +367,14 @@ impl TowerRequestSettings { // Build individual service ServiceBuilder::new() .layer(AdaptiveConcurrencyLimitLayer::new( - settings.concurrency, - settings.adaptive_concurrency, + self.concurrency, + self.adaptive_concurrency, retry_logic.clone(), )) .service( health_config.build( health_logic.clone(), - ServiceBuilder::new() - .timeout(settings.timeout) - .service(inner), + ServiceBuilder::new().timeout(self.timeout).service(inner), open.clone(), endpoint, ), // NOTE: there is a version conflict for crate `tracing` between `tracing_tower` crate From 460bbc7b9e532f93ac015ff871535c16135e4793 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Jun 2023 08:37:23 -0600 Subject: [PATCH 108/236] chore(deps): Bump wiremock from 0.5.18 to 0.5.19 (#17618) Bumps 
[wiremock](https://github.com/LukeMathWalker/wiremock-rs) from 0.5.18 to 0.5.19.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=wiremock&package-manager=cargo&previous-version=0.5.18&new-version=0.5.19)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d9f416f55d96e..cecce74ec8c8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10164,9 +10164,9 @@ dependencies = [ [[package]] name = "wiremock" -version = "0.5.18" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7b0b5b253ebc0240d6aac6dd671c495c467420577bf634d3064ae7e6fa2b4c" +checksum = "c6f71803d3a1c80377a06221e0530be02035d5b3e854af56c6ece7ac20ac441d" dependencies = [ "assert-json-diff", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index af71d877e60cd..21f5146478246 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -357,7 +357,7 @@ tokio-test = "0.4.2" tokio = { version = "1.28.2", features = ["test-util"] } tower-test = "0.4.0" vector-core = { path = "lib/vector-core", default-features = false, features = ["vrl", "test"] } -wiremock = "0.5.18" +wiremock = "0.5.19" zstd = { version = "0.12.3", default-features = false } [patch.crates-io] From 579108353e50546081b830d4e5788be7bb76a892 Mon Sep 17 00:00:00 2001 From: neuronull Date: Wed, 7 Jun 2023 11:53:30 -0600 Subject: [PATCH 109/236] fix(ci): change command to find baseline sha from issue comment trigger (#17622) --- .github/workflows/regression.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index e2f8f43e31ae0..6a47a81d596cb 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -165,7 +165,7 @@ jobs: export PR_NUMBER=${{ github.event.issue.number }} echo "PR_NUMBER=${PR_NUMBER}" >> $GITHUB_OUTPUT - export BASELINE_SHA=$(git merge-base --fork-point master) + export BASELINE_SHA=$(git merge-base master HEAD) echo "BASELINE_SHA=${BASELINE_SHA}" >> $GITHUB_OUTPUT export COMPARISON_SHA=$(git rev-parse HEAD) From 3005141f2097169a05af418e5f80765468645700 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Jun 2023 12:55:32 -0600 Subject: [PATCH 110/236] chore(ci): Bump docker/setup-qemu-action from 2.1.0 to 2.2.0 (#17623) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 2.1.0 to 2.2.0.
Release notes

Sourced from docker/setup-qemu-action's releases.

v2.2.0

What's Changed

New Contributors

Full Changelog: https://github.com/docker/setup-qemu-action/compare/v2.1.0...v2.2.0

Commits
  • 2b82ce8 Merge pull request #83 from docker/dependabot/npm_and_yarn/docker/actions-too...
  • 3eae0a2 Merge pull request #81 from docker/dependabot/github_actions/docker/bake-acti...
  • 1fd9478 Bump @​docker/actions-toolkit from 0.1.0 to 0.3.0
  • f9e93f9 Bump docker/bake-action from 2 to 3
  • 9d429d4 Merge pull request #80 from docker/dependabot/npm_and_yarn/docker/actions-too...
  • b5a257c update generated content
  • c915c25 use new implementation from toolkit
  • 25bbf89 update dev dependencies
  • faaa95d Bump @​docker/actions-toolkit from 0.1.0-beta.14 to 0.1.0
  • de3982d Merge pull request #70 from crazy-max/switch-toolkit
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/setup-qemu-action&package-manager=github_actions&previous-version=2.1.0&new-version=2.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- .github/workflows/publish.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index f4d18c2263b11..7377496dd58b8 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -40,7 +40,7 @@ jobs: uses: actions/checkout@v3 - name: Set up QEMU - uses: docker/setup-qemu-action@v2.1.0 + uses: docker/setup-qemu-action@v2.2.0 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2.5.0 - name: Login to DockerHub diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 30d03d9b00b80..d271f51041c0a 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -430,7 +430,7 @@ jobs: username: ${{ secrets.CI_DOCKER_USERNAME }} password: ${{ secrets.CI_DOCKER_PASSWORD }} - name: Set up QEMU - uses: docker/setup-qemu-action@v2.1.0 + uses: docker/setup-qemu-action@v2.2.0 with: platforms: all - name: Set up Docker Buildx From a54a12faae72ee64f4ba842746837a4787af5dc2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Jun 2023 18:56:13 +0000 Subject: [PATCH 111/236] chore(ci): Bump docker/metadata-action from 4.4.0 to 4.5.0 (#17624) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 4.4.0 to 4.5.0.
Release notes

Sourced from docker/metadata-action's releases.

v4.5.0

What's Changed

Full Changelog: https://github.com/docker/metadata-action/compare/v4.4.0...v4.5.0

Commits
  • 2c0bd77 Merge pull request #296 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • b10b364 update generated content
  • 40a1c6f Bump @​docker/actions-toolkit from 0.1.0 to 0.3.0
  • be8ea87 Merge pull request #294 from docker/dependabot/npm_and_yarn/csv-parse-5.4.0
  • dbbf018 Merge pull request #287 from docker/dependabot/github_actions/docker/bake-act...
  • 72b4ec2 Bump csv-parse from 5.3.8 to 5.4.0
  • 00e2c9d Bump docker/bake-action from 2 to 3
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/metadata-action&package-manager=github_actions&previous-version=4.4.0&new-version=4.5.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 7377496dd58b8..8899ec143fb0e 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -51,7 +51,7 @@ jobs: password: ${{ secrets.CI_DOCKER_PASSWORD }} - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@c4ee3adeed93b1fa6a762f209fb01608c1a22f1e + uses: docker/metadata-action@2c0bd771b40637d97bf205cbccdd294a32112176 with: images: timberio/vector-dev flavor: | From 15bc42a21bed188819da4d12e38d108f2e840202 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Jun 2023 18:56:43 +0000 Subject: [PATCH 112/236] chore(ci): Bump docker/setup-buildx-action from 2.5.0 to 2.6.0 (#17625) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 2.5.0 to 2.6.0.
Release notes

Sourced from docker/setup-buildx-action's releases.

v2.6.0

What's Changed

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.5.0...v2.6.0

Commits
  • 6a58db7 Merge pull request #236 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • d56292e update generated content
  • 790eb2d Bump @​docker/actions-toolkit from 0.2.0 to 0.3.0
  • 2a81c53 Merge pull request #231 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • 00b2400 update generated content
  • 484614d Bump @​docker/actions-toolkit from 0.1.0 to 0.2.0
  • d957594 Merge pull request #219 from crazy-max/ci-k3s-append
  • 5bb6d36 ci: set up and build with k3s
  • a99c5e5 update generated content
  • fc1a41d set node name for k8s driver when appending nodes
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/setup-buildx-action&package-manager=github_actions&previous-version=2.5.0&new-version=2.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- .github/workflows/publish.yml | 2 +- .github/workflows/regression.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 8899ec143fb0e..c02c042f3b03e 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -42,7 +42,7 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@v2.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v2.6.0 - name: Login to DockerHub uses: docker/login-action@v2.1.0 if: github.ref == 'refs/heads/master' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index d271f51041c0a..7ee5ecf2af850 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -435,7 +435,7 @@ jobs: platforms: all - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v2.6.0 with: version: latest install: true diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 6a47a81d596cb..7fc5c83f15974 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -317,7 +317,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v2.6.0 - name: Build 'vector' target image uses: docker/build-push-action@v4.0.0 @@ -354,7 +354,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2.5.0 + uses: docker/setup-buildx-action@v2.6.0 - name: Build 'vector' target image uses: docker/build-push-action@v4.0.0 From 10cfd0aec905c605248ad9d36abb312d4bfc1a5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Jun 2023 19:26:19 +0000 Subject: [PATCH 113/236] chore(deps): Bump libc from 0.2.144 to 0.2.146 (#17615) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [libc](https://github.com/rust-lang/libc) from 0.2.144 to 0.2.146.
Release notes

Sourced from libc's releases.

0.2.145

What's Changed

New Contributors

Full Changelog: https://github.com/rust-lang/libc/compare/0.2.144...0.2.145

Commits
  • f171596 Auto merge of #3266 - vita-rust:release-0.2.146, r=Amanieu
  • dd6fdac Update crate version to 0.2.146
  • a6c94f4 Auto merge of #3265 - bossmc:issue-3264-variadic-open(at), r=Amanieu
  • 52badc6 Use use to alias open/openat in lfs64.rs
  • e0dfb3f Auto merge of #3259 - vita-rust:update, r=JohnTitor
  • c2bfe30 Auto merge of #3263 - tzneal:add-msg-nosignal, r=JohnTitor
  • 3b808cf Auto merge of #3262 - nekopsykose:s390x-largefile, r=JohnTitor
  • be93cda add MSG_NEEDSA and MSG_NOSIGNAL for macos
  • 4d473b2 s390x-musl: define O_LARGEFILE constant
  • 9469613 Auto merge of #3257 - superwhiskers:add-putpwent, r=JohnTitor
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=libc&package-manager=cargo&previous-version=0.2.144&new-version=0.2.146)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cecce74ec8c8e..065863c237bb1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4577,9 +4577,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.144" +version = "0.2.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" +checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" [[package]] name = "libflate" diff --git a/Cargo.toml b/Cargo.toml index 21f5146478246..ed15cda4e7871 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = base64 = "0.21.2" criterion = { version = "0.5.1", features = ["html_reports", "async_tokio"] } itertools = { version = "0.10.5", default-features = false } -libc = "0.2.144" +libc = "0.2.146" similar-asserts = "1.4.2" proptest = "1.2" quickcheck = "1.0.3" From 29315428b2c93ae0a5682ddb1fb25137b5eb3931 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 05:24:37 -0600 Subject: [PATCH 114/236] chore(deps): Bump async-graphql from 5.0.9 to 5.0.10 (#17619) Bumps [async-graphql](https://github.com/async-graphql/async-graphql) from 5.0.9 to 5.0.10.
Changelog

Sourced from async-graphql's changelog.

[5.0.10] 2023-06-07

  • Upgrade opentelemetry to 0.19.0 #1252
  • Remove internal CursorScalar type and expose Edge::cursor member #1302
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=async-graphql&package-manager=cargo&previous-version=5.0.9&new-version=5.0.10)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 065863c237bb1..f30b75ebb2f98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -413,9 +413,9 @@ dependencies = [ [[package]] name = "async-graphql" -version = "5.0.9" +version = "5.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "364423936c4b828ac1615ce325e528c5afbe6e6995d799ee5683c7d36720dfa4" +checksum = "b35ef8f9be23ee30fe1eb1cf175c689bc33517c6c6d0fd0669dade611e5ced7f" dependencies = [ "async-graphql-derive", "async-graphql-parser", @@ -444,9 +444,9 @@ dependencies = [ [[package]] name = "async-graphql-derive" -version = "5.0.9" +version = "5.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a06320343bbe0a1f2e29ec6d1ed34e0460f10e6827b3154a78e4ccc039dbc4" +checksum = "1a0f6ceed3640b4825424da70a5107e79d48d9b2bc6318dfc666b2fc4777f8c4" dependencies = [ "Inflector", "async-graphql-parser", @@ -460,9 +460,9 @@ dependencies = [ [[package]] name = "async-graphql-parser" -version = "5.0.9" +version = "5.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46ce3b4b57e2a4630ea5e69eeb02fb5ee3c5f48754fcf7fd6a7bf3b4f96538f0" +checksum = "ecc308cd3bc611ee86c9cf19182d2b5ee583da40761970e41207f088be3db18f" dependencies = [ "async-graphql-value", "pest", @@ -472,9 +472,9 @@ dependencies = [ [[package]] name = "async-graphql-value" -version = "5.0.9" +version = "5.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637c6b5a755133d47c9829df04b7a5e2f1856fe4c1101f581650c93198eba103" +checksum = "d461325bfb04058070712296601dfe5e5bd6cdff84780a0a8c569ffb15c87eb3" dependencies = [ "bytes 1.4.0", "indexmap", diff --git a/Cargo.toml b/Cargo.toml index ed15cda4e7871..2a38cfa839f3b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -210,7 +210,7 @@ smpl_jwt = { version = "0.7.1", default-features = false, optional = true } lapin = { version = "2.2.1", default-features = false, features = ["native-tls"], optional = true } # API -async-graphql = { version = "5.0.9", default-features = false, optional = true, features = ["chrono"] } +async-graphql = { version = "5.0.10", default-features = false, optional = true, features = ["chrono"] } async-graphql-warp = { version = "5.0.9", default-features = false, optional = true } itertools = { version = "0.10.5", default-features = false, optional = true } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index a4c5d089146ac..579e12b6ab049 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" publish = false [dependencies] -async-graphql = { version = "5.0.9", default-features = false, features = ["playground" ], optional = true } +async-graphql = { version = "5.0.10", default-features = false, features = ["playground" ], optional = true } async-trait = { version = "0.1", default-features = false } bitmask-enum = { version = "2.1.0", default-features = false } bytes = { version = "1.4.0", default-features = false, features = ["serde"] } From f1e1ae36ec4f244a03cbc7084cde64ea2d9631fa Mon Sep 17 00:00:00 2001 From: neuronull Date: Thu, 8 Jun 2023 08:23:07 -0600 Subject: [PATCH 115/236] fix(ci): reg workflow alt approach to getting baseline sha (#17645) --- .github/workflows/regression.yml | 28 
+++------------------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 7fc5c83f15974..81e1f99fce1bf 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -126,33 +126,9 @@ jobs: smp-version: ${{ steps.experimental-meta.outputs.SMP_CRATE_VERSION }} lading-version: ${{ steps.experimental-meta.outputs.LADING_VERSION }} steps: - - uses: actions/checkout@v3 - - - name: Checkout PR (issue_comment) - if: github.event_name == 'issue_comment' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: gh pr checkout ${{ github.event.issue.number }} - - - name: Get PR branch name (issue_comment) - id: get-pr-branch-name - if: github.event_name == 'issue_comment' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - export BRANCH=$(git branch --show-current) - echo "BRANCH=${BRANCH}" - echo "BRANCH=${BRANCH}" >> $GITHUB_OUTPUT - - - name: Checkout PR branch (issue_comment) - if: github.event_name == 'issue_comment' - uses: actions/checkout@v3 with: - # TODO: this can be done more elegantly in a follow-up by using a depth value and - # increasing it until the merge-base is found. - fetch-depth: 500 - ref: "${{ steps.get-pr-branch-name.outputs.BRANCH }}" + fetch-depth: 1000 # If triggered by issue comment, the event payload doesn't directly contain the head and base sha from the PR. # But, we can retrieve this info from some commands. @@ -165,6 +141,8 @@ jobs: export PR_NUMBER=${{ github.event.issue.number }} echo "PR_NUMBER=${PR_NUMBER}" >> $GITHUB_OUTPUT + gh pr checkout ${{ github.event.issue.number }} + export BASELINE_SHA=$(git merge-base master HEAD) echo "BASELINE_SHA=${BASELINE_SHA}" >> $GITHUB_OUTPUT From e35150e8b376db1f19b60b828233eb47393bb2dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 14:23:41 +0000 Subject: [PATCH 116/236] chore(deps): Bump serde from 1.0.163 to 1.0.164 (#17632) Bumps [serde](https://github.com/serde-rs/serde) from 1.0.163 to 1.0.164.
Commits
  • 107018c Release 1.0.164
  • a398237 Point out serde(untagged) variants which are out of order
  • b63c65d Merge pull request #2470 from dtolnay/contentref
  • f60324e Reuse a single ContentRefDeserializer throughout untagged enum deserialization
  • 361c23a Simplify enumerate().find(...) -> Iterator::position
  • 43b23c7 Format PR 2403 with rustfmt
  • 6081497 Resolve semicolon_if_nothing_returned pedantic clippy lint
  • 48e5753 Allowed Enum variants to be individually marked as untagged (#2403)
  • bbba632 Revert "Ui tests with compile_error resolved at call site"
  • e77db40 Ui tests with compile_error resolved at call site
  • Additional commits viewable in compare view
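Several of these commits concern `#[serde(untagged)]` enums, where serde tries variants in declaration order. As a generic, hypothetical refresher of the long-standing enum-level attribute (not code from this repository, and not the newer per-variant form):

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum Id {
    // Variants are tried top to bottom, so declaration order matters.
    Num(u64),
    Text(String),
}

fn main() {
    let id: Id = serde_json::from_str("42").unwrap();
    println!("{id:?}"); // Num(42)
}
```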

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=serde&package-manager=cargo&previous-version=1.0.163&new-version=1.0.164)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- lib/vector-api-client/Cargo.toml | 2 +- lib/vector-buffers/Cargo.toml | 2 +- lib/vector-common/Cargo.toml | 2 +- lib/vector-config-macros/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- lib/vector-lookup/Cargo.toml | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f30b75ebb2f98..15e77e88c7c03 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7270,9 +7270,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.163" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] @@ -7330,9 +7330,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.163" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2 1.0.59", "quote 1.0.28", diff --git a/Cargo.toml b/Cargo.toml index 2a38cfa839f3b..dfb1a21c0b174 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -187,7 +187,7 @@ opendal = {version = "0.37", default-features = false, features = ["native-tls", tower = { version = "0.4.13", default-features = false, features = ["buffer", "limit", "retry", "timeout", "util", "balance", "discover"] } tower-http = { version = "0.4.0", default-features = false, features = ["decompression-gzip"]} # Serde -serde = { version = "1.0.163", default-features = false, features = ["derive"] } +serde = { version = "1.0.164", default-features = false, features = ["derive"] } serde-toml-merge = { version = "0.3.0", default-features = false } serde_bytes = { version = "0.11.9", default-features = false, features = ["std"], optional = true } serde_json = { version = "1.0.96", default-features = false, features = ["raw_value"] } diff --git a/lib/vector-api-client/Cargo.toml b/lib/vector-api-client/Cargo.toml index ad455736c62e6..8703e66edf1db 100644 --- a/lib/vector-api-client/Cargo.toml +++ b/lib/vector-api-client/Cargo.toml @@ -9,7 +9,7 @@ license = "MPL-2.0" [dependencies] # Serde -serde = { version = "1.0.163", default-features = false, features = ["derive"] } +serde = { version = "1.0.164", default-features = false, features = ["derive"] } serde_json = { version = "1.0.96", default-features = false, features = ["raw_value"] } # Error handling diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index 30fd200f326f7..f6b98d0cfcf23 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -21,7 +21,7 @@ metrics = "0.21.0" num-traits = { version = "0.2.15", default-features = false } pin-project = { version = "1.1.0", default-features = false } rkyv = { version = "0.7.40", default-features = false, features = ["size_32", "std", "strict", "validation"] } -serde = { version = "1.0.163", default-features = false, features = ["derive"] } +serde = { version = "1.0.164", default-features = false, features = ["derive"] } snafu = { version = "0.7.4", default-features = false, features = ["std"] } tokio-util = { version = "0.7.0", default-features = false } tokio = { 
version = "1.28.2", default-features = false, features = ["rt", "macros", "rt-multi-thread", "sync", "fs", "io-util", "time"] } diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 04a916445ff0b..8ee739f566bf5 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -56,7 +56,7 @@ paste = "1.0.12" pin-project = { version = "1.1.0", default-features = false } ryu = { version = "1", default-features = false } serde_json = { version = "1.0.96", default-features = false, features = ["std", "raw_value"] } -serde = { version = "1.0.163", optional = true, features = ["derive"] } +serde = { version = "1.0.164", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false } snafu = { version = "0.7", optional = true } stream-cancel = { version = "0.8.1", default-features = false } diff --git a/lib/vector-config-macros/Cargo.toml b/lib/vector-config-macros/Cargo.toml index 64d8c48d3bc70..2a133adaa4fcd 100644 --- a/lib/vector-config-macros/Cargo.toml +++ b/lib/vector-config-macros/Cargo.toml @@ -16,5 +16,5 @@ syn = { version = "1.0", default-features = false, features = ["full", "extra-tr vector-config-common = { path = "../vector-config-common" } [dev-dependencies] -serde = { version = "1.0.163", default-features = false } +serde = { version = "1.0.164", default-features = false } vector-config = { path = "../vector-config" } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 579e12b6ab049..d4e997eecdd05 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -40,7 +40,7 @@ prost = { version = "0.11", default-features = false, features = ["std"] } quanta = { version = "0.11.1", default-features = false } regex = { version = "1.8.4", default-features = false, features = ["std", "perf"] } ryu = { version = "1", default-features = false } -serde = { version = "1.0.163", default-features = false, features = ["derive", "rc"] } +serde = { version = "1.0.164", default-features = false, features = ["derive", "rc"] } serde_json = { version = "1.0.96", default-features = false } serde_with = { version = "2.3.2", default-features = false, features = ["std", "macros"] } smallvec = { version = "1", default-features = false, features = ["serde", "const_generics"] } diff --git a/lib/vector-lookup/Cargo.toml b/lib/vector-lookup/Cargo.toml index daec79b1e5004..b1ca558a543cf 100644 --- a/lib/vector-lookup/Cargo.toml +++ b/lib/vector-lookup/Cargo.toml @@ -7,7 +7,7 @@ publish = false license = "MPL-2.0" [dependencies] -serde = { version = "1.0.163", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.164", default-features = false, features = ["derive", "alloc"] } vector-config = { path = "../vector-config" } vector-config-macros = { path = "../vector-config-macros" } vrl = { version = "0.4.0", default-features = false, features = ["path"] } From 593ea1bc89303f2f2344cca58d7c1aa5de939084 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 14:23:45 +0000 Subject: [PATCH 117/236] chore(deps): Bump memmap2 from 0.6.2 to 0.7.0 (#17641) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [memmap2](https://github.com/RazrFalcon/memmap2-rs) from 0.6.2 to 0.7.0.
Changelog

Sourced from memmap2's changelog.

[0.7.0] - 2023-06-08

Added

Changed

  • libc crate >= 0.2.143 is required now.
Commits
  • 04178aa Version bump.
  • 6579655 Add MADV_POPULATE_{READ,WRITE}.
  • b098f9e Refactor pointer handling within unix.rs into helper methods.
  • 0c9b7f7 Add support for mremap(2) on Linux.
  • See full diff in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- lib/vector-buffers/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 15e77e88c7c03..2a11a6be9a388 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4863,9 +4863,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memmap2" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d28bba84adfe6646737845bc5ebbfa2c08424eb1c37e94a1fd2a82adb56a872" +checksum = "180d4b35be83d33392d1d1bfbd2ae1eca7ff5de1a94d3fc87faaa99a069e7cbd" dependencies = [ "libc", ] diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index f6b98d0cfcf23..4ce2a1e85f168 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -16,7 +16,7 @@ crossbeam-queue = { version = "0.3.8", default-features = false, features = ["st crossbeam-utils = { version = "0.8.15", default-features = false } fslock = { version = "0.2.1", default-features = false, features = ["std"] } futures = { version = "0.3.28", default-features = false, features = ["std"] } -memmap2 = { version = "0.6.2", default-features = false } +memmap2 = { version = "0.7.0", default-features = false } metrics = "0.21.0" num-traits = { version = "0.2.15", default-features = false } pin-project = { version = "1.1.0", default-features = false } From b3885f693ebbdddd338b72bfd594e164d4fa361d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 14:26:45 +0000 Subject: [PATCH 118/236] chore(deps): Bump async-graphql-warp from 5.0.9 to 5.0.10 (#17642) Bumps [async-graphql-warp](https://github.com/async-graphql/async-graphql) from 5.0.9 to 5.0.10.
Changelog

Sourced from async-graphql-warp's changelog.

[5.0.10] 2023-06-07

  • Upgrade opentelemetry to 0.19.0 #1252
  • Remove internal CursorScalar type and expose Edge::cursor member #1302
Commits

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2a11a6be9a388..ed27c5f660ea6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -484,9 +484,9 @@ dependencies = [ [[package]] name = "async-graphql-warp" -version = "5.0.9" +version = "5.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fae3d1991cb75a984eb6787b84dd7ebb362a696d4239bb59abbc5a015a01724c" +checksum = "ce971f92675defe1adf14f9e70b8798d797db9f454463b611a552bffd5532188" dependencies = [ "async-graphql", "futures-util", diff --git a/Cargo.toml b/Cargo.toml index dfb1a21c0b174..de6bdc4cb2552 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -211,7 +211,7 @@ lapin = { version = "2.2.1", default-features = false, features = ["native-tls"] # API async-graphql = { version = "5.0.10", default-features = false, optional = true, features = ["chrono"] } -async-graphql-warp = { version = "5.0.9", default-features = false, optional = true } +async-graphql-warp = { version = "5.0.10", default-features = false, optional = true } itertools = { version = "0.10.5", default-features = false, optional = true } # API client From f20eb2ff554c0163ea4955c9a5ad1ef0acd9f492 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 14:40:57 +0000 Subject: [PATCH 119/236] chore(deps): Bump proc-macro2 from 1.0.59 to 1.0.60 (#17643) Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.59 to 1.0.60.
Commits
  • 549377b Release 1.0.60
  • bbb6bb8 Merge pull request #391 from dtolnay/nobeforeafter
  • e31d619 Delete use of proc_macro::Span::before/after
  • 528e761 Ignore missing_fields_in_debug clippy lint for Ident
  • See full diff in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 132 ++++++++++++++++++++++++++--------------------------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ed27c5f660ea6..d672e52addcec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -452,7 +452,7 @@ dependencies = [ "async-graphql-parser", "darling 0.14.2", "proc-macro-crate 1.2.1", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", "thiserror", @@ -572,7 +572,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -594,7 +594,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -611,7 +611,7 @@ version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -1573,7 +1573,7 @@ dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", "proc-macro-crate 0.1.5", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "syn 1.0.109", ] @@ -1583,7 +1583,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61820b4c5693eafb998b1e67485423c923db4a75f72585c247bdee32bad81e7b" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -1594,7 +1594,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c76cdbfa13def20d1f8af3ae7b3c6771f06352a74221d8851262ac384c122b8e" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -1665,7 +1665,7 @@ version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -1748,7 +1748,7 @@ checksum = "b48814962d2fd604c50d2b9433c2a41a0ab567779ee2c02f7fba6eca1221f082" dependencies = [ "cached_proc_macro_types", "darling 0.14.2", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -1973,7 +1973,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81d7dc0031c3a59a04fc2ba395c8e2dd463cba1859275f065d225f6122221b45" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -2471,7 +2471,7 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "scratch", "syn 1.0.109", @@ -2489,7 +2489,7 @@ version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -2522,7 +2522,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.59", + 
"proc-macro2 1.0.60", "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", @@ -2536,7 +2536,7 @@ checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", @@ -2635,7 +2635,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -2646,7 +2646,7 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -2658,7 +2658,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "rustc_version 0.4.0", "syn 1.0.109", @@ -2915,7 +2915,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -2927,7 +2927,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -2939,7 +2939,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11f36e95862220b211a6e2aa5eca09b4fa391b13cd52ceb8035a24bf65a79de2" dependencies = [ "once_cell", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -2959,7 +2959,7 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -3165,7 +3165,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4c81935e123ab0741c4c4f0d9b8377e5fb21d3de7e062fa4b1263b1fbcba1ea" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -3346,7 +3346,7 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -3429,7 +3429,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb19fe8de3ea0920d282f7b77dd4227aea6b8b999b42cdf0ca41b2472b14443a" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -3529,7 +3529,7 @@ dependencies = [ "graphql-parser", "heck 0.4.0", "lazy_static", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "serde", "serde_json", @@ -3543,7 +3543,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00bda454f3d313f909298f626115092d348bc231025699f557b27e248475f48c" dependencies = [ "graphql_client_codegen", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", 
"syn 1.0.109", ] @@ -4905,7 +4905,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -5422,7 +5422,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.2.1", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -5434,7 +5434,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.2.1", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -5608,7 +5608,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -5843,7 +5843,7 @@ checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -5931,7 +5931,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -6140,7 +6140,7 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c142c0e46b57171fe0c528bee8c5b7569e80f0c17e377cd0e30ea57dbc11bb51" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "syn 1.0.109", ] @@ -6185,7 +6185,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", "version_check", @@ -6197,7 +6197,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "version_check", ] @@ -6225,9 +6225,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aeca18b86b413c660b781aa319e4e2648a3e6f9eadc9b47e9038e6fe9f3451b" +checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" dependencies = [ "unicode-ident", ] @@ -6306,7 +6306,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -6335,7 +6335,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -6434,7 +6434,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -6454,7 +6454,7 @@ version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", ] [[package]] @@ -6849,7 +6849,7 @@ version = "0.7.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -7334,7 +7334,7 @@ version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -7345,7 +7345,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -7397,7 +7397,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -7456,7 +7456,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -7468,7 +7468,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" dependencies = [ "darling 0.14.2", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -7730,7 +7730,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -7871,7 +7871,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -7889,7 +7889,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "rustversion", "syn 1.0.109", @@ -7928,7 +7928,7 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "unicode-ident", ] @@ -7939,7 +7939,7 @@ version = "2.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "unicode-ident", ] @@ -7956,7 +7956,7 @@ version = "0.12.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", "unicode-xid 0.2.4", @@ -8110,7 +8110,7 @@ version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -8255,7 +8255,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -8475,7 +8475,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "prost-build", "quote 1.0.28", "syn 1.0.109", @@ -8580,7 +8580,7 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -8851,7 +8851,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -8881,7 +8881,7 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3e1c30cedd24fc597f7d37a721efdbdc2b1acae012c1ef1218f4c7c2c0f3e7" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", ] @@ -9421,7 +9421,7 @@ dependencies = [ "darling 0.13.4", "indexmap", "once_cell", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "serde", "serde_json", @@ -9434,7 +9434,7 @@ name = "vector-config-macros" version = "0.1.0" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "serde", "serde_derive_internals", @@ -9700,7 +9700,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", ] @@ -9801,7 +9801,7 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", "wasm-bindgen-shared", @@ -9835,7 +9835,7 @@ version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 2.0.10", "wasm-bindgen-backend", @@ -10240,7 +10240,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6505e6815af7de1746a08f69c69606bb45695a17149517680f3b2149713b19a3" dependencies = [ - "proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", ] @@ -10260,7 +10260,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ - 
"proc-macro2 1.0.59", + "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", "synstructure", From 2638cca6cbf5103f71944383255b3e335d7f5790 Mon Sep 17 00:00:00 2001 From: neuronull Date: Thu, 8 Jun 2023 09:34:18 -0600 Subject: [PATCH 120/236] fix(ci): use correct ID for Triage in Gardener Board (#17647) --- .github/workflows/gardener_issue_comment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gardener_issue_comment.yml b/.github/workflows/gardener_issue_comment.yml index 795de5c3a0517..8ea89fb315145 100644 --- a/.github/workflows/gardener_issue_comment.yml +++ b/.github/workflows/gardener_issue_comment.yml @@ -24,7 +24,7 @@ jobs: # IDs fetched from https://docs.github.com/en/graphql/overview/explorer project_id="PVT_kwDOAQFeYs4AAsTr" # Gardener status_field_id="PVTF_lADOAQFeYs4AAsTrzgAXRuU" # Status - triage_option_id="f75ad846" + triage_option_id="2a08fafa" # ensures that the issue is already on board but also seems to be the only way to fetch # the item id From 380d7adb72a02e8da0af35fd3d80ecb1d8b0b541 Mon Sep 17 00:00:00 2001 From: Alexander Zaitsev Date: Thu, 8 Jun 2023 19:16:28 +0200 Subject: [PATCH 121/236] feat(prometheus): add more compression algorithms to Prometheus Remote Write (#17334) Resolves https://github.com/vectordotdev/vector/issues/17199 - add Zstd and Gzip support to Prometheus Remote Write - add a new compression option in the config (Snappy is the default) - update the documentation Tested: - Local build --- src/sinks/prometheus/remote_write.rs | 61 +++++++++++++++++-- .../sinks/base/prometheus_remote_write.cue | 12 ++++ .../sinks/prometheus_remote_write.cue | 9 +++ website/cue/reference/urls.cue | 1 + 4 files changed, 77 insertions(+), 6 deletions(-) diff --git a/src/sinks/prometheus/remote_write.rs b/src/sinks/prometheus/remote_write.rs index 2692253780c5a..027cd19e4d8cd 100644 --- a/src/sinks/prometheus/remote_write.rs +++ b/src/sinks/prometheus/remote_write.rs @@ -1,3 +1,4 @@ +use std::io::Read; use std::sync::Arc; use std::task; @@ -123,10 +124,40 @@ pub struct RemoteWriteConfig { skip_serializing_if = "crate::serde::skip_serializing_if_default" )] pub acknowledgements: AcknowledgementsConfig, + + #[configurable(derived)] + #[configurable(metadata(docs::advanced))] + #[serde(default)] + pub compression: Compression, } impl_generate_config_from_default!(RemoteWriteConfig); +/// Supported compression types for Prometheus Remote Write. +#[configurable_component] +#[derive(Clone, Copy, Debug, Derivative)] +#[derivative(Default)] +#[serde(rename_all = "lowercase")] +pub enum Compression { + /// Snappy. + #[derivative(Default)] + Snappy, + + /// Gzip. + Gzip, + + /// Zstandard. 
+ Zstd, +} + +const fn convert_compression_to_content_encoding(compression: Compression) -> &'static str { + match compression { + Compression::Snappy => "snappy", + Compression::Gzip => "gzip", + Compression::Zstd => "zstd", + } +} + #[async_trait::async_trait] impl SinkConfig for RemoteWriteConfig { async fn build( @@ -181,6 +212,7 @@ impl SinkConfig for RemoteWriteConfig { aws_region, credentials_provider, http_auth, + compression: self.compression, }); let healthcheck = healthcheck(client.clone(), Arc::clone(&http_request_builder)).boxed(); @@ -190,6 +222,7 @@ impl SinkConfig for RemoteWriteConfig { buckets, quantiles, http_request_builder, + compression: self.compression, }; let sink = { @@ -277,6 +310,7 @@ struct RemoteWriteService { buckets: Vec, quantiles: Vec, http_request_builder: Arc, + compression: Compression, } impl RemoteWriteService { @@ -312,7 +346,7 @@ impl Service, PartitionKey>> for RemoteWriteSer fn call(&mut self, buffer: PartitionInnerBuffer, PartitionKey>) -> Self::Future { let (events, key) = buffer.into_parts(); let body = self.encode_events(events); - let body = snap_block(body); + let body = compress_block(self.compression, body); let client = self.client.clone(); let request_builder = Arc::clone(&self.http_request_builder); @@ -344,6 +378,7 @@ pub struct HttpRequestBuilder { pub aws_region: Option, pub http_auth: Option, pub credentials_provider: Option, + pub compression: Compression, } impl HttpRequestBuilder { @@ -353,11 +388,13 @@ impl HttpRequestBuilder { body: Vec, tenant_id: Option, ) -> Result, crate::Error> { + let content_encoding = convert_compression_to_content_encoding(self.compression); + let mut builder = http::Request::builder() .method(method) .uri(self.endpoint.clone()) .header("X-Prometheus-Remote-Write-Version", "0.1.0") - .header("Content-Encoding", "snappy") + .header("Content-Encoding", content_encoding) .header("Content-Type", "application/x-protobuf"); if let Some(tenant_id) = &tenant_id { @@ -380,10 +417,22 @@ impl HttpRequestBuilder { } } -fn snap_block(data: Bytes) -> Vec { - snap::raw::Encoder::new() - .compress_vec(&data) - .expect("Out of memory") +fn compress_block(compression: Compression, data: Bytes) -> Vec { + match compression { + Compression::Snappy => snap::raw::Encoder::new() + .compress_vec(&data) + .expect("snap compression failed, please report"), + Compression::Gzip => { + let mut buf = Vec::new(); + flate2::read::GzEncoder::new(data.as_ref(), flate2::Compression::default()) + .read_to_end(&mut buf) + .expect("gzip compression failed, please report"); + buf + } + Compression::Zstd => { + zstd::encode_all(data.as_ref(), 0).expect("zstd compression failed, please report") + } + } } async fn sign_request( diff --git a/website/cue/reference/components/sinks/base/prometheus_remote_write.cue b/website/cue/reference/components/sinks/base/prometheus_remote_write.cue index 96242b171ad24..4089af9b9af7a 100644 --- a/website/cue/reference/components/sinks/base/prometheus_remote_write.cue +++ b/website/cue/reference/components/sinks/base/prometheus_remote_write.cue @@ -223,6 +223,18 @@ base: components: sinks: prometheus_remote_write: configuration: { items: type: float: {} } } + compression: { + description: "Supported compression types for Prometheus Remote Write." + required: false + type: string: { + default: "snappy" + enum: { + gzip: "Gzip." + snappy: "Snappy." + zstd: "Zstandard." + } + } + } default_namespace: { description: """ The default namespace for any metrics sent. 
diff --git a/website/cue/reference/components/sinks/prometheus_remote_write.cue b/website/cue/reference/components/sinks/prometheus_remote_write.cue index c384571aef6f2..fc16151370419 100644 --- a/website/cue/reference/components/sinks/prometheus_remote_write.cue +++ b/website/cue/reference/components/sinks/prometheus_remote_write.cue @@ -100,5 +100,14 @@ components: sinks: prometheus_remote_write: { values for each name, Vector will only send the last value specified. """ } + compression_schemes: { + title: "Compression schemes" + body: """ + Officially according to the [Prometheus Remote-Write specification](\(urls.prometheus_remote_write_spec)), + the only supported compression scheme is [Snappy](\(urls.snappy)). However, + there are a number of other implementations that do support other schemes. Thus + Vector also supports using Gzip and Zstd. + """ + } } } diff --git a/website/cue/reference/urls.cue b/website/cue/reference/urls.cue index fd9cafc52d6dc..0e3560392c108 100644 --- a/website/cue/reference/urls.cue +++ b/website/cue/reference/urls.cue @@ -412,6 +412,7 @@ urls: { prometheus_remote_integrations: "https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage" prometheus_remote_write: "https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write" prometheus_remote_write_protocol: "https://docs.google.com/document/d/1LPhVRSFkGNSuU1fBd81ulhsCPR4hkSZyyBj1SZ8fWOM/edit#heading=h.n0d0vphea3fe" + prometheus_remote_write_spec: "https://prometheus.io/docs/concepts/remote_write_spec/#protocol" protobuf: "https://developers.google.com/protocol-buffers" pulsar: "https://pulsar.apache.org/" pulsar_protocol: "https://pulsar.apache.org/docs/en/develop-binary-protocol/" From a324a07ba1b62baac08d74b287595846b787b887 Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Thu, 8 Jun 2023 15:43:44 -0400 Subject: [PATCH 122/236] feat(journald source): add journal_namespace option (#17648) Closes: https://github.com/vectordotdev/vector/issues/16808 --- src/sources/journald.rs | 64 +++++++++++++++++-- .../components/sources/base/journald.cue | 13 ++++ 2 files changed, 71 insertions(+), 6 deletions(-) diff --git a/src/sources/journald.rs b/src/sources/journald.rs index 652e3577aeae0..f6426d69e4cdd 100644 --- a/src/sources/journald.rs +++ b/src/sources/journald.rs @@ -174,6 +174,16 @@ pub struct JournaldConfig { #[serde(default)] pub journal_directory: Option, + /// The [journal namespace][journal-namespace]. + /// + /// This value is passed to `journalctl` through the [`--namespace` option][journalctl-namespace-option]. + /// If not set, `journalctl` uses the default namespace. 
+ /// + /// [journal-namespace]: https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html#Journal%20Namespaces + /// [journalctl-namespace-option]: https://www.freedesktop.org/software/systemd/man/journalctl.html#--namespace=NAMESPACE + #[serde(default)] + pub journal_namespace: Option, + #[configurable(derived)] #[serde(default, deserialize_with = "bool_or_struct")] acknowledgements: SourceAcknowledgementsConfig, @@ -290,6 +300,7 @@ impl Default for JournaldConfig { batch_size: default_batch_size(), journalctl_path: None, journal_directory: None, + journal_namespace: None, acknowledgements: Default::default(), remap_priority: false, log_namespace: None, @@ -341,6 +352,7 @@ impl SourceConfig for JournaldConfig { let starter = StartJournalctl::new( journalctl_path, self.journal_directory.clone(), + self.journal_namespace.clone(), self.current_boot_only, self.since_now, ); @@ -610,6 +622,7 @@ type JournalStream = BoxStream<'static, Result>; struct StartJournalctl { path: PathBuf, journal_dir: Option, + journal_namespace: Option, current_boot_only: bool, since_now: bool, } @@ -618,12 +631,14 @@ impl StartJournalctl { const fn new( path: PathBuf, journal_dir: Option, + journal_namespace: Option, current_boot_only: bool, since_now: bool, ) -> Self { Self { path, journal_dir, + journal_namespace, current_boot_only, since_now, } @@ -641,6 +656,10 @@ impl StartJournalctl { command.arg(format!("--directory={}", dir.display())); } + if let Some(namespace) = &self.journal_namespace { + command.arg(format!("--namespace={}", namespace)); + } + if self.current_boot_only { command.arg("--boot"); } @@ -1400,30 +1419,56 @@ mod tests { let path = PathBuf::from("journalctl"); let journal_dir = None; + let journal_namespace = None; let current_boot_only = false; let cursor = None; let since_now = false; - let command = create_command(&path, journal_dir, current_boot_only, since_now, cursor); + let command = create_command( + &path, + journal_dir, + journal_namespace, + current_boot_only, + since_now, + cursor, + ); let cmd_line = format!("{:?}", command); assert!(!cmd_line.contains("--directory=")); + assert!(!cmd_line.contains("--namespace=")); assert!(!cmd_line.contains("--boot")); assert!(cmd_line.contains("--since=2000-01-01")); - let since_now = true; let journal_dir = None; + let journal_namespace = None; + let since_now = true; - let command = create_command(&path, journal_dir, current_boot_only, since_now, cursor); + let command = create_command( + &path, + journal_dir, + journal_namespace, + current_boot_only, + since_now, + cursor, + ); let cmd_line = format!("{:?}", command); assert!(cmd_line.contains("--since=now")); let journal_dir = Some(PathBuf::from("/tmp/journal-dir")); + let journal_namespace = Some(String::from("my_namespace")); let current_boot_only = true; let cursor = Some("2021-01-01"); - let command = create_command(&path, journal_dir, current_boot_only, since_now, cursor); + let command = create_command( + &path, + journal_dir, + journal_namespace, + current_boot_only, + since_now, + cursor, + ); let cmd_line = format!("{:?}", command); assert!(cmd_line.contains("--directory=/tmp/journal-dir")); + assert!(cmd_line.contains("--namespace=my_namespace")); assert!(cmd_line.contains("--boot")); assert!(cmd_line.contains("--after-cursor=")); } @@ -1431,12 +1476,19 @@ mod tests { fn create_command( path: &Path, journal_dir: Option, + journal_namespace: Option, current_boot_only: bool, since_now: bool, cursor: Option<&str>, ) -> Command { - 
StartJournalctl::new(path.into(), journal_dir, current_boot_only, since_now) - .make_command(cursor) + StartJournalctl::new( + path.into(), + journal_dir, + journal_namespace, + current_boot_only, + since_now, + ) + .make_command(cursor) } fn message(event: &Event) -> Value { diff --git a/website/cue/reference/components/sources/base/journald.cue b/website/cue/reference/components/sources/base/journald.cue index bbebadce5bcd9..d259076e1b6a8 100644 --- a/website/cue/reference/components/sources/base/journald.cue +++ b/website/cue/reference/components/sources/base/journald.cue @@ -126,6 +126,19 @@ base: components: sources: journald: configuration: { required: false type: string: {} } + journal_namespace: { + description: """ + The [journal namespace][journal-namespace]. + + This value is passed to `journalctl` through the [`--namespace` option][journalctl-namespace-option]. + If not set, `journalctl` uses the default namespace. + + [journal-namespace]: https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html#Journal%20Namespaces + [journalctl-namespace-option]: https://www.freedesktop.org/software/systemd/man/journalctl.html#--namespace=NAMESPACE + """ + required: false + type: string: {} + } journalctl_path: { description: """ The full path of the `journalctl` executable. From 0dc450fac14ac0236ca48466fd4fe42630d421ed Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Thu, 8 Jun 2023 15:44:35 -0400 Subject: [PATCH 123/236] chore(sinks): mark VectorSink::from_event_sink as deprecated (#17649) This will help steer contributors away from writing new sinks in the deprecated fashion. See https://github.com/vectordotdev/vector/issues/9261 for more context. --- lib/vector-core/src/sink.rs | 6 ++++++ src/config/unit_test/unit_test_components.rs | 1 + src/sinks/appsignal/mod.rs | 1 + src/sinks/aws_cloudwatch_metrics/mod.rs | 1 + src/sinks/azure_monitor_logs.rs | 2 ++ src/sinks/clickhouse/http_sink.rs | 1 + src/sinks/gcp/pubsub.rs | 1 + src/sinks/gcp/stackdriver_logs.rs | 1 + src/sinks/gcp/stackdriver_metrics.rs | 1 + src/sinks/honeycomb.rs | 1 + src/sinks/http.rs | 1 + src/sinks/influxdb/logs.rs | 1 + src/sinks/influxdb/metrics.rs | 1 + src/sinks/mezmo.rs | 1 + src/sinks/prometheus/remote_write.rs | 1 + src/sinks/redis.rs | 1 + src/sinks/sematext/metrics.rs | 1 + src/sinks/util/adaptive_concurrency/tests.rs | 1 + src/test_util/mock/sinks/error.rs | 1 + src/test_util/mock/sinks/panic.rs | 1 + 20 files changed, 26 insertions(+) diff --git a/lib/vector-core/src/sink.rs b/lib/vector-core/src/sink.rs index b39b9d1c75c5d..fb47b6f4d50dd 100644 --- a/lib/vector-core/src/sink.rs +++ b/lib/vector-core/src/sink.rs @@ -61,6 +61,12 @@ impl VectorSink { } /// Converts an event sink into a `VectorSink` + /// + /// Deprecated in favor of `VectorSink::from_event_streamsink`. See [vector/9261] + /// for more info. 
+ /// + /// [vector/9261]: https://github.com/vectordotdev/vector/issues/9261 + #[deprecated] pub fn from_event_sink(sink: impl Sink + Send + Unpin + 'static) -> Self { VectorSink::Sink(Box::new(EventSink::new(sink))) } diff --git a/src/config/unit_test/unit_test_components.rs b/src/config/unit_test/unit_test_components.rs index a38e0492d87aa..157c2306abe3a 100644 --- a/src/config/unit_test/unit_test_components.rs +++ b/src/config/unit_test/unit_test_components.rs @@ -302,6 +302,7 @@ impl SinkConfig for UnitTestStreamSinkConfig { let sink = self.sink.lock().await.take().unwrap(); let healthcheck = future::ok(()).boxed(); + #[allow(deprecated)] Ok((VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/sinks/appsignal/mod.rs b/src/sinks/appsignal/mod.rs index ca2211f8505a2..9d24731f01c9a 100644 --- a/src/sinks/appsignal/mod.rs +++ b/src/sinks/appsignal/mod.rs @@ -136,6 +136,7 @@ impl SinkConfig for AppsignalSinkConfig { ) .boxed(); + #[allow(deprecated)] Ok((super::VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/sinks/aws_cloudwatch_metrics/mod.rs b/src/sinks/aws_cloudwatch_metrics/mod.rs index accc041b54c32..e169dde9b01fb 100644 --- a/src/sinks/aws_cloudwatch_metrics/mod.rs +++ b/src/sinks/aws_cloudwatch_metrics/mod.rs @@ -252,6 +252,7 @@ impl CloudWatchMetricsSvc { }) }); + #[allow(deprecated)] Ok(VectorSink::from_event_sink(sink)) } diff --git a/src/sinks/azure_monitor_logs.rs b/src/sinks/azure_monitor_logs.rs index bc6bc1b43a330..77e675daf3074 100644 --- a/src/sinks/azure_monitor_logs.rs +++ b/src/sinks/azure_monitor_logs.rs @@ -204,6 +204,7 @@ impl SinkConfig for AzureMonitorLogsConfig { ) .sink_map_err(|error| error!(message = "Fatal azure_monitor_logs sink error.", %error)); + #[allow(deprecated)] Ok((VectorSink::from_event_sink(sink), healthcheck)) } @@ -471,6 +472,7 @@ mod tests { .sink_map_err(|error| error!(message = "Fatal azure_monitor_logs sink error.", %error)); let event = Event::Log(LogEvent::from("simple message")); + #[allow(deprecated)] run_and_assert_sink_compliance( VectorSink::from_event_sink(sink), stream::once(ready(event)), diff --git a/src/sinks/clickhouse/http_sink.rs b/src/sinks/clickhouse/http_sink.rs index 65d96dc341ab1..91c197d126723 100644 --- a/src/sinks/clickhouse/http_sink.rs +++ b/src/sinks/clickhouse/http_sink.rs @@ -47,6 +47,7 @@ pub(crate) async fn build_http_sink( let healthcheck = healthcheck(client, config).boxed(); + #[allow(deprecated)] Ok((VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/sinks/gcp/pubsub.rs b/src/sinks/gcp/pubsub.rs index eee4a64c496c4..dded23587438f 100644 --- a/src/sinks/gcp/pubsub.rs +++ b/src/sinks/gcp/pubsub.rs @@ -135,6 +135,7 @@ impl SinkConfig for PubsubConfig { ) .sink_map_err(|error| error!(message = "Fatal gcp_pubsub sink error.", %error)); + #[allow(deprecated)] Ok((VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/sinks/gcp/stackdriver_logs.rs b/src/sinks/gcp/stackdriver_logs.rs index f313bb1a38c5e..8e7c43276bd20 100644 --- a/src/sinks/gcp/stackdriver_logs.rs +++ b/src/sinks/gcp/stackdriver_logs.rs @@ -237,6 +237,7 @@ impl SinkConfig for StackdriverConfig { ) .sink_map_err(|error| error!(message = "Fatal gcp_stackdriver_logs sink error.", %error)); + #[allow(deprecated)] Ok((VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/sinks/gcp/stackdriver_metrics.rs b/src/sinks/gcp/stackdriver_metrics.rs index f6c66735ddb71..a68140e2bced8 100644 --- a/src/sinks/gcp/stackdriver_metrics.rs +++ b/src/sinks/gcp/stackdriver_metrics.rs @@ -126,6 
+126,7 @@ impl SinkConfig for StackdriverConfig { |error| error!(message = "Fatal gcp_stackdriver_metrics sink error.", %error), ); + #[allow(deprecated)] Ok((VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/sinks/honeycomb.rs b/src/sinks/honeycomb.rs index c69b52515ec9e..f9a05c2a7a5b0 100644 --- a/src/sinks/honeycomb.rs +++ b/src/sinks/honeycomb.rs @@ -108,6 +108,7 @@ impl SinkConfig for HoneycombConfig { let healthcheck = healthcheck(self.clone(), client).boxed(); + #[allow(deprecated)] Ok((super::VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/sinks/http.rs b/src/sinks/http.rs index a49335b77b5c3..1d21b052c2c82 100644 --- a/src/sinks/http.rs +++ b/src/sinks/http.rs @@ -251,6 +251,7 @@ impl SinkConfig for HttpSinkConfig { ) .sink_map_err(|error| error!(message = "Fatal HTTP sink error.", %error)); + #[allow(deprecated)] let sink = super::VectorSink::from_event_sink(sink); Ok((sink, healthcheck)) diff --git a/src/sinks/influxdb/logs.rs b/src/sinks/influxdb/logs.rs index 0bcd27ef35e90..96c5be07ee96f 100644 --- a/src/sinks/influxdb/logs.rs +++ b/src/sinks/influxdb/logs.rs @@ -232,6 +232,7 @@ impl SinkConfig for InfluxDbLogsConfig { ) .sink_map_err(|error| error!(message = "Fatal influxdb_logs sink error.", %error)); + #[allow(deprecated)] Ok((VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/sinks/influxdb/metrics.rs b/src/sinks/influxdb/metrics.rs index 75bde8fde1238..e8f8d281ca106 100644 --- a/src/sinks/influxdb/metrics.rs +++ b/src/sinks/influxdb/metrics.rs @@ -193,6 +193,7 @@ impl InfluxDbSvc { }) .sink_map_err(|error| error!(message = "Fatal influxdb sink error.", %error)); + #[allow(deprecated)] Ok(VectorSink::from_event_sink(sink)) } } diff --git a/src/sinks/mezmo.rs b/src/sinks/mezmo.rs index 8f741e6e26074..00a8bf09278bc 100644 --- a/src/sinks/mezmo.rs +++ b/src/sinks/mezmo.rs @@ -175,6 +175,7 @@ impl SinkConfig for MezmoConfig { let healthcheck = healthcheck(self.clone(), client).boxed(); + #[allow(deprecated)] Ok((super::VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/sinks/prometheus/remote_write.rs b/src/sinks/prometheus/remote_write.rs index 027cd19e4d8cd..6451e591f11e0 100644 --- a/src/sinks/prometheus/remote_write.rs +++ b/src/sinks/prometheus/remote_write.rs @@ -261,6 +261,7 @@ impl SinkConfig for RemoteWriteConfig { ) }; + #[allow(deprecated)] Ok((sinks::VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/sinks/redis.rs b/src/sinks/redis.rs index 611127b4641d0..5a53831bebd3c 100644 --- a/src/sinks/redis.rs +++ b/src/sinks/redis.rs @@ -235,6 +235,7 @@ impl RedisSinkConfig { }) .sink_map_err(|error| error!(message = "Sink failed to flush.", %error)); + #[allow(deprecated)] Ok(super::VectorSink::from_event_sink(sink)) } diff --git a/src/sinks/sematext/metrics.rs b/src/sinks/sematext/metrics.rs index c67898f307d59..29a51c9309638 100644 --- a/src/sinks/sematext/metrics.rs +++ b/src/sinks/sematext/metrics.rs @@ -194,6 +194,7 @@ impl SematextMetricsService { }) .sink_map_err(|error| error!(message = "Fatal sematext metrics sink error.", %error)); + #[allow(deprecated)] Ok(VectorSink::from_event_sink(sink)) } } diff --git a/src/sinks/util/adaptive_concurrency/tests.rs b/src/sinks/util/adaptive_concurrency/tests.rs index 33d735a76f187..69c52bc6ca236 100644 --- a/src/sinks/util/adaptive_concurrency/tests.rs +++ b/src/sinks/util/adaptive_concurrency/tests.rs @@ -200,6 +200,7 @@ impl SinkConfig for TestConfig { ); *self.controller_stats.lock().unwrap() = stats; + #[allow(deprecated)] 
Ok((VectorSink::from_event_sink(sink), healthcheck)) } diff --git a/src/test_util/mock/sinks/error.rs b/src/test_util/mock/sinks/error.rs index 4e26e9a263317..b1a662fa6694f 100644 --- a/src/test_util/mock/sinks/error.rs +++ b/src/test_util/mock/sinks/error.rs @@ -30,6 +30,7 @@ impl_generate_config_from_default!(ErrorSinkConfig); #[async_trait] impl SinkConfig for ErrorSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { + #[allow(deprecated)] Ok((VectorSink::from_event_sink(ErrorSink), ok(()).boxed())) } diff --git a/src/test_util/mock/sinks/panic.rs b/src/test_util/mock/sinks/panic.rs index 1f27c256516aa..34079fc30f4ec 100644 --- a/src/test_util/mock/sinks/panic.rs +++ b/src/test_util/mock/sinks/panic.rs @@ -30,6 +30,7 @@ impl_generate_config_from_default!(PanicSinkConfig); #[async_trait] impl SinkConfig for PanicSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { + #[allow(deprecated)] Ok((VectorSink::from_event_sink(PanicSink), ok(()).boxed())) } From 45a28f88a910c8492872773cc2e86045c8e2f4b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Drouet?= Date: Fri, 9 Jun 2023 15:28:33 +0200 Subject: [PATCH 124/236] chore(enrichment): avoid importing vector-common in enrichment module (#17653) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to use enrichment tables in `vrl-wasm` and pulling vector-common is not needed so better not using it. Signed-off-by: Jérémie Drouet --- Cargo.lock | 1 - lib/enrichment/Cargo.toml | 8 +++++--- lib/enrichment/src/find_enrichment_table_records.rs | 2 +- lib/enrichment/src/get_enrichment_table_record.rs | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d672e52addcec..ce9e6d92c79c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2904,7 +2904,6 @@ dependencies = [ "arc-swap", "chrono", "dyn-clone", - "vector-common", "vrl", ] diff --git a/lib/enrichment/Cargo.toml b/lib/enrichment/Cargo.toml index c88d81e2dfb09..4dbc835c11b12 100644 --- a/lib/enrichment/Cargo.toml +++ b/lib/enrichment/Cargo.toml @@ -7,7 +7,9 @@ publish = false [dependencies] arc-swap = { version = "1.6.0", default-features = false } -dyn-clone = { version = "1.0.11", default-features = false } chrono = { version = "0.4.19", default-features = false } -vector-common = { path = "../vector-common", default-features = false, features = [ "btreemap", "conversion", "serde" ] } -vrl = { version = "0.4.0", default-features = false, features = ["diagnostic"] } +dyn-clone = { version = "1.0.11", default-features = false } +vrl = { version = "0.4.0", default-features = false, features = [ + "compiler", + "diagnostic", +] } diff --git a/lib/enrichment/src/find_enrichment_table_records.rs b/lib/enrichment/src/find_enrichment_table_records.rs index bfe0aa7183af2..3ade47b7c55a6 100644 --- a/lib/enrichment/src/find_enrichment_table_records.rs +++ b/lib/enrichment/src/find_enrichment_table_records.rs @@ -191,9 +191,9 @@ impl FunctionExpression for FindEnrichmentTableRecordsFn { #[cfg(test)] mod tests { - use vector_common::TimeZone; use vrl::compiler::state::RuntimeState; use vrl::compiler::TargetValue; + use vrl::compiler::TimeZone; use vrl::value; use vrl::value::Secrets; diff --git a/lib/enrichment/src/get_enrichment_table_record.rs b/lib/enrichment/src/get_enrichment_table_record.rs index edaac844966c1..3678d85e4fea0 100644 --- a/lib/enrichment/src/get_enrichment_table_record.rs +++ 
b/lib/enrichment/src/get_enrichment_table_record.rs @@ -183,7 +183,7 @@ impl FunctionExpression for GetEnrichmentTableRecordFn { #[cfg(test)] mod tests { - use vector_common::TimeZone; + use vrl::compiler::prelude::TimeZone; use vrl::compiler::state::RuntimeState; use vrl::compiler::TargetValue; use vrl::value; From bf7d79623c0b575dd0bb6f851cc12c15cea5eb5f Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Fri, 9 Jun 2023 13:12:55 -0400 Subject: [PATCH 125/236] feat(codecs): Add lossy option to JSON deserializer (#17628) Closes: https://github.com/vectordotdev/vector/issues/16406. Adds a `decoding.json.lossy` option. --- lib/codecs/src/decoding/format/json.rs | 107 +++++++++++++++--- lib/codecs/src/decoding/format/mod.rs | 2 +- lib/codecs/src/decoding/mod.rs | 38 +++++-- src/codecs/decoding/decoder.rs | 2 +- src/components/validation/resources/mod.rs | 6 +- src/sources/datadog_agent/tests.rs | 8 +- src/sources/http_client/client.rs | 4 +- src/sources/http_client/integration_tests.rs | 28 +++-- src/sources/http_client/tests.rs | 12 +- src/sources/http_server.rs | 26 +++-- .../components/sources/base/amqp.cue | 86 ++++++++------ .../sources/base/aws_kinesis_firehose.cue | 86 ++++++++------ .../components/sources/base/aws_s3.cue | 86 ++++++++------ .../components/sources/base/aws_sqs.cue | 86 ++++++++------ .../components/sources/base/datadog_agent.cue | 86 ++++++++------ .../components/sources/base/demo_logs.cue | 86 ++++++++------ .../components/sources/base/exec.cue | 96 +++++++++------- .../sources/base/file_descriptor.cue | 96 +++++++++------- .../components/sources/base/gcp_pubsub.cue | 86 ++++++++------ .../components/sources/base/heroku_logs.cue | 86 ++++++++------ .../components/sources/base/http.cue | 84 ++++++++------ .../components/sources/base/http_client.cue | 86 ++++++++------ .../components/sources/base/http_server.cue | 84 ++++++++------ .../components/sources/base/kafka.cue | 86 ++++++++------ .../components/sources/base/nats.cue | 86 ++++++++------ .../components/sources/base/redis.cue | 86 ++++++++------ .../components/sources/base/socket.cue | 96 +++++++++------- .../components/sources/base/stdin.cue | 96 +++++++++------- 28 files changed, 1131 insertions(+), 686 deletions(-) diff --git a/lib/codecs/src/decoding/format/json.rs b/lib/codecs/src/decoding/format/json.rs index 32f28e5e58436..164d91756ff15 100644 --- a/lib/codecs/src/decoding/format/json.rs +++ b/lib/codecs/src/decoding/format/json.rs @@ -2,9 +2,11 @@ use std::convert::TryInto; use bytes::Bytes; use chrono::Utc; +use derivative::Derivative; use lookup::PathPrefix; use serde::{Deserialize, Serialize}; use smallvec::{smallvec, SmallVec}; +use vector_config::configurable_component; use vector_core::{ config::{log_schema, DataType, LogNamespace}, event::Event, @@ -16,7 +18,36 @@ use super::Deserializer; /// Config used to build a `JsonDeserializer`. #[derive(Debug, Clone, Default, Deserialize, Serialize)] -pub struct JsonDeserializerConfig; +pub struct JsonDeserializerConfig { + #[serde( + default, + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + /// Options for the JSON deserializer. + pub json: JsonDeserializerOptions, +} + +/// JSON-specific decoding options. +#[configurable_component] +#[derive(Debug, Clone, PartialEq, Eq, Derivative)] +#[derivative(Default)] +pub struct JsonDeserializerOptions { + /// Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. 
+ /// + /// When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + /// + /// [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + #[serde( + default = "default_lossy", + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + #[derivative(Default(value = "default_lossy()"))] + lossy: bool, +} + +const fn default_lossy() -> bool { + true +} impl JsonDeserializerConfig { /// Build the `JsonDeserializer` from this configuration. @@ -56,19 +87,23 @@ impl JsonDeserializerConfig { impl JsonDeserializerConfig { /// Creates a new `JsonDeserializerConfig`. - pub fn new() -> Self { - Default::default() + pub fn new(options: JsonDeserializerOptions) -> Self { + Self { json: options } } } /// Deserializer that builds `Event`s from a byte frame containing JSON. -#[derive(Debug, Clone, Default)] -pub struct JsonDeserializer; +#[derive(Debug, Clone, Derivative)] +#[derivative(Default)] +pub struct JsonDeserializer { + #[derivative(Default(value = "default_lossy()"))] + lossy: bool, +} impl JsonDeserializer { /// Creates a new `JsonDeserializer`. - pub fn new() -> Self { - Default::default() + pub fn new(lossy: bool) -> Self { + Self { lossy } } } @@ -84,8 +119,11 @@ impl Deserializer for JsonDeserializer { return Ok(smallvec![]); } - let json: serde_json::Value = serde_json::from_slice(&bytes) - .map_err(|error| format!("Error parsing JSON: {:?}", error))?; + let json: serde_json::Value = match self.lossy { + true => serde_json::from_str(&String::from_utf8_lossy(&bytes)), + false => serde_json::from_slice(&bytes), + } + .map_err(|error| format!("Error parsing JSON: {:?}", error))?; // If the root is an Array, split it into multiple events let mut events = match json { @@ -119,8 +157,10 @@ impl Deserializer for JsonDeserializer { } impl From<&JsonDeserializerConfig> for JsonDeserializer { - fn from(_: &JsonDeserializerConfig) -> Self { - Self + fn from(config: &JsonDeserializerConfig) -> Self { + Self { + lossy: config.json.lossy, + } } } @@ -133,7 +173,7 @@ mod tests { #[test] fn deserialize_json() { let input = Bytes::from(r#"{ "foo": 123 }"#); - let deserializer = JsonDeserializer::new(); + let deserializer = JsonDeserializer::default(); for namespace in [LogNamespace::Legacy, LogNamespace::Vector] { let events = deserializer.parse(input.clone(), namespace).unwrap(); @@ -160,7 +200,7 @@ mod tests { #[test] fn deserialize_json_array() { let input = Bytes::from(r#"[{ "foo": 123 }, { "bar": 456 }]"#); - let deserializer = JsonDeserializer::new(); + let deserializer = JsonDeserializer::default(); for namespace in [LogNamespace::Legacy, LogNamespace::Vector] { let events = deserializer.parse(input.clone(), namespace).unwrap(); let mut events = events.into_iter(); @@ -197,7 +237,7 @@ mod tests { #[test] fn deserialize_skip_empty() { let input = Bytes::from(""); - let deserializer = JsonDeserializer::new(); + let deserializer = JsonDeserializer::default(); for namespace in [LogNamespace::Legacy, LogNamespace::Vector] { let events = deserializer.parse(input.clone(), namespace).unwrap(); @@ -208,7 +248,44 @@ mod tests { #[test] fn deserialize_error_invalid_json() { let input = Bytes::from("{ foo"); - let deserializer = JsonDeserializer::new(); + let deserializer = JsonDeserializer::default(); + + for namespace in [LogNamespace::Legacy, LogNamespace::Vector] { + assert!(deserializer.parse(input.clone(), namespace).is_err()); + } + } + + #[test] + fn deserialize_lossy_replace_invalid_utf8() { + let input = 
Bytes::from(b"{ \"foo\": \"Hello \xF0\x90\x80World\" }".as_slice()); + let deserializer = JsonDeserializer::new(true); + + for namespace in [LogNamespace::Legacy, LogNamespace::Vector] { + let events = deserializer.parse(input.clone(), namespace).unwrap(); + let mut events = events.into_iter(); + + { + let event = events.next().unwrap(); + let log = event.as_log(); + assert_eq!(log["foo"], b"Hello \xEF\xBF\xBDWorld".into()); + assert_eq!( + log.get(( + lookup::PathPrefix::Event, + log_schema().timestamp_key().unwrap() + )) + .is_some(), + namespace == LogNamespace::Legacy + ); + } + + assert_eq!(events.next(), None); + } + } + + #[test] + fn deserialize_non_lossy_error_invalid_utf8() { + let input = Bytes::from(b"{ \"foo\": \"Hello \xF0\x90\x80World\" }".as_slice()); + let deserializer = JsonDeserializer::new(false); for namespace in [LogNamespace::Legacy, LogNamespace::Vector] { assert!(deserializer.parse(input.clone(), namespace).is_err()); diff --git a/lib/codecs/src/decoding/format/mod.rs b/lib/codecs/src/decoding/format/mod.rs index 8ac77f0a5fe06..d24faefb85382 100644 --- a/lib/codecs/src/decoding/format/mod.rs +++ b/lib/codecs/src/decoding/format/mod.rs @@ -14,7 +14,7 @@ mod syslog; use ::bytes::Bytes; use dyn_clone::DynClone; pub use gelf::{GelfDeserializer, GelfDeserializerConfig}; -pub use json::{JsonDeserializer, JsonDeserializerConfig}; +pub use json::{JsonDeserializer, JsonDeserializerConfig, JsonDeserializerOptions}; pub use native::{NativeDeserializer, NativeDeserializerConfig}; pub use native_json::{NativeJsonDeserializer, NativeJsonDeserializerConfig}; use smallvec::SmallVec; diff --git a/lib/codecs/src/decoding/mod.rs b/lib/codecs/src/decoding/mod.rs index 6501c5e3ceb6d..1fbd05a1ef2e2 100644 --- a/lib/codecs/src/decoding/mod.rs +++ b/lib/codecs/src/decoding/mod.rs @@ -9,8 +9,9 @@ use bytes::{Bytes, BytesMut}; pub use error::StreamDecodingError; pub use format::{ BoxedDeserializer, BytesDeserializer, BytesDeserializerConfig, GelfDeserializer, - GelfDeserializerConfig, JsonDeserializer, JsonDeserializerConfig, NativeDeserializer, - NativeDeserializerConfig, NativeJsonDeserializer, NativeJsonDeserializerConfig, + GelfDeserializerConfig, JsonDeserializer, JsonDeserializerConfig, JsonDeserializerOptions, + NativeDeserializer, NativeDeserializerConfig, NativeJsonDeserializer, + NativeJsonDeserializerConfig, }; #[cfg(feature = "syslog")] pub use format::{SyslogDeserializer, SyslogDeserializerConfig}; @@ -243,7 +244,14 @@ pub enum DeserializerConfig { /// Decodes the raw bytes as [JSON][json]. /// /// [json]: https://www.json.org/ - Json, + Json { + /// Options for the JSON deserializer. + #[serde( + default, + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + json: JsonDeserializerOptions, + }, #[cfg(feature = "syslog")] /// Decodes the raw bytes as a Syslog message. 
@@ -284,8 +292,8 @@ impl From for DeserializerConfig { } impl From for DeserializerConfig { - fn from(_: JsonDeserializerConfig) -> Self { - Self::Json + fn from(config: JsonDeserializerConfig) -> Self { + Self::Json { json: config.json } } } @@ -307,7 +315,9 @@ impl DeserializerConfig { pub fn build(&self) -> Deserializer { match self { DeserializerConfig::Bytes => Deserializer::Bytes(BytesDeserializerConfig.build()), - DeserializerConfig::Json => Deserializer::Json(JsonDeserializerConfig.build()), + DeserializerConfig::Json { json } => { + Deserializer::Json(JsonDeserializerConfig::new(json.clone()).build()) + } #[cfg(feature = "syslog")] DeserializerConfig::Syslog => { Deserializer::Syslog(SyslogDeserializerConfig::default().build()) @@ -325,7 +335,7 @@ impl DeserializerConfig { match self { DeserializerConfig::Native => FramingConfig::LengthDelimited, DeserializerConfig::Bytes - | DeserializerConfig::Json + | DeserializerConfig::Json { .. } | DeserializerConfig::Gelf | DeserializerConfig::NativeJson => FramingConfig::NewlineDelimited { newline_delimited: Default::default(), @@ -341,7 +351,9 @@ impl DeserializerConfig { pub fn output_type(&self) -> DataType { match self { DeserializerConfig::Bytes => BytesDeserializerConfig.output_type(), - DeserializerConfig::Json => JsonDeserializerConfig.output_type(), + DeserializerConfig::Json { json } => { + JsonDeserializerConfig::new(json.clone()).output_type() + } #[cfg(feature = "syslog")] DeserializerConfig::Syslog => SyslogDeserializerConfig::default().output_type(), DeserializerConfig::Native => NativeDeserializerConfig.output_type(), @@ -354,7 +366,9 @@ impl DeserializerConfig { pub fn schema_definition(&self, log_namespace: LogNamespace) -> schema::Definition { match self { DeserializerConfig::Bytes => BytesDeserializerConfig.schema_definition(log_namespace), - DeserializerConfig::Json => JsonDeserializerConfig.schema_definition(log_namespace), + DeserializerConfig::Json { json } => { + JsonDeserializerConfig::new(json.clone()).schema_definition(log_namespace) + } #[cfg(feature = "syslog")] DeserializerConfig::Syslog => { SyslogDeserializerConfig::default().schema_definition(log_namespace) @@ -371,12 +385,12 @@ impl DeserializerConfig { pub const fn content_type(&self, framer: &FramingConfig) -> &'static str { match (&self, framer) { ( - DeserializerConfig::Json | DeserializerConfig::NativeJson, + DeserializerConfig::Json { .. } | DeserializerConfig::NativeJson, FramingConfig::NewlineDelimited { .. }, ) => "application/x-ndjson", ( DeserializerConfig::Gelf - | DeserializerConfig::Json + | DeserializerConfig::Json { .. } | DeserializerConfig::NativeJson, FramingConfig::CharacterDelimited { character_delimited: @@ -388,7 +402,7 @@ impl DeserializerConfig { ) => "application/json", (DeserializerConfig::Native, _) => "application/octet-stream", ( - DeserializerConfig::Json + DeserializerConfig::Json { .. 
} | DeserializerConfig::NativeJson | DeserializerConfig::Bytes | DeserializerConfig::Gelf, diff --git a/src/codecs/decoding/decoder.rs b/src/codecs/decoding/decoder.rs index 6f212f450dc83..ecf19c17d715e 100644 --- a/src/codecs/decoding/decoder.rs +++ b/src/codecs/decoding/decoder.rs @@ -122,7 +122,7 @@ mod tests { let reader = StreamReader::new(stream); let decoder = Decoder::new( Framer::NewlineDelimited(NewlineDelimitedDecoder::new()), - Deserializer::Json(JsonDeserializer::new()), + Deserializer::Json(JsonDeserializer::default()), ); let mut stream = FramedRead::new(reader, decoder); diff --git a/src/components/validation/resources/mod.rs b/src/components/validation/resources/mod.rs index b6427bd7f6ab5..bbe80843753a5 100644 --- a/src/components/validation/resources/mod.rs +++ b/src/components/validation/resources/mod.rs @@ -141,7 +141,7 @@ fn deserializer_config_to_serializer(config: &DeserializerConfig) -> encoding::S // "bytes" can be a top-level field and we aren't implicitly decoding everything into the // `message` field... but it's close enough for now. DeserializerConfig::Bytes => SerializerConfig::Text(TextSerializerConfig::default()), - DeserializerConfig::Json => SerializerConfig::Json(JsonSerializerConfig::default()), + DeserializerConfig::Json { .. } => SerializerConfig::Json(JsonSerializerConfig::default()), // TODO: We need to create an Avro serializer because, certainly, for any source decoding // the data as Avro, we can't possibly send anything else without the source just // immediately barfing. @@ -184,7 +184,9 @@ fn serializer_config_to_deserializer(config: &SerializerConfig) -> decoding::Des SerializerConfig::Avro { .. } => todo!(), SerializerConfig::Csv { .. } => todo!(), SerializerConfig::Gelf => DeserializerConfig::Gelf, - SerializerConfig::Json(_) => DeserializerConfig::Json, + SerializerConfig::Json(_) => DeserializerConfig::Json { + json: Default::default(), + }, SerializerConfig::Logfmt => todo!(), SerializerConfig::Native => DeserializerConfig::Native, SerializerConfig::NativeJson => DeserializerConfig::NativeJson, diff --git a/src/sources/datadog_agent/tests.rs b/src/sources/datadog_agent/tests.rs index 4508082573d6c..bd7d521b43e5e 100644 --- a/src/sources/datadog_agent/tests.rs +++ b/src/sources/datadog_agent/tests.rs @@ -1595,7 +1595,9 @@ fn test_config_outputs() { ( "json / single output", TestCase { - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, multiple_outputs: false, want: HashMap::from([( None, @@ -1620,7 +1622,9 @@ fn test_config_outputs() { ( "json / multiple output", TestCase { - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, multiple_outputs: true, want: HashMap::from([ ( diff --git a/src/sources/http_client/client.rs b/src/sources/http_client/client.rs index 6732a4975aad7..0b8ba25dace35 100644 --- a/src/sources/http_client/client.rs +++ b/src/sources/http_client/client.rs @@ -235,7 +235,9 @@ impl ValidatableComponent for HttpClientConfig { let config = Self { endpoint: uri.to_string(), interval: Duration::from_secs(1), - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, ..Default::default() }; diff --git a/src/sources/http_client/integration_tests.rs b/src/sources/http_client/integration_tests.rs index ec7b9f11d71fc..d49b7f92111fd 100644 --- a/src/sources/http_client/integration_tests.rs +++ b/src/sources/http_client/integration_tests.rs @@ -96,7 +96,9 @@ async fn 
collected_logs_json() { endpoint: format!("{}/logs/json.json", dufs_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -173,7 +175,9 @@ async fn unauthorized_no_auth() { endpoint: format!("{}/logs/json.json", dufs_auth_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -191,7 +195,9 @@ async fn unauthorized_wrong_auth() { endpoint: format!("{}/logs/json.json", dufs_auth_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -212,7 +218,9 @@ async fn authorized() { endpoint: format!("{}/logs/json.json", dufs_auth_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -233,7 +241,9 @@ async fn tls_invalid_ca() { endpoint: format!("{}/logs/json.json", dufs_https_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -254,7 +264,9 @@ async fn tls_valid() { endpoint: format!("{}/logs/json.json", dufs_https_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -276,7 +288,9 @@ async fn shutdown() { endpoint: format!("{}/logs/json.json", dufs_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, diff --git a/src/sources/http_client/tests.rs b/src/sources/http_client/tests.rs index 8676dc5678cd4..bf1e6b007511a 100644 --- a/src/sources/http_client/tests.rs +++ b/src/sources/http_client/tests.rs @@ -78,7 +78,9 @@ async fn json_decoding_newline_delimited() { endpoint: format!("http://{}/endpoint", in_addr), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, framing: FramingConfig::NewlineDelimited { newline_delimited: NewlineDelimitedDecoderOptions::default(), }, @@ -108,7 +110,9 @@ async fn json_decoding_character_delimited() { endpoint: format!("http://{}/endpoint", in_addr), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json, + decoding: DeserializerConfig::Json { + json: Default::default(), + }, framing: FramingConfig::CharacterDelimited { character_delimited: CharacterDelimitedDecoderOptions { delimiter: b',', @@ -146,7 +150,9 @@ async fn request_query_applied() { vec!["val1".to_string(), "val2".to_string()], ), ]), - decoding: DeserializerConfig::Json, 
+ decoding: DeserializerConfig::Json { + json: Default::default(), + }, framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, diff --git a/src/sources/http_server.rs b/src/sources/http_server.rs index c4730fe7fee5b..50f9b3fd0bd32 100644 --- a/src/sources/http_server.rs +++ b/src/sources/http_server.rs @@ -202,11 +202,11 @@ impl SimpleHttpConfig { ), Encoding::Json => ( BytesDecoderConfig::new().into(), - JsonDeserializerConfig::new().into(), + JsonDeserializerConfig::default().into(), ), Encoding::Ndjson => ( NewlineDelimitedDecoderConfig::new().into(), - JsonDeserializerConfig::new().into(), + JsonDeserializerConfig::default().into(), ), Encoding::Binary => ( BytesDecoderConfig::new().into(), @@ -256,7 +256,9 @@ impl_generate_config_from_default!(SimpleHttpConfig); impl ValidatableComponent for SimpleHttpConfig { fn validation_configuration() -> ValidationConfiguration { let config = Self { - decoding: Some(DeserializerConfig::Json), + decoding: Some(DeserializerConfig::Json { + json: Default::default(), + }), ..Default::default() }; @@ -760,7 +762,7 @@ mod tests { EventStatus::Delivered, true, None, - Some(JsonDeserializerConfig::new().into()), + Some(JsonDeserializerConfig::default().into()), ) .await; @@ -810,7 +812,7 @@ mod tests { EventStatus::Delivered, true, None, - Some(JsonDeserializerConfig::new().into()), + Some(JsonDeserializerConfig::default().into()), ) .await; @@ -853,7 +855,7 @@ mod tests { EventStatus::Delivered, true, None, - Some(JsonDeserializerConfig::new().into()), + Some(JsonDeserializerConfig::default().into()), ) .await; @@ -902,7 +904,7 @@ mod tests { EventStatus::Delivered, true, None, - Some(JsonDeserializerConfig::new().into()), + Some(JsonDeserializerConfig::default().into()), ) .await; @@ -986,7 +988,7 @@ mod tests { EventStatus::Delivered, true, None, - Some(JsonDeserializerConfig::new().into()), + Some(JsonDeserializerConfig::default().into()), ) .await; @@ -1027,7 +1029,7 @@ mod tests { EventStatus::Delivered, true, None, - Some(JsonDeserializerConfig::new().into()), + Some(JsonDeserializerConfig::default().into()), ) .await; @@ -1106,7 +1108,7 @@ mod tests { EventStatus::Delivered, true, None, - Some(JsonDeserializerConfig::new().into()), + Some(JsonDeserializerConfig::default().into()), ) .await; @@ -1150,7 +1152,7 @@ mod tests { EventStatus::Delivered, true, None, - Some(JsonDeserializerConfig::new().into()), + Some(JsonDeserializerConfig::default().into()), ) .await; @@ -1219,7 +1221,7 @@ mod tests { EventStatus::Delivered, true, None, - Some(JsonDeserializerConfig::new().into()), + Some(JsonDeserializerConfig::default().into()), ) .await; diff --git a/website/cue/reference/components/sources/base/amqp.cue b/website/cue/reference/components/sources/base/amqp.cue index bf487fc82b63c..404b600a86097 100644 --- a/website/cue/reference/components/sources/base/amqp.cue +++ b/website/cue/reference/components/sources/base/amqp.cue @@ -49,48 +49,66 @@ base: components: sources: amqp: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. 
+ type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - This codec is **[experimental][experimental]**. + This codec is **[experimental][experimental]**. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue b/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue index 998223c93dc03..73b2de60e2050 100644 --- a/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue @@ -52,48 +52,66 @@ base: components: sources: aws_kinesis_firehose: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." 
required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - This codec is **[experimental][experimental]**. + This codec is **[experimental][experimental]**. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
- [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/aws_s3.cue b/website/cue/reference/components/sources/base/aws_s3.cue index 8311de4c71911..03589426d8a57 100644 --- a/website/cue/reference/components/sources/base/aws_s3.cue +++ b/website/cue/reference/components/sources/base/aws_s3.cue @@ -138,48 +138,66 @@ base: components: sources: aws_s3: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. - This codec is **[experimental][experimental]**. + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + This codec is **[experimental][experimental]**. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). 
- [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/aws_sqs.cue b/website/cue/reference/components/sources/base/aws_sqs.cue index 52cd9c2e42729..b2d469c8db6a5 100644 --- a/website/cue/reference/components/sources/base/aws_sqs.cue +++ b/website/cue/reference/components/sources/base/aws_sqs.cue @@ -133,48 +133,66 @@ base: components: sources: aws_sqs: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - This codec is **[experimental][experimental]**. + This codec is **[experimental][experimental]**. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. 
+ + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/datadog_agent.cue b/website/cue/reference/components/sources/base/datadog_agent.cue index 33d7ee2302186..b356e18ac169c 100644 --- a/website/cue/reference/components/sources/base/datadog_agent.cue +++ b/website/cue/reference/components/sources/base/datadog_agent.cue @@ -34,48 +34,66 @@ base: components: sources: datadog_agent: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - This codec is **[experimental][experimental]**. + This codec is **[experimental][experimental]**. 
- [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/demo_logs.cue b/website/cue/reference/components/sources/base/demo_logs.cue index 534afb0a437de..d75717f71c32d 100644 --- a/website/cue/reference/components/sources/base/demo_logs.cue +++ b/website/cue/reference/components/sources/base/demo_logs.cue @@ -13,48 +13,66 @@ base: components: sources: demo_logs: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. - This codec is **[experimental][experimental]**. 
+ [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + This codec is **[experimental][experimental]**. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/exec.cue b/website/cue/reference/components/sources/base/exec.cue index 06c7c09fadb96..a182abfc51e2b 100644 --- a/website/cue/reference/components/sources/base/exec.cue +++ b/website/cue/reference/components/sources/base/exec.cue @@ -9,48 +9,66 @@ base: components: sources: exec: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. - - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - - This codec is **[experimental][experimental]**. - - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - - This codec is **[experimental][experimental]**. - - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. 
+ type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. + + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. + + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + + This codec is **[experimental][experimental]**. + + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/file_descriptor.cue b/website/cue/reference/components/sources/base/file_descriptor.cue index 3d0be84a25207..6bbbda11ed427 100644 --- a/website/cue/reference/components/sources/base/file_descriptor.cue +++ b/website/cue/reference/components/sources/base/file_descriptor.cue @@ -4,48 +4,66 @@ base: components: sources: file_descriptor: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. - - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - - This codec is **[experimental][experimental]**. 
- - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - - This codec is **[experimental][experimental]**. - - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. + + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. + + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + + This codec is **[experimental][experimental]**. + + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/gcp_pubsub.cue b/website/cue/reference/components/sources/base/gcp_pubsub.cue index afd10831e91a7..f5b03561cce9e 100644 --- a/website/cue/reference/components/sources/base/gcp_pubsub.cue +++ b/website/cue/reference/components/sources/base/gcp_pubsub.cue @@ -80,48 +80,66 @@ base: components: sources: gcp_pubsub: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." 
- gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - This codec is **[experimental][experimental]**. + This codec is **[experimental][experimental]**. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
- [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/heroku_logs.cue b/website/cue/reference/components/sources/base/heroku_logs.cue index 3bb613492ab97..e42f80fc2c476 100644 --- a/website/cue/reference/components/sources/base/heroku_logs.cue +++ b/website/cue/reference/components/sources/base/heroku_logs.cue @@ -46,48 +46,66 @@ base: components: sources: heroku_logs: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - This codec is **[experimental][experimental]**. + This codec is **[experimental][experimental]**. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." 
+ relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/http.cue b/website/cue/reference/components/sources/base/http.cue index c48d01eb2a4a9..12a95c494068e 100644 --- a/website/cue/reference/components/sources/base/http.cue +++ b/website/cue/reference/components/sources/base/http.cue @@ -50,46 +50,64 @@ base: components: sources: http: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: true - type: string: enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: true + type: string: enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. - This codec is **[experimental][experimental]**. + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + This codec is **[experimental][experimental]**. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). 
+ [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt - """ + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } } } } diff --git a/website/cue/reference/components/sources/base/http_client.cue b/website/cue/reference/components/sources/base/http_client.cue index 13577f858e80e..25209b31e2843 100644 --- a/website/cue/reference/components/sources/base/http_client.cue +++ b/website/cue/reference/components/sources/base/http_client.cue @@ -46,48 +46,66 @@ base: components: sources: http_client: configuration: { decoding: { description: "Decoder to use on the HTTP responses." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. - This codec is **[experimental][experimental]**. + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. 
- [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + This codec is **[experimental][experimental]**. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/http_server.cue b/website/cue/reference/components/sources/base/http_server.cue index 6558067f7c8f6..9edad734be947 100644 --- a/website/cue/reference/components/sources/base/http_server.cue +++ b/website/cue/reference/components/sources/base/http_server.cue @@ -50,46 +50,64 @@ base: components: sources: http_server: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: true - type: string: enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: true + type: string: enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. - This codec is **[experimental][experimental]**. 
+ [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + This codec is **[experimental][experimental]**. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt - """ + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } } } } diff --git a/website/cue/reference/components/sources/base/kafka.cue b/website/cue/reference/components/sources/base/kafka.cue index 1c03c2e568393..30fee8d24567b 100644 --- a/website/cue/reference/components/sources/base/kafka.cue +++ b/website/cue/reference/components/sources/base/kafka.cue @@ -58,48 +58,66 @@ base: components: sources: kafka: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. 
- [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - This codec is **[experimental][experimental]**. + This codec is **[experimental][experimental]**. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/nats.cue b/website/cue/reference/components/sources/base/nats.cue index ca42dfb333f28..5e232b607a9e8 100644 --- a/website/cue/reference/components/sources/base/nats.cue +++ b/website/cue/reference/components/sources/base/nats.cue @@ -101,48 +101,66 @@ base: components: sources: nats: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. 
+ [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. - This codec is **[experimental][experimental]**. + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + This codec is **[experimental][experimental]**. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/redis.cue b/website/cue/reference/components/sources/base/redis.cue index 03cf03f45ef65..0336931a1bfcd 100644 --- a/website/cue/reference/components/sources/base/redis.cue +++ b/website/cue/reference/components/sources/base/redis.cue @@ -19,48 +19,66 @@ base: components: sources: redis: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. + type: object: options: { + codec: { + description: "The codec to use for decoding events." 
+ required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. - This codec is **[experimental][experimental]**. + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + This codec is **[experimental][experimental]**. - This codec is **[experimental][experimental]**. + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + This codec is **[experimental][experimental]**. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/socket.cue b/website/cue/reference/components/sources/base/socket.cue index f6236c0d1c94e..1c57034f87f8f 100644 --- a/website/cue/reference/components/sources/base/socket.cue +++ b/website/cue/reference/components/sources/base/socket.cue @@ -21,48 +21,66 @@ base: components: sources: socket: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." 
- required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. - - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - - This codec is **[experimental][experimental]**. - - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - - This codec is **[experimental][experimental]**. - - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. + + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. + + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + + This codec is **[experimental][experimental]**. + + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." + relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
- [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } diff --git a/website/cue/reference/components/sources/base/stdin.cue b/website/cue/reference/components/sources/base/stdin.cue index 4e5d9aef77028..7682e32ebecba 100644 --- a/website/cue/reference/components/sources/base/stdin.cue +++ b/website/cue/reference/components/sources/base/stdin.cue @@ -4,48 +4,66 @@ base: components: sources: stdin: configuration: { decoding: { description: "Configures how events are decoded from raw bytes." required: false - type: object: options: codec: { - description: "The codec to use for decoding events." - required: false - type: string: { - default: "bytes" - enum: { - bytes: "Uses the raw bytes as-is." - gelf: """ - Decodes the raw bytes as a [GELF][gelf] message. - - [gelf]: https://docs.graylog.org/docs/gelf - """ - json: """ - Decodes the raw bytes as [JSON][json]. - - [json]: https://www.json.org/ - """ - native: """ - Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. - - This codec is **[experimental][experimental]**. - - [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - native_json: """ - Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. - - This codec is **[experimental][experimental]**. - - [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue - [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - """ - syslog: """ - Decodes the raw bytes as a Syslog message. + type: object: options: { + codec: { + description: "The codec to use for decoding events." + required: false + type: string: { + default: "bytes" + enum: { + bytes: "Uses the raw bytes as-is." + gelf: """ + Decodes the raw bytes as a [GELF][gelf] message. + + [gelf]: https://docs.graylog.org/docs/gelf + """ + json: """ + Decodes the raw bytes as [JSON][json]. + + [json]: https://www.json.org/ + """ + native: """ + Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. + + This codec is **[experimental][experimental]**. + + [vector_native_protobuf]: https://github.com/vectordotdev/vector/blob/master/lib/vector-core/proto/event.proto + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + native_json: """ + Decodes the raw bytes as Vector’s [native JSON format][vector_native_json]. + + This codec is **[experimental][experimental]**. + + [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue + [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs + """ + syslog: """ + Decodes the raw bytes as a Syslog message. + + Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the + [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + + [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt + [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + """ + } + } + } + json: { + description: "Options for the JSON deserializer." 
+ relevant_when: "codec = \"json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - Decodes either as the [RFC 3164][rfc3164]-style format ("old" style) or the - [RFC 5424][rfc5424]-style format ("new" style, includes structured data). + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt - [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character """ + required: false + type: bool: default: true } } } From cb9a3a548877b222afb14159393b8bc7bc3f8518 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Jun 2023 12:20:50 -0600 Subject: [PATCH 126/236] chore(ci): Bump docker/build-push-action from 4.0.0 to 4.1.0 (#17656) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4.0.0 to 4.1.0.
Release notes

Sourced from docker/build-push-action's releases.

v4.1.0

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v4.0.0...v4.1.0

Commits
  • 44ea916 Merge pull request #875 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • 0167eef update generated content
  • 91bf8bf chore(deps): Bump @​docker/actions-toolkit from 0.2.0 to 0.3.0
  • a799b4d Merge pull request #860 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • 87480bd update generated content
  • f9efed5 Merge pull request #871 from dvdksn/fix/secret-example-link
  • 3580b78 fix: broken link to secret example docs
  • 91df6b8 Merge pull request #859 from docker/dependabot/github_actions/docker/bake-act...
  • ea92b18 chore(deps): Bump @​docker/actions-toolkit from 0.1.0 to 0.2.0
  • 6f91eb3 chore(deps): Bump docker/bake-action from 2 to 3
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/build-push-action&package-manager=github_actions&previous-version=4.0.0&new-version=4.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- .github/workflows/regression.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index c02c042f3b03e..9379bfd404ebd 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -63,7 +63,7 @@ jobs: org.opencontainers.image.title=Vector development environment org.opencontainers.image.url=https://github.com/vectordotdev/vector - name: Build and push - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v4.1.0 with: context: . file: ./scripts/environment/Dockerfile diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 81e1f99fce1bf..fb711449f609b 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -298,7 +298,7 @@ jobs: uses: docker/setup-buildx-action@v2.6.0 - name: Build 'vector' target image - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v4.1.0 with: context: baseline-vector/ cache-from: type=gha @@ -335,7 +335,7 @@ jobs: uses: docker/setup-buildx-action@v2.6.0 - name: Build 'vector' target image - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v4.1.0 with: context: comparison-vector/ cache-from: type=gha From e1b335748ef3b1345db9f5b9af11b5df2f24868a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 11:02:27 +0000 Subject: [PATCH 127/236] chore(deps): Bump log from 0.4.18 to 0.4.19 (#17662) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [log](https://github.com/rust-lang/log) from 0.4.18 to 0.4.19.
Changelog

Sourced from log's changelog.

[0.4.19] - 2023-06-10

Commits
  • 84ddc30 Merge pull request #552 from rust-lang/cargo/0.4.18
  • 9ae986d Merge pull request #548 from iorust/master
  • 5322e56 update changelog to reflect actual changes
  • db9e5cc Add the structured-logger crate to the README
  • 92e83c0 fixes for unrevert
  • 54d48fb prepare for 0.4.19 release
  • 3c8473d Revert "Revert "Remove build.rs file""
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=log&package-manager=cargo&previous-version=0.4.18&new-version=0.4.19)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- vdev/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce9e6d92c79c3..e8219466e8a36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4683,9 +4683,9 @@ checksum = "8166fbddef141acbea89cf3425ed97d4c22d14a68161977fc01c301175a4fb89" [[package]] name = "log" -version = "0.4.18" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de" +checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4" [[package]] name = "logfmt" diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 23ab8ab123fda..5b53649a38280 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -24,7 +24,7 @@ hashlink = { version = "0.8.2", features = ["serde_impl"] } hex = "0.4.3" indicatif = { version = "0.17.5", features = ["improved_unicode"] } itertools = "0.10.5" -log = "0.4.18" +log = "0.4.19" once_cell = "1.18" os_info = { version = "3.7.0", default-features = false } # watch https://github.com/epage/anstyle for official interop with Clap From 37a662a9c2e388dc1699f90288c5d856381d15d4 Mon Sep 17 00:00:00 2001 From: Toby Lawrence Date: Tue, 13 Jun 2023 07:26:02 -0400 Subject: [PATCH 128/236] fix(buffers): deadlock when seeking after entire write fails to be flushed (#17657) ## Context In #17644, a user reported disk buffers getting stuck in an infinite loop, and thus deadlocking, when restarting after a crash. They provided some very useful debug information, going as far to evaluate, add some logging, and get some values of internal state for the reader. When a disk buffer is initialized -- either for the first time or after Vector is restarted and the buffer must resume where it left off -- both the reader and writer perform a catch-up phase. For the writer, it checks the current data file and tries to figure out if the last record written matches where it believes it left off. For the reader, it actually has to dynamically seek to where it left off within the the given data file, since we can't just open the file and start from the beginning: data files are append-only. As part of the seek logic, there's a loop where we just call `Reader::next` until we read the record we supposedly left off on, and then we know we're caught up. This loop only breaks on two conditions: - `self.last_reader_record_id < ledger_last`, which implies we haven't yet read the last record we left off on (otherwise it would be equal to `ledger_last`) - `maybe_record.is_none() && self.last_reader_record_id == 0`, which would tell us that we reached EOF on the data file (no more records) but nothing was in the file (`last_reader_record_id` still being 0) While the first conditional is correct, the second one is not. The user that originally reported the issue [said as much](https://github.com/vectordotdev/vector/issues/17644#issuecomment-1584256140), but dropping the `&& self.last_reader_record_id == 0` fixes the issue. In this case, there can exist a scenario where Vector crashes and writes that the reader had read and acknowledged never actually make it to disk. Both the reader/writer are able to outpace the data on disk because the reader can read yet-to-be-flushed records since they exist as dirty pages in the page cache. 
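To make the failure mode easier to follow, here is a small self-contained sketch (a toy model with made-up helper names, not Vector's actual reader, ledger, or data file types) that simulates the catch-up loop with both the original and the corrected break condition:

```rust
/// Toy model of the disk buffer reader's catch-up loop. `records_on_disk` holds the
/// record IDs that actually survived the crash, while `ledger_last` is the last record
/// ID the ledger believes was read. Returns `false` if the loop never terminates
/// (bounded here by a step counter so the demonstration itself cannot hang).
fn seek_to_next_record(records_on_disk: &[u64], ledger_last: u64, fixed: bool) -> bool {
    let mut last_reader_record_id = 0u64;
    let mut reads = records_on_disk.iter().copied();
    let mut steps = 0u32;

    while last_reader_record_id < ledger_last {
        steps += 1;
        if steps > 1_000 {
            return false; // stand-in for "spins forever": the deadlock
        }
        match reads.next() {
            // A record was read: advance the reader's position.
            Some(record_id) => last_reader_record_id = record_id,
            // EOF on the current data file.
            None => {
                // Original condition: only break if nothing was ever read.
                // Corrected condition: break unconditionally on EOF.
                if fixed || last_reader_record_id == 0 {
                    break;
                }
            }
        }
    }

    true
}

fn main() {
    // Five records made it to disk, but the ledger thinks the reader reached ID 10.
    let on_disk: Vec<u64> = (1..=5).collect();
    assert!(!seek_to_next_record(&on_disk, 10, false)); // old condition never terminates
    assert!(seek_to_next_record(&on_disk, 10, true)); // fixed condition stops at EOF
    println!("old condition deadlocks; fixed condition terminates at EOF");
}
```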
When this happens, the reader may have indicated to the ledger that it, for example, has read up to record ID 10 while the last record _on disk_ when Vector starts up is record ID 5. When the seek logic runs, it knows the last read record ID was 10. It will do some number of reads while seeking, eventually reading record ID 5, and updating `self.last_reader_record_id` accordingly. On the next iteration of the loop, it tries to read but hits EOF: the data file indeed has nothing left. However, `self.last_reader_record_id < ledger_last` is still true while `maybe_record.is_none() && self.last_reader_record_id == 0` is not, as `self.last_reader_record_id` is set to `5`. Alas, deadlock. ## Solution The solution is painfully simple, and the user that originally reported the issue [said as much](https://github.com/vectordotdev/vector/issues/17644#issuecomment-1584256140): drop `&& self.last_reader_record_id == 0`. Given the loop's own condition, the inner check for `self.last_reader_record_id == 0` was redundant... but obviously also logically incorrect, too, in the case where we had missing writes. I'm still not entirely sure how existing tests didn't already catch this, but it was easy enough to spot the error once I knew where to look, and the resulting unit test I added convincingly showed that it was broken, and after making the change, indeed fixed. ## Reviewer Note(s) I added two unit tests: one for the fix as shown and one for what I thought was another bug. Turns out that the "other bug" wasn't a bug, and this unit test isn't _explicitly_ required, but it's a simple variation of other tests with a more straightforward invariant that it tries to demonstrate, so I just left it in. Fixes #17644. --- .../src/variants/disk_v2/reader.rs | 8 +- .../variants/disk_v2/tests/initialization.rs | 95 +++++++++++++++ .../src/variants/disk_v2/tests/invariants.rs | 110 +++++++++++++++++- .../src/variants/disk_v2/tests/mod.rs | 18 +++ 4 files changed, 224 insertions(+), 7 deletions(-) diff --git a/lib/vector-buffers/src/variants/disk_v2/reader.rs b/lib/vector-buffers/src/variants/disk_v2/reader.rs index 2587160f3beca..7d4da1faafa1e 100644 --- a/lib/vector-buffers/src/variants/disk_v2/reader.rs +++ b/lib/vector-buffers/src/variants/disk_v2/reader.rs @@ -901,12 +901,8 @@ where while self.last_reader_record_id < ledger_last { match self.next().await { Ok(maybe_record) => { - if maybe_record.is_none() && self.last_reader_record_id == 0 { - // We've hit a point where there's no more data to read. If our "last reader record - // ID" hasn't moved at all, that means the buffer was already empty and we're caught - // up, so we just pin ourselves to where the ledger says we left off, and we're good - // to go. - self.last_reader_record_id = ledger_last; + if maybe_record.is_none() { + // We've hit the end of the current data file so we've gone as far as we can. 
break; } } diff --git a/lib/vector-buffers/src/variants/disk_v2/tests/initialization.rs b/lib/vector-buffers/src/variants/disk_v2/tests/initialization.rs index 8c44937563170..217057d3e0c7e 100644 --- a/lib/vector-buffers/src/variants/disk_v2/tests/initialization.rs +++ b/lib/vector-buffers/src/variants/disk_v2/tests/initialization.rs @@ -87,3 +87,98 @@ async fn reader_doesnt_block_from_partial_write_on_last_record() { let parent = trace_span!("reader_doesnt_block_from_partial_write_on_last_record"); fut.instrument(parent.or_current()).await; } + +#[tokio::test] +async fn reader_doesnt_block_when_ahead_of_last_record_in_current_data_file() { + // When initializing, the reader will be catching up to the last record it read, which involves + // reading individual records in the current reader data file until a record is returned whose + // record ID matches the "last record ID read" field from the ledger. + // + // If the current data file contains a valid last record when we initialize, but that last + // record is _behind_ the last record read as tracked by the ledger, then we need to ensure we + // can break out of the catch-up loop when we get to the end of the current data file. + // + // Our existing logic for corrupted event detection, and the writer's own initialization logic, + // will emit an error message when we realize that data is missing based on record ID gaps. + let _a = install_tracing_helpers(); + + let fut = with_temp_dir(|dir| { + let data_dir = dir.to_path_buf(); + + async move { + // Create a regular buffer, no customizations required. + let (mut writer, mut reader, ledger) = create_default_buffer_v2(data_dir.clone()).await; + + // Write two records, and then read and acknowledge both. + // + // This puts the buffer into a state where there's data in the current data file, and + // the ledger has a non-zero record ID for where it thinks the reader needs to be. This + // ensures that the reader actually does at least two calls to `Reader::next` during + // `Reader::seek_to_next_record`, which is necessary to ensure that the reader leaves + // the default state of `self.last_reader_record_id == 0`. + let first_bytes_written = writer + .write_record(SizedRecord::new(64)) + .await + .expect("should not fail to write"); + writer.flush().await.expect("flush should not fail"); + + let second_bytes_written = writer + .write_record(SizedRecord::new(68)) + .await + .expect("should not fail to write"); + writer.flush().await.expect("flush should not fail"); + + writer.close(); + + let first_read = reader + .next() + .await + .expect("should not fail to read record") + .expect("should contain first record"); + assert_eq!(SizedRecord::new(64), first_read); + acknowledge(first_read).await; + + let second_read = reader + .next() + .await + .expect("should not fail to read record") + .expect("should contain first record"); + assert_eq!(SizedRecord::new(68), second_read); + acknowledge(second_read).await; + + let third_read = reader.next().await.expect("should not fail to read record"); + assert!(third_read.is_none()); + + ledger.flush().expect("should not fail to flush ledger"); + + // Grab the current writer data file path before dropping the buffer. + let data_file_path = ledger.get_current_writer_data_file_path(); + drop(reader); + drop(writer); + drop(ledger); + + // Open the data file and truncate the second record. This will ensure that the reader + // hits EOF after the first read, which we need to do in order to exercise the logic + // that breaks out of the loop. 
+ let initial_len = first_bytes_written as u64 + second_bytes_written as u64; + let target_len = first_bytes_written as u64; + set_file_length(&data_file_path, initial_len, target_len) + .await + .expect("should not fail to truncate data file"); + + // Now reopen the buffer, which should complete in a timely fashion without an immediate error. + let reopen = timeout( + Duration::from_millis(500), + create_default_buffer_v2::<_, SizedRecord>(data_dir), + ) + .await; + assert!( + reopen.is_ok(), + "failed to reopen buffer in a timely fashion; likely deadlock" + ); + } + }); + + let parent = trace_span!("reader_doesnt_block_when_ahead_of_last_record_in_current_data_file"); + fut.instrument(parent.or_current()).await; +} diff --git a/lib/vector-buffers/src/variants/disk_v2/tests/invariants.rs b/lib/vector-buffers/src/variants/disk_v2/tests/invariants.rs index ef56bb19d3f4e..29adb49f17243 100644 --- a/lib/vector-buffers/src/variants/disk_v2/tests/invariants.rs +++ b/lib/vector-buffers/src/variants/disk_v2/tests/invariants.rs @@ -5,7 +5,8 @@ use tracing::Instrument; use super::{create_buffer_v2_with_max_data_file_size, read_next, read_next_some}; use crate::{ assert_buffer_is_empty, assert_buffer_records, assert_buffer_size, assert_enough_bytes_written, - assert_reader_writer_v2_file_positions, await_timeout, set_data_file_length, + assert_reader_last_writer_next_positions, assert_reader_writer_v2_file_positions, + await_timeout, set_data_file_length, test::{acknowledge, install_tracing_helpers, with_temp_dir, MultiEventRecord, SizedRecord}, variants::disk_v2::{ common::{DEFAULT_FLUSH_INTERVAL, MAX_FILE_ID}, @@ -820,3 +821,110 @@ async fn writer_updates_ledger_when_buffered_writer_reports_implicit_flush() { }) .await; } + +#[tokio::test] +async fn reader_writer_positions_aligned_through_multiple_files_and_records() { + // This test ensures that the reader/writer position stay aligned through multiple records and + // data files. This is to say, that, if we write 5 records, each with 10 events, and then read + // and acknowledge all of those events... the writer's next record ID should be 51 (the 50th + // event would correspond to ID 50, so next ID would be 51) and the reader's last read record ID + // should be 50. + // + // Testing this across multiple data files isn't super germane to the position logic, but it + // just ensures we're also testing that aspect. + + let _a = install_tracing_helpers(); + let fut = with_temp_dir(|dir| { + let data_dir = dir.to_path_buf(); + + async move { + // Create our buffer with an arbitrarily low maximum data file size. We'll use this to + // control how many records make it into a given data file. Just another way to ensure + // we're testing the position logic with multiple writes to one data file, one write to + // a data file, etc. + let (mut writer, mut reader, ledger) = + create_buffer_v2_with_max_data_file_size(data_dir, 256).await; + + // We'll write multi-event records with N events based on these sizes, and as we do so, + // we'll assert that our writer position moves as expected after the write, and that + // after reading and acknowledging, the reader position also moves as expected. + let record_sizes = &[176, 52, 91, 137, 54, 87]; + + let mut expected_writer_position = ledger.state().get_next_writer_record_id(); + let mut expected_reader_position = ledger.state().get_last_reader_record_id(); + let mut trailing_reader_position_delta = 0; + + for record_size in record_sizes { + // Initial check before writing/reading the next record. 
+ assert_reader_last_writer_next_positions!( + ledger, + expected_reader_position, + expected_writer_position + ); + + let record = MultiEventRecord::new(*record_size); + assert_eq!( + record.event_count(), + usize::try_from(*record_size).unwrap_or(usize::MAX) + ); + + writer + .write_record(record) + .await + .expect("write should not fail"); + writer.flush().await.expect("flush should not fail"); + + expected_writer_position += u64::from(*record_size); + + // Make sure the writer position advanced after flushing. + assert_reader_last_writer_next_positions!( + ledger, + expected_reader_position, + expected_writer_position + ); + + let record_via_read = read_next_some(&mut reader).await; + assert_eq!(record_via_read, MultiEventRecord::new(*record_size)); + acknowledge(record_via_read).await; + + // Increment the expected reader position by the trailing reader position delta, and + // then now that we've done a read, we should be able to have seen actually move + // forward. + expected_reader_position += trailing_reader_position_delta; + assert_reader_last_writer_next_positions!( + ledger, + expected_reader_position, + expected_writer_position + ); + + // Set the trailing reader position delta to the record we just read. + // + // We do it this way because reads themselves have to drive acknowledgement logic to + // then drive updates to the ledger, so we will only see the change in the reader's + // position the _next_ time we do a read. + trailing_reader_position_delta = u64::from(*record_size); + } + + // Close the writer and do a final read, thus driving the acknowledgement logic, and + // position update logic, before we do our final position check. + writer.close(); + assert_eq!(reader.next().await, Ok(None)); + + // Calculate the absolute reader/writer positions we would expect based on all of the + // records/events written and read. This is to double check our work and make sure that + // the "expected" positions didn't hide any bugs from us. + let expected_final_reader_position = + record_sizes.iter().copied().map(u64::from).sum::(); + let expected_final_writer_position = expected_final_reader_position + 1; + + assert_reader_last_writer_next_positions!( + ledger, + expected_final_reader_position, + expected_final_writer_position + ); + } + }); + + let parent = trace_span!("reader_writer_positions_aligned_through_multiple_files_and_records"); + fut.instrument(parent.or_current()).await; +} diff --git a/lib/vector-buffers/src/variants/disk_v2/tests/mod.rs b/lib/vector-buffers/src/variants/disk_v2/tests/mod.rs index e123a39c228d6..8c479827355fd 100644 --- a/lib/vector-buffers/src/variants/disk_v2/tests/mod.rs +++ b/lib/vector-buffers/src/variants/disk_v2/tests/mod.rs @@ -134,6 +134,24 @@ macro_rules! assert_reader_writer_v2_file_positions { }}; } +#[macro_export] +macro_rules! assert_reader_last_writer_next_positions { + ($ledger:expr, $reader_expected:expr, $writer_expected:expr) => {{ + let reader_actual = $ledger.state().get_last_reader_record_id(); + let writer_actual = $ledger.state().get_next_writer_record_id(); + assert_eq!( + $reader_expected, reader_actual, + "expected reader last read record ID of {}, got {} instead", + $reader_expected, reader_actual, + ); + assert_eq!( + $writer_expected, writer_actual, + "expected writer next record ID of {}, got {} instead", + $writer_expected, writer_actual, + ); + }}; +} + #[macro_export] macro_rules! 
assert_enough_bytes_written { ($written:expr, $record_type:ty, $record_payload_size:expr) => { From 19c4d4f72a4c08fdf51299bd7b3b906f8f8d08c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 13:40:45 +0000 Subject: [PATCH 129/236] chore(deps): Bump wasm-bindgen from 0.2.86 to 0.2.87 (#17672) Bumps [wasm-bindgen](https://github.com/rustwasm/wasm-bindgen) from 0.2.86 to 0.2.87.
Changelog

Sourced from wasm-bindgen's changelog.

0.2.87

Released 2023-06-12.

Added

  • Implemented IntoIterator for Array. #3477

Changed

  • Deprecate HtmlMenuItemElement and parts of HtmlMenuElement. #3448

  • Stabilize ResizeObserver. #3459

Fixed

  • Take alignment into consideration during (de/re)allocation. #3463

Commits
  • f0a8ae3 Bump to 0.2.87 (#3475)
  • 673e3eb Implement IntoIterator for Array
  • 5f10d6f Update changelog
  • 10777c5 Stabilize ResizeObserver
  • a2ab2d5 Take alignment into consideration during malloc (#3463)
  • 3d78163 Fix CI
  • 5453e33 Fixed the port number for the wasm-audio-worklet demo to be in line with the ...
  • a334ce4 Fix TODOs in web-sys tests (#3449)
  • 4e6dcbe Deprecate HtmlMenuItemElement and parts of HtmlMenuElement
  • a9dea47 Fix TODO with get_index/set_index
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=wasm-bindgen&package-manager=cargo&previous-version=0.2.86&new-version=0.2.87)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e8219466e8a36..b8b016f3f848a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9783,9 +9783,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -9793,9 +9793,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", @@ -9820,9 +9820,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote 1.0.28", "wasm-bindgen-macro-support", @@ -9830,9 +9830,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.60", "quote 1.0.28", @@ -9843,9 +9843,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-streams" From f88357515c12240ae2a594324253e7f203ea27f9 Mon Sep 17 00:00:00 2001 From: Andy George Date: Tue, 13 Jun 2023 13:13:05 -0500 Subject: [PATCH 130/236] fix(install.sh): Correctly `shift` all parsed arguments (#17684) Closes #17681 Without `shift`ing the other possible input arguments, the `--prefix` argument will set the `prefix` incorrectly. When using [the official Docker example](https://vector.dev/docs/setup/installation/#installation-script), arguments of `-y --prefix` will result in the following error: ``` mkdir: unrecognized option '--prefix' ``` ...as `"$2"` is `--prefix`, because `-y` was never `shift`ed out. Adding a `shift` to the other arguments correctly removes them from the argument list when parsed. 
--- distribution/install.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/distribution/install.sh b/distribution/install.sh index bbca042d0e4c1..e2903c7b11e7e 100755 --- a/distribution/install.sh +++ b/distribution/install.sh @@ -76,9 +76,11 @@ main() { ;; --no-modify-path) modify_path=no + shift ;; -y) prompt=no + shift ;; *) ;; From ab1169bd40ff7f1fa8cf1e77d24cd779112b2178 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Tue, 13 Jun 2023 11:22:08 -0700 Subject: [PATCH 131/236] chore(ci): Add apt retries to cross builds (#17683) To see if this helps with flakey apt installs. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- scripts/cross/bootstrap-ubuntu.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/cross/bootstrap-ubuntu.sh b/scripts/cross/bootstrap-ubuntu.sh index 1b4889604b06b..cf053aa365b20 100755 --- a/scripts/cross/bootstrap-ubuntu.sh +++ b/scripts/cross/bootstrap-ubuntu.sh @@ -1,6 +1,8 @@ #!/bin/sh set -o errexit +echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/80-retries + apt-get update apt-get install -y \ apt-transport-https \ From 2dfa8509bcdb4220d32e3d91f7fdd61c081db5ea Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Tue, 13 Jun 2023 16:42:33 -0400 Subject: [PATCH 132/236] feat(codecs): add lossy option to `gelf`, `native_json`, and `syslog` deserializers (#17680) Adds a `lossy` option to the relevant deserializers. --- lib/codecs/src/decoding/format/gelf.rs | 69 ++++++++---- lib/codecs/src/decoding/format/json.rs | 57 +++++----- lib/codecs/src/decoding/format/mod.rs | 15 ++- lib/codecs/src/decoding/format/native_json.rs | 57 ++++++++-- lib/codecs/src/decoding/format/syslog.rs | 53 +++++++-- lib/codecs/src/decoding/mod.rs | 102 ++++++++++++------ lib/codecs/tests/native.rs | 8 +- src/components/validation/resources/mod.rs | 14 ++- src/config/mod.rs | 1 + src/sources/datadog_agent/tests.rs | 8 +- src/sources/http_client/integration_tests.rs | 8 +- src/sources/syslog.rs | 22 ++-- .../components/sources/base/amqp.cue | 52 ++++++++- .../sources/base/aws_kinesis_firehose.cue | 52 ++++++++- .../components/sources/base/aws_s3.cue | 52 ++++++++- .../components/sources/base/aws_sqs.cue | 52 ++++++++- .../components/sources/base/datadog_agent.cue | 52 ++++++++- .../components/sources/base/demo_logs.cue | 52 ++++++++- .../components/sources/base/exec.cue | 52 ++++++++- .../sources/base/file_descriptor.cue | 52 ++++++++- .../components/sources/base/gcp_pubsub.cue | 52 ++++++++- .../components/sources/base/heroku_logs.cue | 52 ++++++++- .../components/sources/base/http.cue | 52 ++++++++- .../components/sources/base/http_client.cue | 52 ++++++++- .../components/sources/base/http_server.cue | 52 ++++++++- .../components/sources/base/kafka.cue | 52 ++++++++- .../components/sources/base/nats.cue | 52 ++++++++- .../components/sources/base/redis.cue | 52 ++++++++- .../components/sources/base/socket.cue | 52 ++++++++- .../components/sources/base/stdin.cue | 52 ++++++++- 30 files changed, 1185 insertions(+), 165 deletions(-) diff --git a/lib/codecs/src/decoding/format/gelf.rs b/lib/codecs/src/decoding/format/gelf.rs index 050bcdfc21912..14da4d2266438 100644 --- a/lib/codecs/src/decoding/format/gelf.rs +++ b/lib/codecs/src/decoding/format/gelf.rs @@ -1,9 +1,11 @@ use bytes::Bytes; use chrono::{DateTime, NaiveDateTime, Utc}; +use derivative::Derivative; use lookup::{event_path, owned_value_path, PathPrefix}; use serde::{Deserialize, Serialize}; use smallvec::{smallvec, SmallVec}; use std::collections::HashMap; +use vector_config::configurable_component; use 
vector_core::config::LogNamespace; use vector_core::{ config::{log_schema, DataType}, @@ -14,7 +16,7 @@ use vector_core::{ use vrl::value::kind::Collection; use vrl::value::{Kind, Value}; -use super::Deserializer; +use super::{default_lossy, Deserializer}; use crate::{gelf_fields::*, VALID_FIELD_REGEX}; /// On GELF decoding behavior: @@ -25,12 +27,26 @@ use crate::{gelf_fields::*, VALID_FIELD_REGEX}; /// Config used to build a `GelfDeserializer`. #[derive(Debug, Clone, Default, Deserialize, Serialize)] -pub struct GelfDeserializerConfig; +pub struct GelfDeserializerConfig { + #[serde( + default, + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + /// GELF-specific decoding options. + pub gelf: GelfDeserializerOptions, +} impl GelfDeserializerConfig { + /// Creates a new `GelfDeserializerConfig`. + pub fn new(options: GelfDeserializerOptions) -> Self { + Self { gelf: options } + } + /// Build the `GelfDeserializer` from this configuration. pub fn build(&self) -> GelfDeserializer { - GelfDeserializer::default() + GelfDeserializer { + lossy: self.gelf.lossy, + } } /// Return the type of event built by this deserializer. @@ -60,21 +76,36 @@ impl GelfDeserializerConfig { } } -/// Deserializer that builds an `Event` from a byte frame containing a GELF log -/// message. -#[derive(Debug, Clone)] -pub struct GelfDeserializer; +/// GELF-specific decoding options. +#[configurable_component] +#[derive(Debug, Clone, PartialEq, Eq, Derivative)] +#[derivative(Default)] +pub struct GelfDeserializerOptions { + /// Determines whether or not to replace invalid UTF-8 sequences instead of failing. + /// + /// When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + /// + /// [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + #[serde( + default = "default_lossy", + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + #[derivative(Default(value = "default_lossy()"))] + pub lossy: bool, +} -impl Default for GelfDeserializer { - fn default() -> Self { - Self::new() - } +/// Deserializer that builds an `Event` from a byte frame containing a GELF log message. +#[derive(Debug, Clone, Derivative)] +#[derivative(Default)] +pub struct GelfDeserializer { + #[derivative(Default(value = "default_lossy()"))] + lossy: bool, } impl GelfDeserializer { - /// Create a new GelfDeserializer - pub fn new() -> GelfDeserializer { - GelfDeserializer + /// Create a new `GelfDeserializer`. + pub fn new(lossy: bool) -> GelfDeserializer { + GelfDeserializer { lossy } } /// Builds a LogEvent from the parsed GelfMessage. 
@@ -195,10 +226,10 @@ impl Deserializer for GelfDeserializer { bytes: Bytes, _log_namespace: LogNamespace, ) -> vector_common::Result> { - let line = std::str::from_utf8(&bytes)?; - let line = line.trim(); - - let parsed: GelfMessage = serde_json::from_str(line)?; + let parsed: GelfMessage = match self.lossy { + true => serde_json::from_str(&String::from_utf8_lossy(&bytes)), + false => serde_json::from_slice(&bytes), + }?; let event = self.message_to_event(&parsed)?; Ok(smallvec![event]) @@ -220,7 +251,7 @@ mod tests { fn deserialize_gelf_input( input: &serde_json::Value, ) -> vector_common::Result> { - let config = GelfDeserializerConfig; + let config = GelfDeserializerConfig::default(); let deserializer = config.build(); let buffer = Bytes::from(serde_json::to_vec(&input).unwrap()); deserializer.parse(buffer, LogNamespace::Legacy) diff --git a/lib/codecs/src/decoding/format/json.rs b/lib/codecs/src/decoding/format/json.rs index 164d91756ff15..49980122be493 100644 --- a/lib/codecs/src/decoding/format/json.rs +++ b/lib/codecs/src/decoding/format/json.rs @@ -4,7 +4,6 @@ use bytes::Bytes; use chrono::Utc; use derivative::Derivative; use lookup::PathPrefix; -use serde::{Deserialize, Serialize}; use smallvec::{smallvec, SmallVec}; use vector_config::configurable_component; use vector_core::{ @@ -14,42 +13,27 @@ use vector_core::{ }; use vrl::value::Kind; -use super::Deserializer; +use super::{default_lossy, Deserializer}; /// Config used to build a `JsonDeserializer`. -#[derive(Debug, Clone, Default, Deserialize, Serialize)] -pub struct JsonDeserializerConfig { - #[serde( - default, - skip_serializing_if = "vector_core::serde::skip_serializing_if_default" - )] - /// Options for the JSON deserializer. - pub json: JsonDeserializerOptions, -} - -/// JSON-specific decoding options. #[configurable_component] #[derive(Debug, Clone, PartialEq, Eq, Derivative)] #[derivative(Default)] -pub struct JsonDeserializerOptions { - /// Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. - /// - /// When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. - /// - /// [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character +pub struct JsonDeserializerConfig { #[serde( - default = "default_lossy", + default, skip_serializing_if = "vector_core::serde::skip_serializing_if_default" )] - #[derivative(Default(value = "default_lossy()"))] - lossy: bool, -} - -const fn default_lossy() -> bool { - true + /// JSON-specific decoding options. + pub json: JsonDeserializerOptions, } impl JsonDeserializerConfig { + /// Creates a new `JsonDeserializerConfig`. + pub fn new(options: JsonDeserializerOptions) -> Self { + Self { json: options } + } + /// Build the `JsonDeserializer` from this configuration. pub fn build(&self) -> JsonDeserializer { Into::::into(self) @@ -85,11 +69,22 @@ impl JsonDeserializerConfig { } } -impl JsonDeserializerConfig { - /// Creates a new `JsonDeserializerConfig`. - pub fn new(options: JsonDeserializerOptions) -> Self { - Self { json: options } - } +/// JSON-specific decoding options. +#[configurable_component] +#[derive(Debug, Clone, PartialEq, Eq, Derivative)] +#[derivative(Default)] +pub struct JsonDeserializerOptions { + /// Determines whether or not to replace invalid UTF-8 sequences instead of failing. + /// + /// When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
+ /// + /// [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + #[serde( + default = "default_lossy", + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + #[derivative(Default(value = "default_lossy()"))] + pub lossy: bool, } /// Deserializer that builds `Event`s from a byte frame containing JSON. diff --git a/lib/codecs/src/decoding/format/mod.rs b/lib/codecs/src/decoding/format/mod.rs index d24faefb85382..c0ab2ea1b5924 100644 --- a/lib/codecs/src/decoding/format/mod.rs +++ b/lib/codecs/src/decoding/format/mod.rs @@ -13,17 +13,19 @@ mod syslog; use ::bytes::Bytes; use dyn_clone::DynClone; -pub use gelf::{GelfDeserializer, GelfDeserializerConfig}; +pub use gelf::{GelfDeserializer, GelfDeserializerConfig, GelfDeserializerOptions}; pub use json::{JsonDeserializer, JsonDeserializerConfig, JsonDeserializerOptions}; pub use native::{NativeDeserializer, NativeDeserializerConfig}; -pub use native_json::{NativeJsonDeserializer, NativeJsonDeserializerConfig}; +pub use native_json::{ + NativeJsonDeserializer, NativeJsonDeserializerConfig, NativeJsonDeserializerOptions, +}; use smallvec::SmallVec; +#[cfg(feature = "syslog")] +pub use syslog::{SyslogDeserializer, SyslogDeserializerConfig, SyslogDeserializerOptions}; use vector_core::config::LogNamespace; use vector_core::event::Event; pub use self::bytes::{BytesDeserializer, BytesDeserializerConfig}; -#[cfg(feature = "syslog")] -pub use self::syslog::{SyslogDeserializer, SyslogDeserializerConfig}; /// Parse structured events from bytes. pub trait Deserializer: DynClone + Send + Sync { @@ -44,3 +46,8 @@ dyn_clone::clone_trait_object!(Deserializer); /// A `Box` containing a `Deserializer`. pub type BoxedDeserializer = Box; + +/// Default value for the UTF-8 lossy option. +const fn default_lossy() -> bool { + true +} diff --git a/lib/codecs/src/decoding/format/native_json.rs b/lib/codecs/src/decoding/format/native_json.rs index 038a36c69fa28..7a8a1914015de 100644 --- a/lib/codecs/src/decoding/format/native_json.rs +++ b/lib/codecs/src/decoding/format/native_json.rs @@ -1,21 +1,35 @@ use bytes::Bytes; +use derivative::Derivative; use serde::{Deserialize, Serialize}; use smallvec::{smallvec, SmallVec}; +use vector_config::configurable_component; use vector_core::{config::DataType, event::Event, schema}; use vrl::value::kind::Collection; use vrl::value::Kind; -use super::Deserializer; +use super::{default_lossy, Deserializer}; use vector_core::config::LogNamespace; /// Config used to build a `NativeJsonDeserializer`. #[derive(Debug, Clone, Default, Deserialize, Serialize)] -pub struct NativeJsonDeserializerConfig; +pub struct NativeJsonDeserializerConfig { + /// Vector's native JSON-specific decoding options. + pub native_json: NativeJsonDeserializerOptions, +} impl NativeJsonDeserializerConfig { + /// Creates a new `NativeJsonDeserializerConfig`. + pub fn new(options: NativeJsonDeserializerOptions) -> Self { + Self { + native_json: options, + } + } + /// Build the `NativeJsonDeserializer` from this configuration. - pub const fn build(&self) -> NativeJsonDeserializer { - NativeJsonDeserializer + pub fn build(&self) -> NativeJsonDeserializer { + NativeJsonDeserializer { + lossy: self.native_json.lossy, + } } /// Return the type of event build by this deserializer. @@ -37,10 +51,32 @@ impl NativeJsonDeserializerConfig { } } +/// Vector's native JSON-specific decoding options. 
+#[configurable_component] +#[derive(Debug, Clone, PartialEq, Eq, Derivative)] +#[derivative(Default)] +pub struct NativeJsonDeserializerOptions { + /// Determines whether or not to replace invalid UTF-8 sequences instead of failing. + /// + /// When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + /// + /// [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + #[serde( + default = "default_lossy", + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + #[derivative(Default(value = "default_lossy()"))] + pub lossy: bool, +} + /// Deserializer that builds `Event`s from a byte frame containing Vector's native JSON /// representation. -#[derive(Debug, Clone, Default)] -pub struct NativeJsonDeserializer; +#[derive(Debug, Clone, Derivative)] +#[derivative(Default)] +pub struct NativeJsonDeserializer { + #[derivative(Default(value = "default_lossy()"))] + lossy: bool, +} impl Deserializer for NativeJsonDeserializer { fn parse( @@ -56,8 +92,11 @@ impl Deserializer for NativeJsonDeserializer { return Ok(smallvec![]); } - let json: serde_json::Value = serde_json::from_slice(&bytes) - .map_err(|error| format!("Error parsing JSON: {:?}", error))?; + let json: serde_json::Value = match self.lossy { + true => serde_json::from_str(&String::from_utf8_lossy(&bytes)), + false => serde_json::from_slice(&bytes), + } + .map_err(|error| format!("Error parsing JSON: {:?}", error))?; let events = match json { serde_json::Value::Array(values) => values @@ -79,7 +118,7 @@ mod test { #[test] fn parses_top_level_arrays() { - let config = NativeJsonDeserializerConfig; + let config = NativeJsonDeserializerConfig::default(); let deserializer = config.build(); let json1 = json!({"a": "b", "c": "d"}); diff --git a/lib/codecs/src/decoding/format/syslog.rs b/lib/codecs/src/decoding/format/syslog.rs index d4aedefdc1d46..f9e173ba7d497 100644 --- a/lib/codecs/src/decoding/format/syslog.rs +++ b/lib/codecs/src/decoding/format/syslog.rs @@ -1,11 +1,14 @@ use bytes::Bytes; use chrono::{DateTime, Datelike, Utc}; +use derivative::Derivative; use lookup::lookup_v2::parse_value_path; use lookup::{event_path, owned_value_path, OwnedTargetPath, OwnedValuePath, PathPrefix}; use serde::{Deserialize, Serialize}; use smallvec::{smallvec, SmallVec}; +use std::borrow::Cow; use std::collections::BTreeMap; use syslog_loose::{IncompleteDate, Message, ProcId, Protocol}; +use vector_config::configurable_component; use vector_core::config::{LegacyKey, LogNamespace}; use vector_core::{ config::{log_schema, DataType}, @@ -14,25 +17,39 @@ use vector_core::{ }; use vrl::value::{kind::Collection, Kind}; -use super::Deserializer; +use super::{default_lossy, Deserializer}; /// Config used to build a `SyslogDeserializer`. #[derive(Debug, Clone, Default, Deserialize, Serialize)] pub struct SyslogDeserializerConfig { source: Option<&'static str>, + /// Syslog-specific decoding options. + pub syslog: SyslogDeserializerOptions, } impl SyslogDeserializerConfig { + /// Creates a new `SyslogDeserializerConfig`. + pub fn new(options: SyslogDeserializerOptions) -> Self { + Self { + source: None, + syslog: options, + } + } + /// Create the `SyslogDeserializer` from the given source name. pub fn from_source(source: &'static str) -> Self { Self { source: Some(source), + ..Default::default() } } /// Build the `SyslogDeserializer` from this configuration. 
pub const fn build(&self) -> SyslogDeserializer { - SyslogDeserializer { source: None } + SyslogDeserializer { + source: self.source, + lossy: self.syslog.lossy, + } } /// Return the type of event build by this deserializer. @@ -218,14 +235,35 @@ impl SyslogDeserializerConfig { } } +/// Syslog-specific decoding options. +#[configurable_component] +#[derive(Debug, Clone, PartialEq, Eq, Derivative)] +#[derivative(Default)] +pub struct SyslogDeserializerOptions { + /// Determines whether or not to replace invalid UTF-8 sequences instead of failing. + /// + /// When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + /// + /// [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + #[serde( + default = "default_lossy", + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + #[derivative(Default(value = "default_lossy()"))] + pub lossy: bool, +} + /// Deserializer that builds an `Event` from a byte frame containing a syslog /// message. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Derivative)] +#[derivative(Default)] pub struct SyslogDeserializer { /// The syslog source needs it's own syslog deserializer separate from the /// syslog codec since it needs to handle the structured of the decoded data /// differently when using the Vector lognamespace. pub source: Option<&'static str>, + #[derivative(Default(value = "default_lossy()"))] + lossy: bool, } impl Deserializer for SyslogDeserializer { @@ -234,7 +272,10 @@ impl Deserializer for SyslogDeserializer { bytes: Bytes, log_namespace: LogNamespace, ) -> vector_common::Result> { - let line = std::str::from_utf8(&bytes)?; + let line: Cow = match self.lossy { + true => String::from_utf8_lossy(&bytes), + false => Cow::from(std::str::from_utf8(&bytes)?), + }; let line = line.trim(); let parsed = syslog_loose::parse_message_with_year_exact(line, resolve_year)?; @@ -449,7 +490,7 @@ mod tests { let input = Bytes::from("<34>1 2003-10-11T22:14:15.003Z mymachine.example.com su - ID47 - MSG"); - let deserializer = SyslogDeserializer { source: None }; + let deserializer = SyslogDeserializer::default(); let events = deserializer.parse(input, LogNamespace::Legacy).unwrap(); assert_eq!(events.len(), 1); @@ -465,7 +506,7 @@ mod tests { let input = Bytes::from("<34>1 2003-10-11T22:14:15.003Z mymachine.example.com su - ID47 - MSG"); - let deserializer = SyslogDeserializer { source: None }; + let deserializer = SyslogDeserializer::default(); let events = deserializer.parse(input, LogNamespace::Vector).unwrap(); assert_eq!(events.len(), 1); diff --git a/lib/codecs/src/decoding/mod.rs b/lib/codecs/src/decoding/mod.rs index 1fbd05a1ef2e2..acf26f33a944f 100644 --- a/lib/codecs/src/decoding/mod.rs +++ b/lib/codecs/src/decoding/mod.rs @@ -9,12 +9,12 @@ use bytes::{Bytes, BytesMut}; pub use error::StreamDecodingError; pub use format::{ BoxedDeserializer, BytesDeserializer, BytesDeserializerConfig, GelfDeserializer, - GelfDeserializerConfig, JsonDeserializer, JsonDeserializerConfig, JsonDeserializerOptions, - NativeDeserializer, NativeDeserializerConfig, NativeJsonDeserializer, - NativeJsonDeserializerConfig, + GelfDeserializerConfig, GelfDeserializerOptions, JsonDeserializer, JsonDeserializerConfig, + JsonDeserializerOptions, NativeDeserializer, NativeDeserializerConfig, NativeJsonDeserializer, + NativeJsonDeserializerConfig, NativeJsonDeserializerOptions, }; #[cfg(feature = "syslog")] -pub use format::{SyslogDeserializer, SyslogDeserializerConfig}; +pub use 
format::{SyslogDeserializer, SyslogDeserializerConfig, SyslogDeserializerOptions}; pub use framing::{ BoxedFramer, BoxedFramingError, BytesDecoder, BytesDecoderConfig, CharacterDelimitedDecoder, CharacterDelimitedDecoderConfig, CharacterDelimitedDecoderOptions, FramingError, @@ -245,7 +245,7 @@ pub enum DeserializerConfig { /// /// [json]: https://www.json.org/ Json { - /// Options for the JSON deserializer. + /// JSON-specific decoding options. #[serde( default, skip_serializing_if = "vector_core::serde::skip_serializing_if_default" @@ -261,7 +261,14 @@ pub enum DeserializerConfig { /// /// [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt /// [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt - Syslog, + Syslog { + /// Syslog-specific decoding options. + #[serde( + default, + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + syslog: SyslogDeserializerOptions, + }, /// Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. /// @@ -277,12 +284,26 @@ pub enum DeserializerConfig { /// /// [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue /// [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - NativeJson, + NativeJson { + /// Vector's native JSON-specific decoding options. + #[serde( + default, + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + native_json: NativeJsonDeserializerOptions, + }, /// Decodes the raw bytes as a [GELF][gelf] message. /// /// [gelf]: https://docs.graylog.org/docs/gelf - Gelf, + Gelf { + /// GELF-specific decoding options. + #[serde( + default, + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] + gelf: GelfDeserializerOptions, + }, } impl From for DeserializerConfig { @@ -299,14 +320,16 @@ impl From for DeserializerConfig { #[cfg(feature = "syslog")] impl From for DeserializerConfig { - fn from(_: SyslogDeserializerConfig) -> Self { - Self::Syslog + fn from(config: SyslogDeserializerConfig) -> Self { + Self::Syslog { + syslog: config.syslog, + } } } impl From for DeserializerConfig { - fn from(_: GelfDeserializerConfig) -> Self { - Self::Gelf + fn from(config: GelfDeserializerConfig) -> Self { + Self::Gelf { gelf: config.gelf } } } @@ -319,14 +342,16 @@ impl DeserializerConfig { Deserializer::Json(JsonDeserializerConfig::new(json.clone()).build()) } #[cfg(feature = "syslog")] - DeserializerConfig::Syslog => { - Deserializer::Syslog(SyslogDeserializerConfig::default().build()) + DeserializerConfig::Syslog { syslog } => { + Deserializer::Syslog(SyslogDeserializerConfig::new(syslog.clone()).build()) } DeserializerConfig::Native => Deserializer::Native(NativeDeserializerConfig.build()), - DeserializerConfig::NativeJson => { - Deserializer::NativeJson(NativeJsonDeserializerConfig.build()) + DeserializerConfig::NativeJson { native_json } => Deserializer::NativeJson( + NativeJsonDeserializerConfig::new(native_json.clone()).build(), + ), + DeserializerConfig::Gelf { gelf } => { + Deserializer::Gelf(GelfDeserializerConfig::new(gelf.clone()).build()) } - DeserializerConfig::Gelf => Deserializer::Gelf(GelfDeserializerConfig.build()), } } @@ -336,12 +361,12 @@ impl DeserializerConfig { DeserializerConfig::Native => FramingConfig::LengthDelimited, DeserializerConfig::Bytes | DeserializerConfig::Json { .. } - | DeserializerConfig::Gelf - | DeserializerConfig::NativeJson => FramingConfig::NewlineDelimited { + | DeserializerConfig::Gelf { .. 
} + | DeserializerConfig::NativeJson { .. } => FramingConfig::NewlineDelimited { newline_delimited: Default::default(), }, #[cfg(feature = "syslog")] - DeserializerConfig::Syslog => FramingConfig::NewlineDelimited { + DeserializerConfig::Syslog { .. } => FramingConfig::NewlineDelimited { newline_delimited: Default::default(), }, } @@ -355,10 +380,16 @@ impl DeserializerConfig { JsonDeserializerConfig::new(json.clone()).output_type() } #[cfg(feature = "syslog")] - DeserializerConfig::Syslog => SyslogDeserializerConfig::default().output_type(), + DeserializerConfig::Syslog { syslog } => { + SyslogDeserializerConfig::new(syslog.clone()).output_type() + } DeserializerConfig::Native => NativeDeserializerConfig.output_type(), - DeserializerConfig::NativeJson => NativeJsonDeserializerConfig.output_type(), - DeserializerConfig::Gelf => GelfDeserializerConfig.output_type(), + DeserializerConfig::NativeJson { native_json } => { + NativeJsonDeserializerConfig::new(native_json.clone()).output_type() + } + DeserializerConfig::Gelf { gelf } => { + GelfDeserializerConfig::new(gelf.clone()).output_type() + } } } @@ -370,14 +401,17 @@ impl DeserializerConfig { JsonDeserializerConfig::new(json.clone()).schema_definition(log_namespace) } #[cfg(feature = "syslog")] - DeserializerConfig::Syslog => { - SyslogDeserializerConfig::default().schema_definition(log_namespace) + DeserializerConfig::Syslog { syslog } => { + SyslogDeserializerConfig::new(syslog.clone()).schema_definition(log_namespace) } DeserializerConfig::Native => NativeDeserializerConfig.schema_definition(log_namespace), - DeserializerConfig::NativeJson => { - NativeJsonDeserializerConfig.schema_definition(log_namespace) + DeserializerConfig::NativeJson { native_json } => { + NativeJsonDeserializerConfig::new(native_json.clone()) + .schema_definition(log_namespace) + } + DeserializerConfig::Gelf { gelf } => { + GelfDeserializerConfig::new(gelf.clone()).schema_definition(log_namespace) } - DeserializerConfig::Gelf => GelfDeserializerConfig.schema_definition(log_namespace), } } @@ -385,13 +419,13 @@ impl DeserializerConfig { pub const fn content_type(&self, framer: &FramingConfig) -> &'static str { match (&self, framer) { ( - DeserializerConfig::Json { .. } | DeserializerConfig::NativeJson, + DeserializerConfig::Json { .. } | DeserializerConfig::NativeJson { .. }, FramingConfig::NewlineDelimited { .. }, ) => "application/x-ndjson", ( - DeserializerConfig::Gelf + DeserializerConfig::Gelf { .. } | DeserializerConfig::Json { .. } - | DeserializerConfig::NativeJson, + | DeserializerConfig::NativeJson { .. }, FramingConfig::CharacterDelimited { character_delimited: CharacterDelimitedDecoderOptions { @@ -403,13 +437,13 @@ impl DeserializerConfig { (DeserializerConfig::Native, _) => "application/octet-stream", ( DeserializerConfig::Json { .. } - | DeserializerConfig::NativeJson + | DeserializerConfig::NativeJson { .. } | DeserializerConfig::Bytes - | DeserializerConfig::Gelf, + | DeserializerConfig::Gelf { .. }, _, ) => "text/plain", #[cfg(feature = "syslog")] - (DeserializerConfig::Syslog, _) => "text/plain", + (DeserializerConfig::Syslog { .. 
}, _) => "text/plain", } } } diff --git a/lib/codecs/tests/native.rs b/lib/codecs/tests/native.rs index d4f9a139d35a4..3d2e9f0ea8a34 100644 --- a/lib/codecs/tests/native.rs +++ b/lib/codecs/tests/native.rs @@ -27,7 +27,7 @@ fn roundtrip_current_native_json_fixtures() { roundtrip_fixtures( "json", "", - &NativeJsonDeserializerConfig.build(), + &NativeJsonDeserializerConfig::default().build(), &mut NativeJsonSerializerConfig.build(), false, ); @@ -51,7 +51,7 @@ fn reserialize_pre_v24_native_json_fixtures() { roundtrip_fixtures( "json", "pre-v24", - &NativeJsonDeserializerConfig.build(), + &NativeJsonDeserializerConfig::default().build(), &mut NativeJsonSerializerConfig.build(), true, ); @@ -100,7 +100,7 @@ fn pre_v24_native_decoding_matches() { fn rebuild_json_fixtures() { rebuild_fixtures( "json", - &NativeJsonDeserializerConfig.build(), + &NativeJsonDeserializerConfig::default().build(), &mut NativeJsonSerializerConfig.build(), ); } @@ -134,7 +134,7 @@ fn fixtures_match(suffix: &str) { /// This test ensures we can load the serialized binaries binary and that they match across /// protocols. fn decoding_matches(suffix: &str) { - let json_deserializer = NativeJsonDeserializerConfig.build(); + let json_deserializer = NativeJsonDeserializerConfig::default().build(); let proto_deserializer = NativeDeserializerConfig.build(); let json_entries = list_fixtures("json", suffix); diff --git a/src/components/validation/resources/mod.rs b/src/components/validation/resources/mod.rs index bbe80843753a5..a7e99bc998a17 100644 --- a/src/components/validation/resources/mod.rs +++ b/src/components/validation/resources/mod.rs @@ -146,10 +146,10 @@ fn deserializer_config_to_serializer(config: &DeserializerConfig) -> encoding::S // the data as Avro, we can't possibly send anything else without the source just // immediately barfing. #[cfg(feature = "sources-syslog")] - DeserializerConfig::Syslog => SerializerConfig::Logfmt, + DeserializerConfig::Syslog { .. } => SerializerConfig::Logfmt, DeserializerConfig::Native => SerializerConfig::Native, - DeserializerConfig::NativeJson => SerializerConfig::NativeJson, - DeserializerConfig::Gelf => SerializerConfig::Gelf, + DeserializerConfig::NativeJson { .. } => SerializerConfig::NativeJson, + DeserializerConfig::Gelf { .. } => SerializerConfig::Gelf, }; serializer_config @@ -183,13 +183,17 @@ fn serializer_config_to_deserializer(config: &SerializerConfig) -> decoding::Des let deserializer_config = match config { SerializerConfig::Avro { .. } => todo!(), SerializerConfig::Csv { .. } => todo!(), - SerializerConfig::Gelf => DeserializerConfig::Gelf, + SerializerConfig::Gelf => DeserializerConfig::Gelf { + gelf: Default::default(), + }, SerializerConfig::Json(_) => DeserializerConfig::Json { json: Default::default(), }, SerializerConfig::Logfmt => todo!(), SerializerConfig::Native => DeserializerConfig::Native, - SerializerConfig::NativeJson => DeserializerConfig::NativeJson, + SerializerConfig::NativeJson => DeserializerConfig::NativeJson { + native_json: Default::default(), + }, SerializerConfig::RawMessage | SerializerConfig::Text(_) => DeserializerConfig::Bytes, }; diff --git a/src/config/mod.rs b/src/config/mod.rs index 4c2729738e6f3..59a26d367d3de 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -1314,6 +1314,7 @@ mod resource_tests { proptest! 
{ #[test] fn valid(addr: IpAddr, port1 in specport(), port2 in specport()) { + prop_assume!(port1 != port2); let components = vec![ ("sink_0", vec![tcp(addr, 0)]), ("sink_1", vec![tcp(addr, port1)]), diff --git a/src/sources/datadog_agent/tests.rs b/src/sources/datadog_agent/tests.rs index bd7d521b43e5e..ed3c5d3a4d9d4 100644 --- a/src/sources/datadog_agent/tests.rs +++ b/src/sources/datadog_agent/tests.rs @@ -1666,7 +1666,9 @@ fn test_config_outputs() { ( "syslog / single output", TestCase { - decoding: DeserializerConfig::Syslog, + decoding: DeserializerConfig::Syslog { + syslog: Default::default(), + }, multiple_outputs: false, want: HashMap::from([( None, @@ -1741,7 +1743,9 @@ fn test_config_outputs() { ( "syslog / multiple output", TestCase { - decoding: DeserializerConfig::Syslog, + decoding: DeserializerConfig::Syslog { + syslog: Default::default(), + }, multiple_outputs: true, want: HashMap::from([ ( diff --git a/src/sources/http_client/integration_tests.rs b/src/sources/http_client/integration_tests.rs index d49b7f92111fd..5927265abd2bf 100644 --- a/src/sources/http_client/integration_tests.rs +++ b/src/sources/http_client/integration_tests.rs @@ -122,7 +122,9 @@ async fn collected_metrics_native_json() { endpoint: format!("{}/metrics/native.json", dufs_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::NativeJson, + decoding: DeserializerConfig::NativeJson { + native_json: Default::default(), + }, framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -151,7 +153,9 @@ async fn collected_trace_native_json() { endpoint: format!("{}/traces/native.json", dufs_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::NativeJson, + decoding: DeserializerConfig::NativeJson { + native_json: Default::default(), + }, framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, diff --git a/src/sources/syslog.rs b/src/sources/syslog.rs index 05e4394a9cd96..06c8a44ebd65d 100644 --- a/src/sources/syslog.rs +++ b/src/sources/syslog.rs @@ -6,7 +6,7 @@ use bytes::Bytes; use chrono::Utc; use codecs::{ decoding::{Deserializer, Framer}, - BytesDecoder, OctetCountingDecoder, SyslogDeserializer, SyslogDeserializerConfig, + BytesDecoder, OctetCountingDecoder, SyslogDeserializerConfig, }; use futures::StreamExt; use listenfd::ListenFd; @@ -224,9 +224,9 @@ impl SourceConfig for SyslogConfig { Framer::OctetCounting(OctetCountingDecoder::new_with_max_length( self.max_length, )), - Deserializer::Syslog(SyslogDeserializer { - source: Some(SyslogConfig::NAME), - }), + Deserializer::Syslog( + SyslogDeserializerConfig::from_source(SyslogConfig::NAME).build(), + ), ); build_unix_stream_source( @@ -280,9 +280,7 @@ impl TcpSource for SyslogTcpSource { fn decoder(&self) -> Self::Decoder { Decoder::new( Framer::OctetCounting(OctetCountingDecoder::new_with_max_length(self.max_length)), - Deserializer::Syslog(SyslogDeserializer { - source: Some(SyslogConfig::NAME), - }), + Deserializer::Syslog(SyslogDeserializerConfig::from_source(SyslogConfig::NAME).build()), ) } @@ -334,9 +332,9 @@ pub fn udp( socket, Decoder::new( Framer::Bytes(BytesDecoder::new()), - Deserializer::Syslog(SyslogDeserializer { - source: Some(SyslogConfig::NAME), - }), + Deserializer::Syslog( + SyslogDeserializerConfig::from_source(SyslogConfig::NAME).build(), + ), ), ) .take_until(shutdown) @@ -474,9 +472,7 @@ mod test { bytes: Bytes, log_namespace: LogNamespace, ) -> Option { - let parser = SyslogDeserializer { - 
source: Some(SyslogConfig::NAME), - }; + let parser = SyslogDeserializerConfig::from_source(SyslogConfig::NAME).build(); let mut events = parser.parse(bytes, LogNamespace::Legacy).ok()?; handle_events( &mut events, diff --git a/website/cue/reference/components/sources/base/amqp.cue b/website/cue/reference/components/sources/base/amqp.cue index 404b600a86097..93d2becda8f58 100644 --- a/website/cue/reference/components/sources/base/amqp.cue +++ b/website/cue/reference/components/sources/base/amqp.cue @@ -95,13 +95,61 @@ base: components: sources: amqp: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue b/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue index 73b2de60e2050..8b17bfc8a5fbf 100644 --- a/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sources/base/aws_kinesis_firehose.cue @@ -98,13 +98,61 @@ base: components: sources: aws_kinesis_firehose: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
+ + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/aws_s3.cue b/website/cue/reference/components/sources/base/aws_s3.cue index 03589426d8a57..4950291add6a7 100644 --- a/website/cue/reference/components/sources/base/aws_s3.cue +++ b/website/cue/reference/components/sources/base/aws_s3.cue @@ -184,13 +184,61 @@ base: components: sources: aws_s3: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. 
+ + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/aws_sqs.cue b/website/cue/reference/components/sources/base/aws_sqs.cue index b2d469c8db6a5..30a7e3fd6381a 100644 --- a/website/cue/reference/components/sources/base/aws_sqs.cue +++ b/website/cue/reference/components/sources/base/aws_sqs.cue @@ -179,13 +179,61 @@ base: components: sources: aws_sqs: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/datadog_agent.cue b/website/cue/reference/components/sources/base/datadog_agent.cue index b356e18ac169c..4f625a5717ce5 100644 --- a/website/cue/reference/components/sources/base/datadog_agent.cue +++ b/website/cue/reference/components/sources/base/datadog_agent.cue @@ -80,13 +80,61 @@ base: components: sources: datadog_agent: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." 
+ relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/demo_logs.cue b/website/cue/reference/components/sources/base/demo_logs.cue index d75717f71c32d..d8366fa1c5ec9 100644 --- a/website/cue/reference/components/sources/base/demo_logs.cue +++ b/website/cue/reference/components/sources/base/demo_logs.cue @@ -59,13 +59,61 @@ base: components: sources: demo_logs: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
+ + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/exec.cue b/website/cue/reference/components/sources/base/exec.cue index a182abfc51e2b..cc0e2ab6618e6 100644 --- a/website/cue/reference/components/sources/base/exec.cue +++ b/website/cue/reference/components/sources/base/exec.cue @@ -55,13 +55,61 @@ base: components: sources: exec: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
diff --git a/website/cue/reference/components/sources/base/file_descriptor.cue b/website/cue/reference/components/sources/base/file_descriptor.cue index 6bbbda11ed427..6eaed568f2526 100644 --- a/website/cue/reference/components/sources/base/file_descriptor.cue +++ b/website/cue/reference/components/sources/base/file_descriptor.cue @@ -50,13 +50,61 @@ base: components: sources: file_descriptor: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/gcp_pubsub.cue b/website/cue/reference/components/sources/base/gcp_pubsub.cue index f5b03561cce9e..767c7587aa0a4 100644 --- a/website/cue/reference/components/sources/base/gcp_pubsub.cue +++ b/website/cue/reference/components/sources/base/gcp_pubsub.cue @@ -126,13 +126,61 @@ base: components: sources: gcp_pubsub: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." 
relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/heroku_logs.cue b/website/cue/reference/components/sources/base/heroku_logs.cue index e42f80fc2c476..982d0343ac1d1 100644 --- a/website/cue/reference/components/sources/base/heroku_logs.cue +++ b/website/cue/reference/components/sources/base/heroku_logs.cue @@ -92,13 +92,61 @@ base: components: sources: heroku_logs: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." 
+ relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/http.cue b/website/cue/reference/components/sources/base/http.cue index 12a95c494068e..b5aac87c8c2ff 100644 --- a/website/cue/reference/components/sources/base/http.cue +++ b/website/cue/reference/components/sources/base/http.cue @@ -93,13 +93,61 @@ base: components: sources: http: configuration: { """ } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/http_client.cue b/website/cue/reference/components/sources/base/http_client.cue index 25209b31e2843..efe28fb6a7827 100644 --- a/website/cue/reference/components/sources/base/http_client.cue +++ b/website/cue/reference/components/sources/base/http_client.cue @@ -92,13 +92,61 @@ base: components: sources: http_client: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
+ + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/http_server.cue b/website/cue/reference/components/sources/base/http_server.cue index 9edad734be947..6cda10ae713b2 100644 --- a/website/cue/reference/components/sources/base/http_server.cue +++ b/website/cue/reference/components/sources/base/http_server.cue @@ -93,13 +93,61 @@ base: components: sources: http_server: configuration: { """ } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. 
+ + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/kafka.cue b/website/cue/reference/components/sources/base/kafka.cue index 30fee8d24567b..9ee5b6e6b2dc6 100644 --- a/website/cue/reference/components/sources/base/kafka.cue +++ b/website/cue/reference/components/sources/base/kafka.cue @@ -104,13 +104,61 @@ base: components: sources: kafka: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/nats.cue b/website/cue/reference/components/sources/base/nats.cue index 5e232b607a9e8..6cb2b3e956dc9 100644 --- a/website/cue/reference/components/sources/base/nats.cue +++ b/website/cue/reference/components/sources/base/nats.cue @@ -147,13 +147,61 @@ base: components: sources: nats: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." 
+ relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/redis.cue b/website/cue/reference/components/sources/base/redis.cue index 0336931a1bfcd..1e81399a5f3c4 100644 --- a/website/cue/reference/components/sources/base/redis.cue +++ b/website/cue/reference/components/sources/base/redis.cue @@ -65,13 +65,61 @@ base: components: sources: redis: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
+ + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. diff --git a/website/cue/reference/components/sources/base/socket.cue b/website/cue/reference/components/sources/base/socket.cue index 1c57034f87f8f..d49d70ffb589f 100644 --- a/website/cue/reference/components/sources/base/socket.cue +++ b/website/cue/reference/components/sources/base/socket.cue @@ -67,13 +67,61 @@ base: components: sources: socket: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. 
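For context, the `lossy` behaviour documented in these hunks matches Rust's standard lossy UTF-8 conversion: invalid byte sequences are replaced with U+FFFD instead of aborting decoding. A minimal sketch (not part of this patch) of what that means in practice:

```rust
// Sketch for context: 0x9f is not valid UTF-8 on its own, so lossy decoding
// substitutes U+FFFD (the replacement character) instead of rejecting the payload.
fn main() {
    let bytes = b"hello \x9f world";
    let decoded = String::from_utf8_lossy(bytes);
    assert_eq!("hello \u{FFFD} world", decoded);
    println!("{decoded}");
}
```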
diff --git a/website/cue/reference/components/sources/base/stdin.cue b/website/cue/reference/components/sources/base/stdin.cue index 7682e32ebecba..72141036aa562 100644 --- a/website/cue/reference/components/sources/base/stdin.cue +++ b/website/cue/reference/components/sources/base/stdin.cue @@ -50,13 +50,61 @@ base: components: sources: stdin: configuration: { } } } + gelf: { + description: "GELF-specific decoding options." + relevant_when: "codec = \"gelf\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } json: { - description: "Options for the JSON deserializer." + description: "JSON-specific decoding options." relevant_when: "codec = \"json\"" required: false type: object: options: lossy: { description: """ - Determines whether or not to replace invalid UTF-8 sequences instead of returning an error. + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + native_json: { + description: "Vector's native JSON-specific decoding options." + relevant_when: "codec = \"native_json\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. + + When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. + + [U+FFFD]: https://en.wikipedia.org/wiki/Specials_(Unicode_block)#Replacement_character + """ + required: false + type: bool: default: true + } + } + syslog: { + description: "Syslog-specific decoding options." + relevant_when: "codec = \"syslog\"" + required: false + type: object: options: lossy: { + description: """ + Determines whether or not to replace invalid UTF-8 sequences instead of failing. When true, invalid UTF-8 sequences are replaced with the [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]. From 83af7ea47f661ff22ba5aae728584390ea80743f Mon Sep 17 00:00:00 2001 From: Nathan Fox Date: Tue, 13 Jun 2023 16:43:48 -0400 Subject: [PATCH 133/236] fix `demo_logs` metadata source name (#17689) --- src/sources/demo_logs.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sources/demo_logs.rs b/src/sources/demo_logs.rs index d587a20cfc7b1..6a31744047501 100644 --- a/src/sources/demo_logs.rs +++ b/src/sources/demo_logs.rs @@ -255,7 +255,7 @@ async fn demo_logs_source( now, ); log_namespace.insert_source_metadata( - "service", + DemoLogsConfig::NAME, log, Some(LegacyKey::InsertIfEmpty(path!("service"))), path!("service"), From ac68a7b8d8238f4d64d5f3850e15dc9931e39349 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 11:17:32 +0100 Subject: [PATCH 134/236] chore(deps): Bump rdkafka from 0.31.0 to 0.32.2 (#17664) Bumps [rdkafka](https://github.com/fede1024/rust-rdkafka) from 0.31.0 to 0.32.2.
Changelog

Sourced from rdkafka's changelog.

Changelog

See also the rdkafka-sys changelog.

Unreleased

0.32.1 (2023-06-09)

  • Add support for the cluster mock API.
  • Expose assignment_lost method on the consumer.
Commits
  • b46a1d3 Update pin of rdkafka-sys to latest.
  • 97688d7 Release v4.5.0+1.9.2
  • 157d81e Fix version.
  • f09348e Release 0.32.0.
  • 83e9a50 Expose assignment_lost
  • 0048649 Add entry in the changelog
  • 49eaf5d Add example for the mock API
  • 9198dba Add missing mock APIs
  • 536c0af Adapt to project style and few minor fixes
  • b3a9e7a mocking: Add support for mock cluster reference from Client configured with t...
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=rdkafka&package-manager=cargo&previous-version=0.31.0&new-version=0.32.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b8b016f3f848a..9a66fea32ee9a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6617,9 +6617,9 @@ dependencies = [ [[package]] name = "rdkafka" -version = "0.31.0" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88383df3a85a38adfa2aa447d3ab6eb9cedcb49613adcf18e7e7ebb3b62e9b03" +checksum = "f8733bc5dc0b192d1a4b28073f9bff1326ad9e4fecd4d9b025d6fc358d1c3e79" dependencies = [ "futures-channel", "futures-util", @@ -6635,9 +6635,9 @@ dependencies = [ [[package]] name = "rdkafka-sys" -version = "4.4.0+1.9.2" +version = "4.5.0+1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ac9d87c3aba1748e3112318459f2ac8bff80bfff7359e338e0463549590249" +checksum = "1bb0676c2112342ac7165decdedbc4e7086c0af384479ccce534546b10687a5d" dependencies = [ "cmake", "libc", diff --git a/Cargo.toml b/Cargo.toml index de6bdc4cb2552..746309e91968a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -291,7 +291,7 @@ postgres-openssl = { version = "0.5.0", default-features = false, features = ["r pulsar = { version = "6.0.0", default-features = false, features = ["tokio-runtime", "auth-oauth2", "flate2", "lz4", "snap", "zstd"], optional = true } rand = { version = "0.8.5", default-features = false, features = ["small_rng"] } rand_distr = { version = "0.4.3", default-features = false } -rdkafka = { version = "0.31.0", default-features = false, features = ["tokio", "libz", "ssl", "zstd"], optional = true } +rdkafka = { version = "0.32.2", default-features = false, features = ["tokio", "libz", "ssl", "zstd"], optional = true } redis = { version = "0.23.0", default-features = false, features = ["connection-manager", "tokio-comp", "tokio-native-tls-comp"], optional = true } regex = { version = "1.8.4", default-features = false, features = ["std", "perf"] } roaring = { version = "0.10.1", default-features = false, optional = true } From 8d98bb8c4f4a4dd44e433caf8846aee4df1eec2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 11:21:03 +0100 Subject: [PATCH 135/236] chore(deps): Bump pulsar from 6.0.0 to 6.0.1 (#17673) Bumps [pulsar](https://github.com/streamnative/pulsar-rs) from 6.0.0 to 6.0.1.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pulsar&package-manager=cargo&previous-version=6.0.0&new-version=6.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a66fea32ee9a..7c07bbdba9a5d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6341,9 +6341,9 @@ dependencies = [ [[package]] name = "pulsar" -version = "6.0.0" +version = "6.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06fbacec81fe6fb82f076279c3aaeb05324478f62c3074f13ecd0452cbec27b2" +checksum = "e6eb95b2e36b92d3e0536be87eaf7accb17db39f5a44452759b43f1328e82dc9" dependencies = [ "async-trait", "bit-vec 0.6.3", diff --git a/Cargo.toml b/Cargo.toml index 746309e91968a..9d0dfe2fb4e81 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -288,7 +288,7 @@ paste = "1.0.12" percent-encoding = { version = "2.3.0", default-features = false } pin-project = { version = "1.1.0", default-features = false } postgres-openssl = { version = "0.5.0", default-features = false, features = ["runtime"], optional = true } -pulsar = { version = "6.0.0", default-features = false, features = ["tokio-runtime", "auth-oauth2", "flate2", "lz4", "snap", "zstd"], optional = true } +pulsar = { version = "6.0.1", default-features = false, features = ["tokio-runtime", "auth-oauth2", "flate2", "lz4", "snap", "zstd"], optional = true } rand = { version = "0.8.5", default-features = false, features = ["small_rng"] } rand_distr = { version = "0.4.3", default-features = false } rdkafka = { version = "0.32.2", default-features = false, features = ["tokio", "libz", "ssl", "zstd"], optional = true } From 714ccf8e77426b916ab88121c45a611106ebd6fe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 10:21:21 +0000 Subject: [PATCH 136/236] chore(deps): Bump crossbeam-utils from 0.8.15 to 0.8.16 (#17674) Bumps [crossbeam-utils](https://github.com/crossbeam-rs/crossbeam) from 0.8.15 to 0.8.16.
Release notes

Sourced from crossbeam-utils's releases.

crossbeam-utils 0.8.16

  • Improve implementation of CachePadded. (#967)
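For context on the item above: `CachePadded<T>` aligns its contents to a cache-line boundary so adjacent hot values do not share a line (avoiding false sharing). A minimal usage sketch, assuming the long-standing `crossbeam_utils::CachePadded` API; the struct and counters are purely illustrative:

```rust
// Sketch for context (not part of this patch): two frequently updated counters
// kept on separate cache lines so concurrent writers do not contend.
use crossbeam_utils::CachePadded;
use std::sync::atomic::{AtomicU64, Ordering};

struct Counters {
    produced: CachePadded<AtomicU64>,
    consumed: CachePadded<AtomicU64>,
}

fn main() {
    let c = Counters {
        produced: CachePadded::new(AtomicU64::new(0)),
        consumed: CachePadded::new(AtomicU64::new(0)),
    };
    c.produced.fetch_add(1, Ordering::Relaxed); // Deref passes through to the inner AtomicU64
    assert_eq!(c.produced.load(Ordering::Relaxed), 1);
    assert_eq!(c.consumed.load(Ordering::Relaxed), 0);
}
```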
Commits
  • de26ec7 Prepare for the next release
  • 6732dec Bump memoffset to v0.9
  • 82786c5 Regenerate no_atomic
  • 1c9af43 Don't allow(dropping_copy_types)
  • e784567 chore: rename dropping_copy_types lint
  • 57750c2 Update TSAN suppressions file
  • 9b81008 Update create-pull-request action to v5
  • a964ffd CachePadded: Use 32-byte alignment on riscv32/sparc/hexagon, 16-byte alignmen...
  • 81ff802 Merge #973
  • 04fa0aa Prepare for the next release
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=crossbeam-utils&package-manager=cargo&previous-version=0.8.15&new-version=0.8.16)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- lib/vector-buffers/Cargo.toml | 2 +- lib/vector-common/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7c07bbdba9a5d..21bde1aa9c59a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2336,9 +2336,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.15" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index 4ce2a1e85f168..21ec45d1b7b58 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -13,7 +13,7 @@ bytecheck = { version = "0.6.9", default-features = false, features = ["std"] } bytes = { version = "1.4.0", default-features = false } crc32fast = { version = "1.3.2", default-features = false } crossbeam-queue = { version = "0.3.8", default-features = false, features = ["std"] } -crossbeam-utils = { version = "0.8.15", default-features = false } +crossbeam-utils = { version = "0.8.16", default-features = false } fslock = { version = "0.2.1", default-features = false, features = ["std"] } futures = { version = "0.3.28", default-features = false, features = ["std"] } memmap2 = { version = "0.7.0", default-features = false } diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 8ee739f566bf5..19aaa92d33137 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -45,7 +45,7 @@ async-stream = "0.3.5" bytes = { version = "1.4.0", default-features = false, optional = true } chrono-tz = { version = "0.8.2", default-features = false, features = ["serde"] } chrono = { version = "0.4", default-features = false, optional = true, features = ["clock"] } -crossbeam-utils = { version = "0.8.15", default-features = false } +crossbeam-utils = { version = "0.8.16", default-features = false } derivative = "2.1.3" futures = { version = "0.3.28", default-features = false, features = ["std"] } indexmap = { version = "~1.9.3", default-features = false } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index d4e997eecdd05..99a53e604b3bc 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -11,7 +11,7 @@ async-trait = { version = "0.1", default-features = false } bitmask-enum = { version = "2.1.0", default-features = false } bytes = { version = "1.4.0", default-features = false, features = ["serde"] } chrono = { version = "0.4.19", default-features = false, features = ["serde"] } -crossbeam-utils = { version = "0.8.15", default-features = false } +crossbeam-utils = { version = "0.8.16", default-features = false } db-key = { version = "0.0.5", default-features = false, optional = true } dyn-clone = { version = "1.0.11", default-features = false } enrichment = { path = "../enrichment", optional = true } From c97d619d47b1171d592dcf55692b5caa01e97992 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 11:39:40 +0000 Subject: [PATCH 137/236] chore(deps): Bump uuid from 1.3.3 to 1.3.4 (#17682) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps 
[uuid](https://github.com/uuid-rs/uuid) from 1.3.3 to 1.3.4.
Release notes

Sourced from uuid's releases.

1.3.4

What's Changed

New Contributors

Full Changelog: https://github.com/uuid-rs/uuid/compare/1.3.3...1.3.4

Commits
  • 07052be Merge pull request #683 from uuid-rs/cargo/1.3.4
  • 80ec18c prepare for 1.3.4 release
  • 4297536 Merge pull request #682 from Hanaasagi/fix-index
  • 3af4733 fix: keep the order when filling random bytes
  • 6188ecf Merge pull request #679 from uuid-rs/ci/msrv-build
  • e582a3d Just check v4 for MSRV
  • b466522 fix up MSRV build in CI
  • a0d6eb6 Merge pull request #677 from acfoltzer/wasm32-wasi
  • 403f845 add installed wasmtime to GITHUB_PATH
  • f74e05e add wasm32-wasi target in CI
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=uuid&package-manager=cargo&previous-version=1.3.3&new-version=1.3.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 21bde1aa9c59a..f7fdb7ea2c649 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9035,9 +9035,9 @@ checksum = "936e4b492acfd135421d8dca4b1aa80a7bfc26e702ef3af710e0752684df5372" [[package]] name = "uuid" -version = "1.3.3" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" +checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" dependencies = [ "getrandom 0.2.10", "rand 0.8.5", From 80069871df7d0809411053435486c604b7b8c15d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 11:40:20 +0000 Subject: [PATCH 138/236] chore(ci): Bump docker/setup-buildx-action from 2.6.0 to 2.7.0 (#17685) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 2.6.0 to 2.7.0.
Release notes

Sourced from docker/setup-buildx-action's releases.

v2.7.0

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.6.0...v2.7.0

Commits
  • ecf9528 Merge pull request #238 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • b2a38ee update generated content
  • 7f79690 Bump @​docker/actions-toolkit from 0.4.0 to 0.5.0
  • bdd549b Merge pull request #237 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • be4a385 update generated content
  • 6c4dbb2 Bump @​docker/actions-toolkit from 0.3.0 to 0.4.0
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/setup-buildx-action&package-manager=github_actions&previous-version=2.6.0&new-version=2.7.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- .github/workflows/publish.yml | 2 +- .github/workflows/regression.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 9379bfd404ebd..29af270241f7f 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -42,7 +42,7 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@v2.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2.6.0 + uses: docker/setup-buildx-action@v2.7.0 - name: Login to DockerHub uses: docker/login-action@v2.1.0 if: github.ref == 'refs/heads/master' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 7ee5ecf2af850..69c86091ea8c0 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -435,7 +435,7 @@ jobs: platforms: all - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2.6.0 + uses: docker/setup-buildx-action@v2.7.0 with: version: latest install: true diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index fb711449f609b..d651481f3f7cf 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -295,7 +295,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2.6.0 + uses: docker/setup-buildx-action@v2.7.0 - name: Build 'vector' target image uses: docker/build-push-action@v4.1.0 @@ -332,7 +332,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2.6.0 + uses: docker/setup-buildx-action@v2.7.0 - name: Build 'vector' target image uses: docker/build-push-action@v4.1.0 From 71273dfc64206dd66290426fe7d65a68afb13d51 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 11:41:19 +0000 Subject: [PATCH 139/236] chore(ci): Bump docker/metadata-action from 4.5.0 to 4.6.0 (#17686) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 4.5.0 to 4.6.0.
Release notes

Sourced from docker/metadata-action's releases.

v4.6.0

Full Changelog: https://github.com/docker/metadata-action/compare/v4.5.0...v4.6.0

Commits
  • 818d4b7 Merge pull request #302 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • 948134a update generated content
  • ef7eee9 enable comments to avoid breaking change with current impl
  • 8ec80c3 Bump @​docker/actions-toolkit from 0.3.0 to 0.5.0
  • 38650bb Merge pull request #301 from crazy-max/dedup-labels
  • ebbd9b4 update generated content
  • 2dadb92 dedup and sort labels
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/metadata-action&package-manager=github_actions&previous-version=4.5.0&new-version=4.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 29af270241f7f..8283ccf77a9be 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -51,7 +51,7 @@ jobs: password: ${{ secrets.CI_DOCKER_PASSWORD }} - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@2c0bd771b40637d97bf205cbccdd294a32112176 + uses: docker/metadata-action@818d4b7b91585d195f67373fd9cb0332e31a7175 with: images: timberio/vector-dev flavor: | From bce5e65d9562983f0094f1b7359775cf17043285 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 11:41:49 +0000 Subject: [PATCH 140/236] chore(ci): Bump docker/build-push-action from 4.1.0 to 4.1.1 (#17687) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4.1.0 to 4.1.1.
Release notes

Sourced from docker/build-push-action's releases.

v4.1.1

Note

Buildx v0.10 enables support for a minimal SLSA Provenance attestation, which requires support for OCI-compliant multi-platform images. This may introduce issues with registry and runtime support (e.g. Google Cloud Run and AWS Lambda). You can optionally disable the default provenance attestation functionality using provenance: false.

Full Changelog: https://github.com/docker/build-push-action/compare/v4.1.0...v4.1.1

Commits
  • 2eb1c19 Merge pull request #880 from crazy-max/fix-inputlist
  • 27376fe update generated content
  • c933000 test: build-arg with hash
  • dac08d4 chore(deps): Bump @​docker/actions-toolkit from 0.3.0 to 0.5.0
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/build-push-action&package-manager=github_actions&previous-version=4.1.0&new-version=4.1.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- .github/workflows/regression.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 8283ccf77a9be..7057f98f21d6a 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -63,7 +63,7 @@ jobs: org.opencontainers.image.title=Vector development environment org.opencontainers.image.url=https://github.com/vectordotdev/vector - name: Build and push - uses: docker/build-push-action@v4.1.0 + uses: docker/build-push-action@v4.1.1 with: context: . file: ./scripts/environment/Dockerfile diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index d651481f3f7cf..e78de733eb234 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -298,7 +298,7 @@ jobs: uses: docker/setup-buildx-action@v2.7.0 - name: Build 'vector' target image - uses: docker/build-push-action@v4.1.0 + uses: docker/build-push-action@v4.1.1 with: context: baseline-vector/ cache-from: type=gha @@ -335,7 +335,7 @@ jobs: uses: docker/setup-buildx-action@v2.7.0 - name: Build 'vector' target image - uses: docker/build-push-action@v4.1.0 + uses: docker/build-push-action@v4.1.1 with: context: comparison-vector/ cache-from: type=gha From 41ee39414ea3210c841659f1f41b3295ad8bfd23 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Wed, 14 Jun 2023 08:01:17 -0600 Subject: [PATCH 141/236] chore(deps): Drop use of `hashlink` crate (#17678) The version bump to `hashlink` is causing an issue for licensing, and we more commonly use the equivalent `indexmap` crate already, so just move the usage in `vdev` to that crate. 
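For context on the swap described above (a sketch, not part of the patch): `indexmap::IndexMap` preserves insertion order just as `hashlink::LinkedHashMap` did, which is what the `matrix` configuration in `vdev` relies on, so the move does not change behaviour. The key and value strings below are illustrative:

```rust
// Sketch for context: IndexMap iterates in insertion order, matching what
// LinkedHashMap provided for the integration test matrix config.
use indexmap::IndexMap;

fn main() {
    let mut matrix: IndexMap<String, Vec<String>> = IndexMap::new();
    matrix.insert("version".into(), vec!["1.19".into(), "1.20".into()]);
    matrix.insert("arch".into(), vec!["amd64".into()]);

    // Iteration follows insertion order, not hash order.
    let keys: Vec<String> = matrix.keys().cloned().collect();
    assert_eq!(keys, ["version", "arch"]);
}
```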
--- Cargo.lock | 12 +----------- vdev/Cargo.toml | 2 +- vdev/src/testing/config.rs | 6 +++--- 3 files changed, 5 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f7fdb7ea2c649..aa907ba7203e4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3614,16 +3614,6 @@ dependencies = [ "ahash 0.8.2", ] -[[package]] -name = "hashlink" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa" -dependencies = [ - "hashbrown 0.13.2", - "serde", -] - [[package]] name = "hdrhistogram" version = "7.5.2" @@ -9071,8 +9061,8 @@ dependencies = [ "directories 5.0.1", "dunce", "glob", - "hashlink", "hex", + "indexmap", "indicatif", "itertools", "log", diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 5b53649a38280..1e713e44137db 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -20,8 +20,8 @@ directories = "5.0.1" # remove this when stabilized https://doc.rust-lang.org/stable/std/path/fn.absolute.html dunce = "1.0.4" glob = { version = "0.3.1", default-features = false } -hashlink = { version = "0.8.2", features = ["serde_impl"] } hex = "0.4.3" +indexmap = { version = "1.9", default-features = false, features = ["serde"] } indicatif = { version = "0.17.5", features = ["improved_unicode"] } itertools = "0.10.5" log = "0.4.19" diff --git a/vdev/src/testing/config.rs b/vdev/src/testing/config.rs index e7bedd7093805..560c7faba7b18 100644 --- a/vdev/src/testing/config.rs +++ b/vdev/src/testing/config.rs @@ -3,7 +3,7 @@ use std::path::{Path, PathBuf}; use std::{env, fs}; use anyhow::{bail, Context, Result}; -use hashlink::LinkedHashMap; +use indexmap::IndexMap; use itertools::{self, Itertools}; use serde::{Deserialize, Serialize}; use serde_yaml::Value; @@ -98,7 +98,7 @@ pub struct IntegrationTestConfig { #[serde(default)] pub env: Environment, /// The matrix of environment configurations values. - matrix: LinkedHashMap>, + matrix: IndexMap>, /// Configuration specific to the compose services. #[serde(default)] pub runner: IntegrationRunnerConfig, @@ -140,7 +140,7 @@ impl IntegrationTestConfig { Ok(config) } - pub fn environments(&self) -> LinkedHashMap { + pub fn environments(&self) -> IndexMap { self.matrix .values() .multi_cartesian_product() From 59e2cbff7bce014209813369d2a33a25ac193bb3 Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Wed, 14 Jun 2023 10:20:47 -0400 Subject: [PATCH 142/236] fix(http_client source): remove utf8 lossy conversion (#17655) Fixes: https://github.com/vectordotdev/vector/issues/16814 This change is a now a no-op thanks to https://github.com/vectordotdev/vector/pull/17628 and https://github.com/vectordotdev/vector/pull/17688. 
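A short illustration of why the removed conversion was redundant (a sketch for context, assuming invalid UTF-8 is now handled by the codec's `lossy` option rather than by the source): the body can be copied into the decode buffer as raw bytes, so the extra `String::from_utf8_lossy` round-trip added an allocation without changing the result.

```rust
// Sketch for context: the response body is forwarded as raw bytes; any invalid
// UTF-8 is left for the configured deserializer to handle (e.g. `lossy: true`).
use bytes::{Bytes, BytesMut};

fn fill_buffer(body: &Bytes) -> BytesMut {
    let mut buf = BytesMut::new();
    buf.extend_from_slice(body);
    buf
}

fn main() {
    let body = Bytes::from_static(b"{\"msg\": \"ok\"}");
    assert_eq!(&fill_buffer(&body)[..], b"{\"msg\": \"ok\"}");
}
```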
--- src/sources/http_client/client.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/sources/http_client/client.rs b/src/sources/http_client/client.rs index 0b8ba25dace35..6ba6ea77220f1 100644 --- a/src/sources/http_client/client.rs +++ b/src/sources/http_client/client.rs @@ -309,8 +309,7 @@ impl http_client::HttpClientContext for HttpClientContext { fn on_response(&mut self, _url: &Uri, _header: &Parts, body: &Bytes) -> Option> { // get the body into a byte array let mut buf = BytesMut::new(); - let body = String::from_utf8_lossy(body); - buf.extend_from_slice(body.as_bytes()); + buf.extend_from_slice(body); let events = self.decode_events(&mut buf); From ee480cd08a5451bc3f0b83a2b037ba131e38d4b9 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Wed, 14 Jun 2023 11:00:28 -0400 Subject: [PATCH 143/236] chore: Dropped error field from StreamClosed Error (#17693) Field has been unused, so I've removed it and cleaned up the usages and conditionals where it was included previously. `send_batch()` appears to only return `ClosedError` making some additional matching pointless. Signed-off-by: Spencer Gilbert --- src/internal_events/common.rs | 1 - src/sources/amqp.rs | 8 ++++---- src/sources/apache_metrics/mod.rs | 4 ++-- src/sources/aws_ecs_metrics/mod.rs | 4 ++-- src/sources/aws_kinesis_firehose/handlers.rs | 5 +---- src/sources/aws_s3/sqs.rs | 4 ++-- src/sources/aws_sqs/source.rs | 2 +- src/sources/datadog_agent/mod.rs | 4 ++-- src/sources/demo_logs.rs | 4 ++-- src/sources/docker_logs/mod.rs | 11 ++++------- src/sources/eventstoredb_metrics/mod.rs | 4 ++-- src/sources/exec/mod.rs | 4 ++-- src/sources/file.rs | 4 ++-- src/sources/file_descriptors/mod.rs | 4 ++-- src/sources/gcp_pubsub.rs | 2 +- src/sources/host_metrics/mod.rs | 4 ++-- src/sources/internal_logs.rs | 4 ++-- src/sources/internal_metrics.rs | 4 ++-- src/sources/journald.rs | 4 ++-- src/sources/kafka.rs | 8 ++++---- src/sources/kubernetes_logs/mod.rs | 3 +-- src/sources/mongodb_metrics/mod.rs | 4 ++-- src/sources/nats.rs | 4 ++-- src/sources/nginx_metrics/mod.rs | 4 ++-- src/sources/opentelemetry/grpc.rs | 2 +- src/sources/opentelemetry/http.rs | 10 ++++------ src/sources/postgresql_metrics.rs | 4 ++-- src/sources/redis/mod.rs | 4 ++-- src/sources/socket/udp.rs | 4 ++-- src/sources/statsd/mod.rs | 4 ++-- src/sources/syslog.rs | 4 ++-- src/sources/util/http/prelude.rs | 4 ++-- src/sources/util/http_client.rs | 4 ++-- src/sources/util/net/tcp/mod.rs | 4 ++-- src/sources/util/unix_datagram.rs | 4 ++-- src/sources/util/unix_stream.rs | 4 ++-- src/sources/vector/mod.rs | 2 +- 37 files changed, 74 insertions(+), 84 deletions(-) diff --git a/src/internal_events/common.rs b/src/internal_events/common.rs index fafa280df099e..0bf94f54ae25a 100644 --- a/src/internal_events/common.rs +++ b/src/internal_events/common.rs @@ -83,7 +83,6 @@ const STREAM_CLOSED: &str = "stream_closed"; #[derive(Debug)] pub struct StreamClosedError { - pub error: crate::source_sender::ClosedError, pub count: usize, } diff --git a/src/sources/amqp.rs b/src/sources/amqp.rs index 94fc250806cf5..f5cf22f493a61 100644 --- a/src/sources/amqp.rs +++ b/src/sources/amqp.rs @@ -392,8 +392,8 @@ async fn finalize_event_stream( let mut stream = stream.map(|event| event.with_batch_notifier(&batch)); match out.send_event_stream(&mut stream).await { - Err(error) => { - emit!(StreamClosedError { error, count: 1 }); + Err(_) => { + emit!(StreamClosedError { count: 1 }); } Ok(_) => { finalizer.add(msg.into(), receiver); @@ -401,8 +401,8 @@ async fn 
finalize_event_stream( } } None => match out.send_event_stream(&mut stream).await { - Err(error) => { - emit!(StreamClosedError { error, count: 1 }); + Err(_) => { + emit!(StreamClosedError { count: 1 }); } Ok(_) => { let ack_options = lapin::options::BasicAckOptions::default(); diff --git a/src/sources/apache_metrics/mod.rs b/src/sources/apache_metrics/mod.rs index 7a0fec5404e0e..ebcabce4525e1 100644 --- a/src/sources/apache_metrics/mod.rs +++ b/src/sources/apache_metrics/mod.rs @@ -273,9 +273,9 @@ fn apache_metrics( debug!("Finished sending."); Ok(()) } - Err(error) => { + Err(_) => { let (count, _) = stream.size_hint(); - emit!(StreamClosedError { error, count }); + emit!(StreamClosedError { count }); Err(()) } } diff --git a/src/sources/aws_ecs_metrics/mod.rs b/src/sources/aws_ecs_metrics/mod.rs index fe652964cf80b..5829d4e004116 100644 --- a/src/sources/aws_ecs_metrics/mod.rs +++ b/src/sources/aws_ecs_metrics/mod.rs @@ -207,8 +207,8 @@ async fn aws_ecs_metrics( endpoint: uri.path(), }); - if let Err(error) = out.send_batch(metrics).await { - emit!(StreamClosedError { error, count }); + if (out.send_batch(metrics).await).is_err() { + emit!(StreamClosedError { count }); return Err(()); } } diff --git a/src/sources/aws_kinesis_firehose/handlers.rs b/src/sources/aws_kinesis_firehose/handlers.rs index 054f91af58a72..11bc526b0648c 100644 --- a/src/sources/aws_kinesis_firehose/handlers.rs +++ b/src/sources/aws_kinesis_firehose/handlers.rs @@ -145,10 +145,7 @@ pub(super) async fn firehose( let count = events.len(); if let Err(error) = context.out.send_batch(events).await { - emit!(StreamClosedError { - error: error.clone(), - count, - }); + emit!(StreamClosedError { count }); let error = RequestError::ShuttingDown { request_id: request_id.clone(), source: error, diff --git a/src/sources/aws_s3/sqs.rs b/src/sources/aws_s3/sqs.rs index b206e212a3465..3074a8f383d6e 100644 --- a/src/sources/aws_s3/sqs.rs +++ b/src/sources/aws_s3/sqs.rs @@ -574,9 +574,9 @@ impl IngestorProcess { let send_error = match self.out.send_event_stream(&mut stream).await { Ok(_) => None, - Err(error) => { + Err(_) => { let (count, _) = stream.size_hint(); - emit!(StreamClosedError { error, count }); + emit!(StreamClosedError { count }); Some(crate::source_sender::ClosedError) } }; diff --git a/src/sources/aws_sqs/source.rs b/src/sources/aws_sqs/source.rs index 3a17e8c801fcb..2d01647516e54 100644 --- a/src/sources/aws_sqs/source.rs +++ b/src/sources/aws_sqs/source.rs @@ -180,7 +180,7 @@ impl SqsSource { } } } - Err(error) => emit!(StreamClosedError { error, count }), + Err(_) => emit!(StreamClosedError { count }), } } } diff --git a/src/sources/datadog_agent/mod.rs b/src/sources/datadog_agent/mod.rs index 32afa640ff05e..2e4dbf6656597 100644 --- a/src/sources/datadog_agent/mod.rs +++ b/src/sources/datadog_agent/mod.rs @@ -443,8 +443,8 @@ pub(crate) async fn handle_request( } else { out.send_batch(events).await } - .map_err(move |error: crate::source_sender::ClosedError| { - emit!(StreamClosedError { error, count }); + .map_err(|_| { + emit!(StreamClosedError { count }); warp::reject::custom(ApiError::ServerShutdown) })?; match receiver { diff --git a/src/sources/demo_logs.rs b/src/sources/demo_logs.rs index 6a31744047501..fa92cd3ac0681 100644 --- a/src/sources/demo_logs.rs +++ b/src/sources/demo_logs.rs @@ -264,8 +264,8 @@ async fn demo_logs_source( event }); - out.send_batch(events).await.map_err(|error| { - emit!(StreamClosedError { error, count }); + out.send_batch(events).await.map_err(|_| { + emit!(StreamClosedError { 
count }); })?; } Err(error) => { diff --git a/src/sources/docker_logs/mod.rs b/src/sources/docker_logs/mod.rs index 4c93b20abe12a..9dc7ffeeedd09 100644 --- a/src/sources/docker_logs/mod.rs +++ b/src/sources/docker_logs/mod.rs @@ -814,13 +814,10 @@ impl EventStreamBuilder { let result = { let mut stream = events_stream .map(move |event| add_hostname(event, &host_key, &hostname, self.log_namespace)); - self.out - .send_event_stream(&mut stream) - .await - .map_err(|error| { - let (count, _) = stream.size_hint(); - emit!(StreamClosedError { error, count }); - }) + self.out.send_event_stream(&mut stream).await.map_err(|_| { + let (count, _) = stream.size_hint(); + emit!(StreamClosedError { count }); + }) }; // End of stream diff --git a/src/sources/eventstoredb_metrics/mod.rs b/src/sources/eventstoredb_metrics/mod.rs index 1d205ae6799e2..7b15edff19035 100644 --- a/src/sources/eventstoredb_metrics/mod.rs +++ b/src/sources/eventstoredb_metrics/mod.rs @@ -137,8 +137,8 @@ fn eventstoredb( events_received.emit(CountByteSize(count, byte_size)); - if let Err(error) = cx.out.send_batch(metrics).await { - emit!(StreamClosedError { count, error }); + if (cx.out.send_batch(metrics).await).is_err() { + emit!(StreamClosedError { count }); break; } } diff --git a/src/sources/exec/mod.rs b/src/sources/exec/mod.rs index 5b163db025772..8af71919a3d76 100644 --- a/src/sources/exec/mod.rs +++ b/src/sources/exec/mod.rs @@ -498,8 +498,8 @@ async fn run_command( for event in &mut events { handle_event(&config, &hostname, &Some(stream.to_string()), pid, event, log_namespace); } - if let Err(error) = out.send_batch(events).await { - emit!(StreamClosedError { count, error }); + if (out.send_batch(events).await).is_err() { + emit!(StreamClosedError { count }); break; } }, diff --git a/src/sources/file.rs b/src/sources/file.rs index 5e72bb7676917..1c48a2cefe298 100644 --- a/src/sources/file.rs +++ b/src/sources/file.rs @@ -659,9 +659,9 @@ pub fn file_source( Ok(()) => { debug!("Finished sending."); } - Err(error) => { + Err(_) => { let (count, _) = messages.size_hint(); - emit!(StreamClosedError { error, count }); + emit!(StreamClosedError { count }); } } }); diff --git a/src/sources/file_descriptors/mod.rs b/src/sources/file_descriptors/mod.rs index 06710adec9c37..04189d33e871c 100644 --- a/src/sources/file_descriptors/mod.rs +++ b/src/sources/file_descriptors/mod.rs @@ -195,9 +195,9 @@ async fn process_stream( debug!("Finished sending."); Ok(()) } - Err(error) => { + Err(_) => { let (count, _) = stream.size_hint(); - emit!(StreamClosedError { error, count }); + emit!(StreamClosedError { count }); Err(()) } } diff --git a/src/sources/gcp_pubsub.rs b/src/sources/gcp_pubsub.rs index 262693551eb45..d38688592d4fd 100644 --- a/src/sources/gcp_pubsub.rs +++ b/src/sources/gcp_pubsub.rs @@ -618,7 +618,7 @@ impl PubsubSource { let count = events.len(); match self.out.send_batch(events).await { - Err(error) => emit!(StreamClosedError { error, count }), + Err(_) => emit!(StreamClosedError { count }), Ok(()) => match notifier { None => ack_ids .send(ids) diff --git a/src/sources/host_metrics/mod.rs b/src/sources/host_metrics/mod.rs index 9f577b3b85a1a..18fb45cb91fc8 100644 --- a/src/sources/host_metrics/mod.rs +++ b/src/sources/host_metrics/mod.rs @@ -296,8 +296,8 @@ impl HostMetricsConfig { bytes_received.emit(ByteSize(0)); let metrics = generator.capture_metrics().await; let count = metrics.len(); - if let Err(error) = out.send_batch(metrics).await { - emit!(StreamClosedError { count, error }); + if 
(out.send_batch(metrics).await).is_err() { + emit!(StreamClosedError { count }); return Err(()); } } diff --git a/src/sources/internal_logs.rs b/src/sources/internal_logs.rs index 0d22b3b310208..d74a97788a3b5 100644 --- a/src/sources/internal_logs.rs +++ b/src/sources/internal_logs.rs @@ -191,9 +191,9 @@ async fn run( Utc::now(), ); - if let Err(error) = out.send_event(Event::from(log)).await { + if (out.send_event(Event::from(log)).await).is_err() { // this wont trigger any infinite loop considering it stops the component - emit!(StreamClosedError { error, count: 1 }); + emit!(StreamClosedError { count: 1 }); return Err(()); } } diff --git a/src/sources/internal_metrics.rs b/src/sources/internal_metrics.rs index 908daab49e1f7..2b876aeaa8397 100644 --- a/src/sources/internal_metrics.rs +++ b/src/sources/internal_metrics.rs @@ -190,8 +190,8 @@ impl<'a> InternalMetrics<'a> { metric }); - if let Err(error) = self.out.send_batch(batch).await { - emit!(StreamClosedError { error, count }); + if (self.out.send_batch(batch).await).is_err() { + emit!(StreamClosedError { count }); return Err(()); } } diff --git a/src/sources/journald.rs b/src/sources/journald.rs index f6426d69e4cdd..3ec938bda0b47 100644 --- a/src/sources/journald.rs +++ b/src/sources/journald.rs @@ -606,8 +606,8 @@ impl<'a> Batch<'a> { finalizer.finalize(cursor, self.receiver).await; } } - Err(error) => { - emit!(StreamClosedError { error, count }); + Err(_) => { + emit!(StreamClosedError { count }); // `out` channel is closed, don't restart journalctl. self.exiting = Some(false); } diff --git a/src/sources/kafka.rs b/src/sources/kafka.rs index 39f16074f5342..ca904de314a62 100644 --- a/src/sources/kafka.rs +++ b/src/sources/kafka.rs @@ -449,8 +449,8 @@ async fn parse_message( let (batch, receiver) = BatchNotifier::new_with_receiver(); let mut stream = stream.map(|event| event.with_batch_notifier(&batch)); match out.send_event_stream(&mut stream).await { - Err(error) => { - emit!(StreamClosedError { error, count }); + Err(_) => { + emit!(StreamClosedError { count }); } Ok(_) => { // Drop stream to avoid borrowing `msg`: "[...] 
borrow might be used @@ -461,8 +461,8 @@ async fn parse_message( } } None => match out.send_event_stream(&mut stream).await { - Err(error) => { - emit!(StreamClosedError { error, count }); + Err(_) => { + emit!(StreamClosedError { count }); } Ok(_) => { if let Err(error) = diff --git a/src/sources/kubernetes_logs/mod.rs b/src/sources/kubernetes_logs/mod.rs index de609ff055758..17878ec54c28b 100644 --- a/src/sources/kubernetes_logs/mod.rs +++ b/src/sources/kubernetes_logs/mod.rs @@ -871,8 +871,7 @@ impl Source { .map(|result| { match result { Ok(Ok(())) => info!(message = "Event processing loop completed gracefully."), - Ok(Err(error)) => emit!(StreamClosedError { - error, + Ok(Err(_)) => emit!(StreamClosedError { count: events_count }), Err(error) => emit!(KubernetesLifecycleError { diff --git a/src/sources/mongodb_metrics/mod.rs b/src/sources/mongodb_metrics/mod.rs index a144b5f09c2a9..16dc2a79a23d3 100644 --- a/src/sources/mongodb_metrics/mod.rs +++ b/src/sources/mongodb_metrics/mod.rs @@ -147,8 +147,8 @@ impl SourceConfig for MongoDbMetricsConfig { let metrics = metrics.into_iter().flatten(); - if let Err(error) = cx.out.send_batch(metrics).await { - emit!(StreamClosedError { error, count }); + if (cx.out.send_batch(metrics).await).is_err() { + emit!(StreamClosedError { count }); return Err(()); } } diff --git a/src/sources/nats.rs b/src/sources/nats.rs index e5409836803b3..6600bffc1a256 100644 --- a/src/sources/nats.rs +++ b/src/sources/nats.rs @@ -238,8 +238,8 @@ async fn nats_source( event }); - out.send_batch(events).await.map_err(|error| { - emit!(StreamClosedError { error, count }); + out.send_batch(events).await.map_err(|_| { + emit!(StreamClosedError { count }); })?; } Err(error) => { diff --git a/src/sources/nginx_metrics/mod.rs b/src/sources/nginx_metrics/mod.rs index 0e6bb83049a12..6a849ddfac51d 100644 --- a/src/sources/nginx_metrics/mod.rs +++ b/src/sources/nginx_metrics/mod.rs @@ -135,8 +135,8 @@ impl SourceConfig for NginxMetricsConfig { let metrics = metrics.into_iter().flatten(); - if let Err(error) = cx.out.send_batch(metrics).await { - emit!(StreamClosedError { error, count }); + if (cx.out.send_batch(metrics).await).is_err() { + emit!(StreamClosedError { count }); return Err(()); } } diff --git a/src/sources/opentelemetry/grpc.rs b/src/sources/opentelemetry/grpc.rs index a7036d0175861..9916043520a3a 100644 --- a/src/sources/opentelemetry/grpc.rs +++ b/src/sources/opentelemetry/grpc.rs @@ -48,7 +48,7 @@ impl LogsService for Service { .send_batch_named(LOGS, events) .map_err(|error| { let message = error.to_string(); - emit!(StreamClosedError { error, count }); + emit!(StreamClosedError { count }); Status::unavailable(message) }) .and_then(|_| handle_batch_status(receiver)) diff --git a/src/sources/opentelemetry/http.rs b/src/sources/opentelemetry/http.rs index e2d5dede3edfd..be7ebef83ef7a 100644 --- a/src/sources/opentelemetry/http.rs +++ b/src/sources/opentelemetry/http.rs @@ -122,12 +122,10 @@ async fn handle_request( let receiver = BatchNotifier::maybe_apply_to(acknowledgements, &mut events); let count = events.len(); - out.send_batch_named(output, events) - .await - .map_err(move |error| { - emit!(StreamClosedError { error, count }); - warp::reject::custom(ApiError::ServerShutdown) - })?; + out.send_batch_named(output, events).await.map_err(|_| { + emit!(StreamClosedError { count }); + warp::reject::custom(ApiError::ServerShutdown) + })?; match receiver { None => Ok(protobuf(ExportLogsServiceResponse {}).into_response()), diff --git 
a/src/sources/postgresql_metrics.rs b/src/sources/postgresql_metrics.rs index 6c6d8505c6b9d..bca7a56c13317 100644 --- a/src/sources/postgresql_metrics.rs +++ b/src/sources/postgresql_metrics.rs @@ -227,8 +227,8 @@ impl SourceConfig for PostgresqlMetricsConfig { }); let metrics = metrics.into_iter().flatten(); - if let Err(error) = cx.out.send_batch(metrics).await { - emit!(StreamClosedError { error, count }); + if (cx.out.send_batch(metrics).await).is_err() { + emit!(StreamClosedError { count }); return Err(()); } } diff --git a/src/sources/redis/mod.rs b/src/sources/redis/mod.rs index c8ab0ed6de71e..03f4da5fe5dd8 100644 --- a/src/sources/redis/mod.rs +++ b/src/sources/redis/mod.rs @@ -279,8 +279,8 @@ impl InputHandler { event }); - if let Err(error) = self.cx.out.send_batch(events).await { - emit!(StreamClosedError { error, count }); + if (self.cx.out.send_batch(events).await).is_err() { + emit!(StreamClosedError { count }); return Err(()); } } diff --git a/src/sources/socket/udp.rs b/src/sources/socket/udp.rs index 1671be995a299..1951cc9ff01ff 100644 --- a/src/sources/socket/udp.rs +++ b/src/sources/socket/udp.rs @@ -269,8 +269,8 @@ pub(super) fn udp( tokio::select!{ result = out.send_batch(events) => { - if let Err(error) = result { - emit!(StreamClosedError { error, count }); + if result.is_err() { + emit!(StreamClosedError { count }); return Ok(()) } } diff --git a/src/sources/statsd/mod.rs b/src/sources/statsd/mod.rs index 4f18e1d14be1b..467dae41fec30 100644 --- a/src/sources/statsd/mod.rs +++ b/src/sources/statsd/mod.rs @@ -295,8 +295,8 @@ async fn statsd_udp( match frame { Ok(((events, _byte_size), _sock)) => { let count = events.len(); - if let Err(error) = out.send_batch(events).await { - emit!(StreamClosedError { error, count }); + if (out.send_batch(events).await).is_err() { + emit!(StreamClosedError { count }); } } Err(error) => { diff --git a/src/sources/syslog.rs b/src/sources/syslog.rs index 06c8a44ebd65d..728cda24a6c46 100644 --- a/src/sources/syslog.rs +++ b/src/sources/syslog.rs @@ -364,9 +364,9 @@ pub fn udp( debug!("Finished sending."); Ok(()) } - Err(error) => { + Err(_) => { let (count, _) = stream.size_hint(); - emit!(StreamClosedError { error, count }); + emit!(StreamClosedError { count }); Err(()) } } diff --git a/src/sources/util/http/prelude.rs b/src/sources/util/http/prelude.rs index 26f073f7bb7d1..1087a65b81f40 100644 --- a/src/sources/util/http/prelude.rs +++ b/src/sources/util/http/prelude.rs @@ -213,10 +213,10 @@ async fn handle_request( let count = events.len(); out.send_batch(events) - .map_err(move |error: crate::source_sender::ClosedError| { + .map_err(|_| { // can only fail if receiving end disconnected, so we are shutting down, // probably not gracefully. 
- emit!(StreamClosedError { error, count }); + emit!(StreamClosedError { count }); warp::reject::custom(RejectShuttingDown) }) .and_then(|_| handle_batch_status(receiver)) diff --git a/src/sources/util/http_client.rs b/src/sources/util/http_client.rs index aa5cdb1b8db7e..08f14b878608f 100644 --- a/src/sources/util/http_client.rs +++ b/src/sources/util/http_client.rs @@ -233,9 +233,9 @@ pub(crate) async fn call< debug!("Finished sending."); Ok(()) } - Err(error) => { + Err(_) => { let (count, _) = stream.size_hint(); - emit!(StreamClosedError { error, count }); + emit!(StreamClosedError { count }); Err(()) } } diff --git a/src/sources/util/net/tcp/mod.rs b/src/sources/util/net/tcp/mod.rs index e3b68e539d8aa..a31ba249a172d 100644 --- a/src/sources/util/net/tcp/mod.rs +++ b/src/sources/util/net/tcp/mod.rs @@ -405,8 +405,8 @@ async fn handle_stream( break; } } - Err(error) => { - emit!(StreamClosedError { error, count }); + Err(_) => { + emit!(StreamClosedError { count }); break; } } diff --git a/src/sources/util/unix_datagram.rs b/src/sources/util/unix_datagram.rs index 7a0f4b0e55eab..aa5c957cad060 100644 --- a/src/sources/util/unix_datagram.rs +++ b/src/sources/util/unix_datagram.rs @@ -114,8 +114,8 @@ async fn listen( handle_events(&mut events, received_from.clone()); let count = events.len(); - if let Err(error) = out.send_batch(events).await { - emit!(StreamClosedError { error, count }); + if (out.send_batch(events).await).is_err() { + emit!(StreamClosedError { count }); } }, Some(Err(error)) => { diff --git a/src/sources/util/unix_stream.rs b/src/sources/util/unix_stream.rs index f19c888362c30..3748ae005be16 100644 --- a/src/sources/util/unix_stream.rs +++ b/src/sources/util/unix_stream.rs @@ -118,8 +118,8 @@ pub fn build_unix_stream_source( handle_events(&mut events, Some(received_from.clone())); let count = events.len(); - if let Err(error) = out.send_batch(events).await { - emit!(StreamClosedError { error, count }); + if (out.send_batch(events).await).is_err() { + emit!(StreamClosedError { count }); } } Err(error) => { diff --git a/src/sources/vector/mod.rs b/src/sources/vector/mod.rs index 66aef5f7b4b19..a6f5ad5b049f3 100644 --- a/src/sources/vector/mod.rs +++ b/src/sources/vector/mod.rs @@ -78,7 +78,7 @@ impl proto::Service for Service { .send_batch(events) .map_err(|error| { let message = error.to_string(); - emit!(StreamClosedError { error, count }); + emit!(StreamClosedError { count }); Status::unavailable(message) }) .and_then(|_| handle_batch_status(receiver)) From 9c4539436ecbbf48dc0dd454ea25230d539b2c9b Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Wed, 14 Jun 2023 17:19:03 -0400 Subject: [PATCH 144/236] chore(codecs): consolidate enum types (#17688) Consolidate enum types to reduce duplicate type definitions and documentation. 
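
A minimal, self-contained sketch of the consolidation pattern this commit applies (the type names are simplified stand-ins, not the actual codec definitions): rather than a struct variant that re-declares the options a `*Config` type already documents, each enum variant now wraps that config type directly, so the options and their documentation are defined once:

    // Simplified stand-in types; the real codec configs derive `configurable_component`.
    #[derive(Default)]
    struct CharacterDelimitedOptions {
        delimiter: u8,
    }

    #[derive(Default)]
    struct CharacterDelimitedConfig {
        character_delimited: CharacterDelimitedOptions,
    }

    // Before: `CharacterDelimited { character_delimited: CharacterDelimitedOptions }`
    // duplicated the option definitions. After: the variant wraps the config type.
    enum FramingConfig {
        Bytes,
        CharacterDelimited(CharacterDelimitedConfig),
    }

    impl From<CharacterDelimitedConfig> for FramingConfig {
        fn from(config: CharacterDelimitedConfig) -> Self {
            Self::CharacterDelimited(config)
        }
    }

    fn main() {
        let framing: FramingConfig = CharacterDelimitedConfig::default().into();
        match framing {
            FramingConfig::CharacterDelimited(config) => {
                // Options remain reachable through the wrapped config.
                assert_eq!(config.character_delimited.delimiter, 0);
            }
            FramingConfig::Bytes => {}
        }
    }
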
--- lib/codecs/src/decoding/format/gelf.rs | 5 +- lib/codecs/src/decoding/format/json.rs | 5 +- lib/codecs/src/decoding/format/native_json.rs | 8 +- lib/codecs/src/decoding/format/syslog.rs | 10 +- .../decoding/framing/character_delimited.rs | 4 +- .../src/decoding/framing/newline_delimited.rs | 6 +- .../src/decoding/framing/octet_counting.rs | 4 +- lib/codecs/src/decoding/mod.rs | 205 +++++------------- .../encoding/framing/character_delimited.rs | 4 +- lib/codecs/src/encoding/mod.rs | 33 +-- src/components/validation/resources/mod.rs | 56 ++--- src/sources/aws_s3/mod.rs | 6 +- src/sources/datadog_agent/tests.rs | 16 +- src/sources/http_client/client.rs | 4 +- src/sources/http_client/integration_tests.rs | 36 +-- src/sources/http_client/tests.rs | 26 +-- src/sources/http_server.rs | 4 +- src/sources/socket/unix.rs | 8 +- 18 files changed, 144 insertions(+), 296 deletions(-) diff --git a/lib/codecs/src/decoding/format/gelf.rs b/lib/codecs/src/decoding/format/gelf.rs index 14da4d2266438..e5b7dbe96c315 100644 --- a/lib/codecs/src/decoding/format/gelf.rs +++ b/lib/codecs/src/decoding/format/gelf.rs @@ -26,13 +26,14 @@ use crate::{gelf_fields::*, VALID_FIELD_REGEX}; /// of vector will still work with the new relaxed decoding. /// Config used to build a `GelfDeserializer`. -#[derive(Debug, Clone, Default, Deserialize, Serialize)] +#[configurable_component] +#[derive(Debug, Clone, Default)] pub struct GelfDeserializerConfig { + /// GELF-specific decoding options. #[serde( default, skip_serializing_if = "vector_core::serde::skip_serializing_if_default" )] - /// GELF-specific decoding options. pub gelf: GelfDeserializerOptions, } diff --git a/lib/codecs/src/decoding/format/json.rs b/lib/codecs/src/decoding/format/json.rs index 49980122be493..67e7bc624bbdf 100644 --- a/lib/codecs/src/decoding/format/json.rs +++ b/lib/codecs/src/decoding/format/json.rs @@ -17,14 +17,13 @@ use super::{default_lossy, Deserializer}; /// Config used to build a `JsonDeserializer`. #[configurable_component] -#[derive(Debug, Clone, PartialEq, Eq, Derivative)] -#[derivative(Default)] +#[derive(Debug, Clone, Default)] pub struct JsonDeserializerConfig { + /// JSON-specific decoding options. #[serde( default, skip_serializing_if = "vector_core::serde::skip_serializing_if_default" )] - /// JSON-specific decoding options. pub json: JsonDeserializerOptions, } diff --git a/lib/codecs/src/decoding/format/native_json.rs b/lib/codecs/src/decoding/format/native_json.rs index 7a8a1914015de..43e09ec86f5f6 100644 --- a/lib/codecs/src/decoding/format/native_json.rs +++ b/lib/codecs/src/decoding/format/native_json.rs @@ -1,6 +1,5 @@ use bytes::Bytes; use derivative::Derivative; -use serde::{Deserialize, Serialize}; use smallvec::{smallvec, SmallVec}; use vector_config::configurable_component; use vector_core::{config::DataType, event::Event, schema}; @@ -11,9 +10,14 @@ use super::{default_lossy, Deserializer}; use vector_core::config::LogNamespace; /// Config used to build a `NativeJsonDeserializer`. -#[derive(Debug, Clone, Default, Deserialize, Serialize)] +#[configurable_component] +#[derive(Debug, Clone, Default)] pub struct NativeJsonDeserializerConfig { /// Vector's native JSON-specific decoding options. 
+ #[serde( + default, + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] pub native_json: NativeJsonDeserializerOptions, } diff --git a/lib/codecs/src/decoding/format/syslog.rs b/lib/codecs/src/decoding/format/syslog.rs index f9e173ba7d497..336d7c1aa232f 100644 --- a/lib/codecs/src/decoding/format/syslog.rs +++ b/lib/codecs/src/decoding/format/syslog.rs @@ -3,7 +3,6 @@ use chrono::{DateTime, Datelike, Utc}; use derivative::Derivative; use lookup::lookup_v2::parse_value_path; use lookup::{event_path, owned_value_path, OwnedTargetPath, OwnedValuePath, PathPrefix}; -use serde::{Deserialize, Serialize}; use smallvec::{smallvec, SmallVec}; use std::borrow::Cow; use std::collections::BTreeMap; @@ -20,10 +19,17 @@ use vrl::value::{kind::Collection, Kind}; use super::{default_lossy, Deserializer}; /// Config used to build a `SyslogDeserializer`. -#[derive(Debug, Clone, Default, Deserialize, Serialize)] +#[configurable_component] +#[derive(Debug, Clone, Default)] pub struct SyslogDeserializerConfig { + #[serde(skip)] source: Option<&'static str>, + /// Syslog-specific decoding options. + #[serde( + default, + skip_serializing_if = "vector_core::serde::skip_serializing_if_default" + )] pub syslog: SyslogDeserializerOptions, } diff --git a/lib/codecs/src/decoding/framing/character_delimited.rs b/lib/codecs/src/decoding/framing/character_delimited.rs index ef7e49c63f759..2c4d0ef918125 100644 --- a/lib/codecs/src/decoding/framing/character_delimited.rs +++ b/lib/codecs/src/decoding/framing/character_delimited.rs @@ -1,6 +1,5 @@ use bytes::{Buf, Bytes, BytesMut}; use memchr::memchr; -use serde::{Deserialize, Serialize}; use tokio_util::codec::Decoder; use tracing::{trace, warn}; use vector_config::configurable_component; @@ -8,7 +7,8 @@ use vector_config::configurable_component; use super::BoxedFramingError; /// Config used to build a `CharacterDelimitedDecoder`. -#[derive(Debug, Clone, Deserialize, Serialize)] +#[configurable_component] +#[derive(Debug, Clone)] pub struct CharacterDelimitedDecoderConfig { /// Options for the character delimited decoder. pub character_delimited: CharacterDelimitedDecoderOptions, diff --git a/lib/codecs/src/decoding/framing/newline_delimited.rs b/lib/codecs/src/decoding/framing/newline_delimited.rs index cc96d08d7ab40..6a1f2c81caca1 100644 --- a/lib/codecs/src/decoding/framing/newline_delimited.rs +++ b/lib/codecs/src/decoding/framing/newline_delimited.rs @@ -1,19 +1,19 @@ use bytes::{Bytes, BytesMut}; use derivative::Derivative; -use serde::{Deserialize, Serialize}; use tokio_util::codec::Decoder; use vector_config::configurable_component; use super::{BoxedFramingError, CharacterDelimitedDecoder}; /// Config used to build a `NewlineDelimitedDecoder`. -#[derive(Debug, Clone, Default, Deserialize, Serialize, PartialEq, Eq)] +#[configurable_component] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct NewlineDelimitedDecoderConfig { + /// Options for the newline delimited decoder. #[serde( default, skip_serializing_if = "vector_core::serde::skip_serializing_if_default" )] - /// Options for the newline delimited decoder. 
pub newline_delimited: NewlineDelimitedDecoderOptions, } diff --git a/lib/codecs/src/decoding/framing/octet_counting.rs b/lib/codecs/src/decoding/framing/octet_counting.rs index 0e9f2d15a4b4d..281a8b64dba03 100644 --- a/lib/codecs/src/decoding/framing/octet_counting.rs +++ b/lib/codecs/src/decoding/framing/octet_counting.rs @@ -2,7 +2,6 @@ use std::io; use bytes::{Buf, Bytes, BytesMut}; use derivative::Derivative; -use serde::{Deserialize, Serialize}; use tokio_util::codec::{LinesCodec, LinesCodecError}; use tracing::trace; use vector_config::configurable_component; @@ -10,7 +9,8 @@ use vector_config::configurable_component; use super::BoxedFramingError; /// Config used to build a `OctetCountingDecoder`. -#[derive(Debug, Clone, Default, Deserialize, Serialize)] +#[configurable_component] +#[derive(Debug, Clone, Default)] pub struct OctetCountingDecoderConfig { #[serde( default, diff --git a/lib/codecs/src/decoding/mod.rs b/lib/codecs/src/decoding/mod.rs index acf26f33a944f..96b3d04dee82c 100644 --- a/lib/codecs/src/decoding/mod.rs +++ b/lib/codecs/src/decoding/mod.rs @@ -73,9 +73,6 @@ impl StreamDecodingError for Error { /// Framing handles how events are separated when encoded in a raw byte form, where each event is /// a frame that must be prefixed, or delimited, in a way that marks where an event begins and /// ends within the byte stream. -// Unfortunately, copying options of the nested enum variants is necessary -// since `serde` doesn't allow `flatten`ing these: -// https://github.com/serde-rs/serde/issues/1402. #[configurable_component] #[derive(Clone, Debug)] #[serde(tag = "method", rename_all = "snake_case")] @@ -85,35 +82,18 @@ pub enum FramingConfig { Bytes, /// Byte frames which are delimited by a chosen character. - CharacterDelimited { - /// Options for the character delimited decoder. - character_delimited: CharacterDelimitedDecoderOptions, - }, + CharacterDelimited(CharacterDelimitedDecoderConfig), /// Byte frames which are prefixed by an unsigned big-endian 32-bit integer indicating the length. LengthDelimited, /// Byte frames which are delimited by a newline character. - NewlineDelimited { - #[serde( - default, - skip_serializing_if = "vector_core::serde::skip_serializing_if_default" - )] - /// Options for the newline delimited decoder. - newline_delimited: NewlineDelimitedDecoderOptions, - }, + NewlineDelimited(NewlineDelimitedDecoderConfig), /// Byte frames according to the [octet counting][octet_counting] format. /// /// [octet_counting]: https://tools.ietf.org/html/rfc6587#section-3.4.1 - OctetCounting { - #[serde( - default, - skip_serializing_if = "vector_core::serde::skip_serializing_if_default" - )] - /// Options for the octet counting decoder. 
- octet_counting: OctetCountingDecoderOptions, - }, + OctetCounting(OctetCountingDecoderConfig), } impl From for FramingConfig { @@ -124,9 +104,7 @@ impl From for FramingConfig { impl From for FramingConfig { fn from(config: CharacterDelimitedDecoderConfig) -> Self { - Self::CharacterDelimited { - character_delimited: config.character_delimited, - } + Self::CharacterDelimited(config) } } @@ -138,17 +116,13 @@ impl From for FramingConfig { impl From for FramingConfig { fn from(config: NewlineDelimitedDecoderConfig) -> Self { - Self::NewlineDelimited { - newline_delimited: config.newline_delimited, - } + Self::NewlineDelimited(config) } } impl From for FramingConfig { fn from(config: OctetCountingDecoderConfig) -> Self { - Self::OctetCounting { - octet_counting: config.octet_counting, - } + Self::OctetCounting(config) } } @@ -157,29 +131,12 @@ impl FramingConfig { pub fn build(&self) -> Framer { match self { FramingConfig::Bytes => Framer::Bytes(BytesDecoderConfig.build()), - FramingConfig::CharacterDelimited { - character_delimited, - } => Framer::CharacterDelimited( - CharacterDelimitedDecoderConfig { - character_delimited: character_delimited.clone(), - } - .build(), - ), + FramingConfig::CharacterDelimited(config) => Framer::CharacterDelimited(config.build()), FramingConfig::LengthDelimited => { Framer::LengthDelimited(LengthDelimitedDecoderConfig.build()) } - FramingConfig::NewlineDelimited { newline_delimited } => Framer::NewlineDelimited( - NewlineDelimitedDecoderConfig { - newline_delimited: newline_delimited.clone(), - } - .build(), - ), - FramingConfig::OctetCounting { octet_counting } => Framer::OctetCounting( - OctetCountingDecoderConfig { - octet_counting: octet_counting.clone(), - } - .build(), - ), + FramingConfig::NewlineDelimited(config) => Framer::NewlineDelimited(config.build()), + FramingConfig::OctetCounting(config) => Framer::OctetCounting(config.build()), } } } @@ -229,9 +186,6 @@ impl tokio_util::codec::Decoder for Framer { } /// Deserializer configuration. -// Unfortunately, copying options of the nested enum variants is necessary -// since `serde` doesn't allow `flatten`ing these: -// https://github.com/serde-rs/serde/issues/1402. #[configurable_component] #[derive(Clone, Debug)] #[serde(tag = "codec", rename_all = "snake_case")] @@ -244,14 +198,7 @@ pub enum DeserializerConfig { /// Decodes the raw bytes as [JSON][json]. /// /// [json]: https://www.json.org/ - Json { - /// JSON-specific decoding options. - #[serde( - default, - skip_serializing_if = "vector_core::serde::skip_serializing_if_default" - )] - json: JsonDeserializerOptions, - }, + Json(JsonDeserializerConfig), #[cfg(feature = "syslog")] /// Decodes the raw bytes as a Syslog message. @@ -261,14 +208,7 @@ pub enum DeserializerConfig { /// /// [rfc3164]: https://www.ietf.org/rfc/rfc3164.txt /// [rfc5424]: https://www.ietf.org/rfc/rfc5424.txt - Syslog { - /// Syslog-specific decoding options. - #[serde( - default, - skip_serializing_if = "vector_core::serde::skip_serializing_if_default" - )] - syslog: SyslogDeserializerOptions, - }, + Syslog(SyslogDeserializerConfig), /// Decodes the raw bytes as Vector’s [native Protocol Buffers format][vector_native_protobuf]. /// @@ -284,26 +224,12 @@ pub enum DeserializerConfig { /// /// [vector_native_json]: https://github.com/vectordotdev/vector/blob/master/lib/codecs/tests/data/native_encoding/schema.cue /// [experimental]: https://vector.dev/highlights/2022-03-31-native-event-codecs - NativeJson { - /// Vector's native JSON-specific decoding options. 
- #[serde( - default, - skip_serializing_if = "vector_core::serde::skip_serializing_if_default" - )] - native_json: NativeJsonDeserializerOptions, - }, + NativeJson(NativeJsonDeserializerConfig), /// Decodes the raw bytes as a [GELF][gelf] message. /// /// [gelf]: https://docs.graylog.org/docs/gelf - Gelf { - /// GELF-specific decoding options. - #[serde( - default, - skip_serializing_if = "vector_core::serde::skip_serializing_if_default" - )] - gelf: GelfDeserializerOptions, - }, + Gelf(GelfDeserializerConfig), } impl From for DeserializerConfig { @@ -314,22 +240,32 @@ impl From for DeserializerConfig { impl From for DeserializerConfig { fn from(config: JsonDeserializerConfig) -> Self { - Self::Json { json: config.json } + Self::Json(config) } } #[cfg(feature = "syslog")] impl From for DeserializerConfig { fn from(config: SyslogDeserializerConfig) -> Self { - Self::Syslog { - syslog: config.syslog, - } + Self::Syslog(config) } } impl From for DeserializerConfig { fn from(config: GelfDeserializerConfig) -> Self { - Self::Gelf { gelf: config.gelf } + Self::Gelf(config) + } +} + +impl From for DeserializerConfig { + fn from(_: NativeDeserializerConfig) -> Self { + Self::Native + } +} + +impl From for DeserializerConfig { + fn from(config: NativeJsonDeserializerConfig) -> Self { + Self::NativeJson(config) } } @@ -338,20 +274,12 @@ impl DeserializerConfig { pub fn build(&self) -> Deserializer { match self { DeserializerConfig::Bytes => Deserializer::Bytes(BytesDeserializerConfig.build()), - DeserializerConfig::Json { json } => { - Deserializer::Json(JsonDeserializerConfig::new(json.clone()).build()) - } + DeserializerConfig::Json(config) => Deserializer::Json(config.build()), #[cfg(feature = "syslog")] - DeserializerConfig::Syslog { syslog } => { - Deserializer::Syslog(SyslogDeserializerConfig::new(syslog.clone()).build()) - } + DeserializerConfig::Syslog(config) => Deserializer::Syslog(config.build()), DeserializerConfig::Native => Deserializer::Native(NativeDeserializerConfig.build()), - DeserializerConfig::NativeJson { native_json } => Deserializer::NativeJson( - NativeJsonDeserializerConfig::new(native_json.clone()).build(), - ), - DeserializerConfig::Gelf { gelf } => { - Deserializer::Gelf(GelfDeserializerConfig::new(gelf.clone()).build()) - } + DeserializerConfig::NativeJson(config) => Deserializer::NativeJson(config.build()), + DeserializerConfig::Gelf(config) => Deserializer::Gelf(config.build()), } } @@ -360,15 +288,13 @@ impl DeserializerConfig { match self { DeserializerConfig::Native => FramingConfig::LengthDelimited, DeserializerConfig::Bytes - | DeserializerConfig::Json { .. } - | DeserializerConfig::Gelf { .. } - | DeserializerConfig::NativeJson { .. } => FramingConfig::NewlineDelimited { - newline_delimited: Default::default(), - }, + | DeserializerConfig::Json(_) + | DeserializerConfig::Gelf(_) + | DeserializerConfig::NativeJson(_) => { + FramingConfig::NewlineDelimited(Default::default()) + } #[cfg(feature = "syslog")] - DeserializerConfig::Syslog { .. 
} => FramingConfig::NewlineDelimited { - newline_delimited: Default::default(), - }, + DeserializerConfig::Syslog(_) => FramingConfig::NewlineDelimited(Default::default()), } } @@ -376,20 +302,12 @@ impl DeserializerConfig { pub fn output_type(&self) -> DataType { match self { DeserializerConfig::Bytes => BytesDeserializerConfig.output_type(), - DeserializerConfig::Json { json } => { - JsonDeserializerConfig::new(json.clone()).output_type() - } + DeserializerConfig::Json(config) => config.output_type(), #[cfg(feature = "syslog")] - DeserializerConfig::Syslog { syslog } => { - SyslogDeserializerConfig::new(syslog.clone()).output_type() - } + DeserializerConfig::Syslog(config) => config.output_type(), DeserializerConfig::Native => NativeDeserializerConfig.output_type(), - DeserializerConfig::NativeJson { native_json } => { - NativeJsonDeserializerConfig::new(native_json.clone()).output_type() - } - DeserializerConfig::Gelf { gelf } => { - GelfDeserializerConfig::new(gelf.clone()).output_type() - } + DeserializerConfig::NativeJson(config) => config.output_type(), + DeserializerConfig::Gelf(config) => config.output_type(), } } @@ -397,21 +315,12 @@ impl DeserializerConfig { pub fn schema_definition(&self, log_namespace: LogNamespace) -> schema::Definition { match self { DeserializerConfig::Bytes => BytesDeserializerConfig.schema_definition(log_namespace), - DeserializerConfig::Json { json } => { - JsonDeserializerConfig::new(json.clone()).schema_definition(log_namespace) - } + DeserializerConfig::Json(config) => config.schema_definition(log_namespace), #[cfg(feature = "syslog")] - DeserializerConfig::Syslog { syslog } => { - SyslogDeserializerConfig::new(syslog.clone()).schema_definition(log_namespace) - } + DeserializerConfig::Syslog(config) => config.schema_definition(log_namespace), DeserializerConfig::Native => NativeDeserializerConfig.schema_definition(log_namespace), - DeserializerConfig::NativeJson { native_json } => { - NativeJsonDeserializerConfig::new(native_json.clone()) - .schema_definition(log_namespace) - } - DeserializerConfig::Gelf { gelf } => { - GelfDeserializerConfig::new(gelf.clone()).schema_definition(log_namespace) - } + DeserializerConfig::NativeJson(config) => config.schema_definition(log_namespace), + DeserializerConfig::Gelf(config) => config.schema_definition(log_namespace), } } @@ -419,31 +328,31 @@ impl DeserializerConfig { pub const fn content_type(&self, framer: &FramingConfig) -> &'static str { match (&self, framer) { ( - DeserializerConfig::Json { .. } | DeserializerConfig::NativeJson { .. }, - FramingConfig::NewlineDelimited { .. }, + DeserializerConfig::Json(_) | DeserializerConfig::NativeJson(_), + FramingConfig::NewlineDelimited(_), ) => "application/x-ndjson", ( - DeserializerConfig::Gelf { .. } - | DeserializerConfig::Json { .. } - | DeserializerConfig::NativeJson { .. }, - FramingConfig::CharacterDelimited { + DeserializerConfig::Gelf(_) + | DeserializerConfig::Json(_) + | DeserializerConfig::NativeJson(_), + FramingConfig::CharacterDelimited(CharacterDelimitedDecoderConfig { character_delimited: CharacterDelimitedDecoderOptions { delimiter: b',', max_length: Some(usize::MAX), }, - }, + }), ) => "application/json", (DeserializerConfig::Native, _) => "application/octet-stream", ( - DeserializerConfig::Json { .. } - | DeserializerConfig::NativeJson { .. } + DeserializerConfig::Json(_) + | DeserializerConfig::NativeJson(_) | DeserializerConfig::Bytes - | DeserializerConfig::Gelf { .. 
}, + | DeserializerConfig::Gelf(_), _, ) => "text/plain", #[cfg(feature = "syslog")] - (DeserializerConfig::Syslog { .. }, _) => "text/plain", + (DeserializerConfig::Syslog(_), _) => "text/plain", } } } diff --git a/lib/codecs/src/encoding/framing/character_delimited.rs b/lib/codecs/src/encoding/framing/character_delimited.rs index f93c279d61cf8..2e536976618b8 100644 --- a/lib/codecs/src/encoding/framing/character_delimited.rs +++ b/lib/codecs/src/encoding/framing/character_delimited.rs @@ -1,12 +1,12 @@ use bytes::{BufMut, BytesMut}; -use serde::{Deserialize, Serialize}; use tokio_util::codec::Encoder; use vector_config::configurable_component; use super::BoxedFramingError; /// Config used to build a `CharacterDelimitedEncoder`. -#[derive(Debug, Clone, Deserialize, Serialize)] +#[configurable_component] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct CharacterDelimitedEncoderConfig { /// Options for the character delimited encoder. pub character_delimited: CharacterDelimitedEncoderOptions, diff --git a/lib/codecs/src/encoding/mod.rs b/lib/codecs/src/encoding/mod.rs index d798e8d7bf9d9..f9516411720d1 100644 --- a/lib/codecs/src/encoding/mod.rs +++ b/lib/codecs/src/encoding/mod.rs @@ -61,10 +61,7 @@ pub enum FramingConfig { Bytes, /// Event data is delimited by a single ASCII (7-bit) character. - CharacterDelimited { - /// Options for the character delimited encoder. - character_delimited: CharacterDelimitedEncoderOptions, - }, + CharacterDelimited(CharacterDelimitedEncoderConfig), /// Event data is prefixed with its length in bytes. /// @@ -83,9 +80,7 @@ impl From for FramingConfig { impl From for FramingConfig { fn from(config: CharacterDelimitedEncoderConfig) -> Self { - Self::CharacterDelimited { - character_delimited: config.character_delimited, - } + Self::CharacterDelimited(config) } } @@ -106,14 +101,7 @@ impl FramingConfig { pub fn build(&self) -> Framer { match self { FramingConfig::Bytes => Framer::Bytes(BytesEncoderConfig.build()), - FramingConfig::CharacterDelimited { - character_delimited, - } => Framer::CharacterDelimited( - CharacterDelimitedEncoderConfig { - character_delimited: character_delimited.clone(), - } - .build(), - ), + FramingConfig::CharacterDelimited(config) => Framer::CharacterDelimited(config.build()), FramingConfig::LengthDelimited => { Framer::LengthDelimited(LengthDelimitedEncoderConfig.build()) } @@ -201,10 +189,7 @@ pub enum SerializerConfig { /// /// This codec must be configured with fields to encode. /// - Csv( - /// Options for the CSV encoder. - CsvSerializerConfig, - ), + Csv(CsvSerializerConfig), /// Encodes an event as a [GELF][gelf] message. /// @@ -214,10 +199,7 @@ pub enum SerializerConfig { /// Encodes an event as [JSON][json]. /// /// [json]: https://www.json.org/ - Json( - /// Encoding options specific to the text serializer. - JsonSerializerConfig, - ), + Json(JsonSerializerConfig), /// Encodes an event as a [logfmt][logfmt] message. /// @@ -257,10 +239,7 @@ pub enum SerializerConfig { /// Be careful if you are modifying your log events (for example, by using a `remap` /// transform) and removing the message field while doing additional parsing on it, as this /// could lead to the encoding emitting empty strings for the given event. - Text( - /// Encoding options specific to the text serializer. 
- TextSerializerConfig, - ), + Text(TextSerializerConfig), } impl From for SerializerConfig { diff --git a/src/components/validation/resources/mod.rs b/src/components/validation/resources/mod.rs index a7e99bc998a17..2b9fc3c542ccb 100644 --- a/src/components/validation/resources/mod.rs +++ b/src/components/validation/resources/mod.rs @@ -2,7 +2,7 @@ mod event; mod http; use codecs::{ - decoding::{self, DeserializerConfig, NewlineDelimitedDecoderOptions}, + decoding::{self, DeserializerConfig}, encoding::{ self, Framer, FramingConfig, JsonSerializerConfig, SerializerConfig, TextSerializerConfig, }, @@ -160,20 +160,18 @@ fn deserializer_config_to_serializer(config: &DeserializerConfig) -> encoding::S fn decoder_framing_to_encoding_framer(framing: &decoding::FramingConfig) -> encoding::Framer { let framing_config = match framing { decoding::FramingConfig::Bytes => encoding::FramingConfig::Bytes, - decoding::FramingConfig::CharacterDelimited { - character_delimited, - } => encoding::FramingConfig::CharacterDelimited { - character_delimited: encoding::CharacterDelimitedEncoderOptions { - delimiter: character_delimited.delimiter, - }, - }, - decoding::FramingConfig::LengthDelimited => encoding::FramingConfig::LengthDelimited, - decoding::FramingConfig::NewlineDelimited { .. } => { - encoding::FramingConfig::NewlineDelimited + decoding::FramingConfig::CharacterDelimited(config) => { + encoding::FramingConfig::CharacterDelimited(encoding::CharacterDelimitedEncoderConfig { + character_delimited: encoding::CharacterDelimitedEncoderOptions { + delimiter: config.character_delimited.delimiter, + }, + }) } + decoding::FramingConfig::LengthDelimited => encoding::FramingConfig::LengthDelimited, + decoding::FramingConfig::NewlineDelimited(_) => encoding::FramingConfig::NewlineDelimited, // TODO: There's no equivalent octet counting framer for encoding... although // there's no particular reason that would make it hard to write. - decoding::FramingConfig::OctetCounting { .. } => todo!(), + decoding::FramingConfig::OctetCounting(_) => todo!(), }; framing_config.build() @@ -183,17 +181,11 @@ fn serializer_config_to_deserializer(config: &SerializerConfig) -> decoding::Des let deserializer_config = match config { SerializerConfig::Avro { .. } => todo!(), SerializerConfig::Csv { .. 
} => todo!(), - SerializerConfig::Gelf => DeserializerConfig::Gelf { - gelf: Default::default(), - }, - SerializerConfig::Json(_) => DeserializerConfig::Json { - json: Default::default(), - }, + SerializerConfig::Gelf => DeserializerConfig::Gelf(Default::default()), + SerializerConfig::Json(_) => DeserializerConfig::Json(Default::default()), SerializerConfig::Logfmt => todo!(), SerializerConfig::Native => DeserializerConfig::Native, - SerializerConfig::NativeJson => DeserializerConfig::NativeJson { - native_json: Default::default(), - }, + SerializerConfig::NativeJson => DeserializerConfig::NativeJson(Default::default()), SerializerConfig::RawMessage | SerializerConfig::Text(_) => DeserializerConfig::Bytes, }; @@ -203,18 +195,18 @@ fn serializer_config_to_deserializer(config: &SerializerConfig) -> decoding::Des fn encoder_framing_to_decoding_framer(framing: encoding::FramingConfig) -> decoding::Framer { let framing_config = match framing { encoding::FramingConfig::Bytes => decoding::FramingConfig::Bytes, - encoding::FramingConfig::CharacterDelimited { - character_delimited, - } => decoding::FramingConfig::CharacterDelimited { - character_delimited: decoding::CharacterDelimitedDecoderOptions { - delimiter: character_delimited.delimiter, - max_length: None, - }, - }, + encoding::FramingConfig::CharacterDelimited(config) => { + decoding::FramingConfig::CharacterDelimited(decoding::CharacterDelimitedDecoderConfig { + character_delimited: decoding::CharacterDelimitedDecoderOptions { + delimiter: config.character_delimited.delimiter, + max_length: None, + }, + }) + } encoding::FramingConfig::LengthDelimited => decoding::FramingConfig::LengthDelimited, - encoding::FramingConfig::NewlineDelimited => decoding::FramingConfig::NewlineDelimited { - newline_delimited: NewlineDelimitedDecoderOptions::default(), - }, + encoding::FramingConfig::NewlineDelimited => { + decoding::FramingConfig::NewlineDelimited(Default::default()) + } }; framing_config.build() diff --git a/src/sources/aws_s3/mod.rs b/src/sources/aws_s3/mod.rs index 17ec4cb45cd82..02a6cbcf6a9b5 100644 --- a/src/sources/aws_s3/mod.rs +++ b/src/sources/aws_s3/mod.rs @@ -3,7 +3,7 @@ use std::{convert::TryInto, io::ErrorKind}; use async_compression::tokio::bufread; use aws_sdk_s3::types::ByteStream; use codecs::decoding::{DeserializerConfig, FramingConfig, NewlineDelimitedDecoderOptions}; -use codecs::BytesDeserializerConfig; +use codecs::{BytesDeserializerConfig, NewlineDelimitedDecoderConfig}; use futures::{stream, stream::StreamExt, TryStreamExt}; use lookup::owned_value_path; use snafu::Snafu; @@ -133,9 +133,9 @@ pub struct AwsS3Config { const fn default_framing() -> FramingConfig { // This is used for backwards compatibility. It used to be the only (hardcoded) option. 
- FramingConfig::NewlineDelimited { + FramingConfig::NewlineDelimited(NewlineDelimitedDecoderConfig { newline_delimited: NewlineDelimitedDecoderOptions { max_length: None }, - } + }) } impl_generate_config_from_default!(AwsS3Config); diff --git a/src/sources/datadog_agent/tests.rs b/src/sources/datadog_agent/tests.rs index ed3c5d3a4d9d4..4fc3ef4321ee1 100644 --- a/src/sources/datadog_agent/tests.rs +++ b/src/sources/datadog_agent/tests.rs @@ -1595,9 +1595,7 @@ fn test_config_outputs() { ( "json / single output", TestCase { - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), multiple_outputs: false, want: HashMap::from([( None, @@ -1622,9 +1620,7 @@ fn test_config_outputs() { ( "json / multiple output", TestCase { - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), multiple_outputs: true, want: HashMap::from([ ( @@ -1666,9 +1662,7 @@ fn test_config_outputs() { ( "syslog / single output", TestCase { - decoding: DeserializerConfig::Syslog { - syslog: Default::default(), - }, + decoding: DeserializerConfig::Syslog(Default::default()), multiple_outputs: false, want: HashMap::from([( None, @@ -1743,9 +1737,7 @@ fn test_config_outputs() { ( "syslog / multiple output", TestCase { - decoding: DeserializerConfig::Syslog { - syslog: Default::default(), - }, + decoding: DeserializerConfig::Syslog(Default::default()), multiple_outputs: true, want: HashMap::from([ ( diff --git a/src/sources/http_client/client.rs b/src/sources/http_client/client.rs index 6ba6ea77220f1..55036ccde446b 100644 --- a/src/sources/http_client/client.rs +++ b/src/sources/http_client/client.rs @@ -235,9 +235,7 @@ impl ValidatableComponent for HttpClientConfig { let config = Self { endpoint: uri.to_string(), interval: Duration::from_secs(1), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), ..Default::default() }; diff --git a/src/sources/http_client/integration_tests.rs b/src/sources/http_client/integration_tests.rs index 5927265abd2bf..f7b04403e45d3 100644 --- a/src/sources/http_client/integration_tests.rs +++ b/src/sources/http_client/integration_tests.rs @@ -96,9 +96,7 @@ async fn collected_logs_json() { endpoint: format!("{}/logs/json.json", dufs_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -122,9 +120,7 @@ async fn collected_metrics_native_json() { endpoint: format!("{}/metrics/native.json", dufs_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::NativeJson { - native_json: Default::default(), - }, + decoding: DeserializerConfig::NativeJson(Default::default()), framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -153,9 +149,7 @@ async fn collected_trace_native_json() { endpoint: format!("{}/traces/native.json", dufs_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::NativeJson { - native_json: Default::default(), - }, + decoding: DeserializerConfig::NativeJson(Default::default()), framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -179,9 +173,7 @@ async fn unauthorized_no_auth() { endpoint: 
format!("{}/logs/json.json", dufs_auth_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -199,9 +191,7 @@ async fn unauthorized_wrong_auth() { endpoint: format!("{}/logs/json.json", dufs_auth_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -222,9 +212,7 @@ async fn authorized() { endpoint: format!("{}/logs/json.json", dufs_auth_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -245,9 +233,7 @@ async fn tls_invalid_ca() { endpoint: format!("{}/logs/json.json", dufs_https_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -268,9 +254,7 @@ async fn tls_valid() { endpoint: format!("{}/logs/json.json", dufs_https_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, @@ -292,9 +276,7 @@ async fn shutdown() { endpoint: format!("{}/logs/json.json", dufs_address()), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, diff --git a/src/sources/http_client/tests.rs b/src/sources/http_client/tests.rs index bf1e6b007511a..ecd799a73696b 100644 --- a/src/sources/http_client/tests.rs +++ b/src/sources/http_client/tests.rs @@ -1,13 +1,11 @@ +use codecs::CharacterDelimitedDecoderConfig; use std::collections::HashMap; use tokio::time::Duration; use warp::{http::HeaderMap, Filter}; use crate::sources::util::http::HttpMethod; use crate::{serde::default_decoding, serde::default_framing_message_based}; -use codecs::decoding::{ - CharacterDelimitedDecoderOptions, DeserializerConfig, FramingConfig, - NewlineDelimitedDecoderOptions, -}; +use codecs::decoding::{CharacterDelimitedDecoderOptions, DeserializerConfig, FramingConfig}; use vector_core::event::Event; use super::HttpClientConfig; @@ -78,12 +76,8 @@ async fn json_decoding_newline_delimited() { endpoint: format!("http://{}/endpoint", in_addr), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, - framing: FramingConfig::NewlineDelimited { - newline_delimited: NewlineDelimitedDecoderOptions::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), + framing: FramingConfig::NewlineDelimited(Default::default()), headers: HashMap::new(), method: HttpMethod::Get, tls: None, @@ -110,15 +104,13 @@ async fn json_decoding_character_delimited() { endpoint: 
format!("http://{}/endpoint", in_addr), interval: INTERVAL, query: HashMap::new(), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, - framing: FramingConfig::CharacterDelimited { + decoding: DeserializerConfig::Json(Default::default()), + framing: FramingConfig::CharacterDelimited(CharacterDelimitedDecoderConfig { character_delimited: CharacterDelimitedDecoderOptions { delimiter: b',', max_length: Some(usize::MAX), }, - }, + }), headers: HashMap::new(), method: HttpMethod::Get, tls: None, @@ -150,9 +142,7 @@ async fn request_query_applied() { vec!["val1".to_string(), "val2".to_string()], ), ]), - decoding: DeserializerConfig::Json { - json: Default::default(), - }, + decoding: DeserializerConfig::Json(Default::default()), framing: default_framing_message_based(), headers: HashMap::new(), method: HttpMethod::Get, diff --git a/src/sources/http_server.rs b/src/sources/http_server.rs index 50f9b3fd0bd32..2bd2afecee34f 100644 --- a/src/sources/http_server.rs +++ b/src/sources/http_server.rs @@ -256,9 +256,7 @@ impl_generate_config_from_default!(SimpleHttpConfig); impl ValidatableComponent for SimpleHttpConfig { fn validation_configuration() -> ValidationConfiguration { let config = Self { - decoding: Some(DeserializerConfig::Json { - json: Default::default(), - }), + decoding: Some(DeserializerConfig::Json(Default::default())), ..Default::default() }; diff --git a/src/sources/socket/unix.rs b/src/sources/socket/unix.rs index 2144a8c8e0278..4d09e128a2c94 100644 --- a/src/sources/socket/unix.rs +++ b/src/sources/socket/unix.rs @@ -127,11 +127,9 @@ pub(super) fn unix_datagram( let max_length = config .framing .and_then(|framing| match framing { - FramingConfig::CharacterDelimited { - character_delimited, - } => character_delimited.max_length, - FramingConfig::NewlineDelimited { newline_delimited } => newline_delimited.max_length, - FramingConfig::OctetCounting { octet_counting } => octet_counting.max_length, + FramingConfig::CharacterDelimited(config) => config.character_delimited.max_length, + FramingConfig::NewlineDelimited(config) => config.newline_delimited.max_length, + FramingConfig::OctetCounting(config) => config.octet_counting.max_length, _ => None, }) .unwrap_or_else(crate::serde::default_max_length); From 2263756d0a39cb99d62a826ff0993f461ae80937 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Thu, 15 Jun 2023 10:04:32 -0700 Subject: [PATCH 145/236] chore(deps, releasing): Update to Alpine 3.18 (#17695) Also added a note to the release checklists to look for new versions. Ideally dependabot could help us keep up-to-date here. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- .github/ISSUE_TEMPLATE/minor-release.md | 2 ++ distribution/docker/alpine/Dockerfile | 4 ++-- distribution/docker/distroless-static/Dockerfile | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/minor-release.md b/.github/ISSUE_TEMPLATE/minor-release.md index 40f950e1f6982..0ea25a30c0432 100644 --- a/.github/ISSUE_TEMPLATE/minor-release.md +++ b/.github/ISSUE_TEMPLATE/minor-release.md @@ -11,6 +11,8 @@ The week before the release: - `git fetch && git checkout origin/master && git checkout -b v0. && git push -u` - [ ] Create a new release preparation branch from `master` - `git checkout -b prepare-v0. && git push -u` +- [ ] Check if there is a newer version of Alpine or Debian available to update the release images + in `distribution/docker/`. Update if so. 
- [ ] Run `cargo vdev build release-cue` to generate a new cue file for the release - [ ] Add `changelog` key to generated cue file - [ ] `git log --no-merges --cherry-pick --right-only ...` diff --git a/distribution/docker/alpine/Dockerfile b/distribution/docker/alpine/Dockerfile index 3ca4900b4fe2d..c3479f1001191 100644 --- a/distribution/docker/alpine/Dockerfile +++ b/distribution/docker/alpine/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/alpine:3.14 AS builder +FROM docker.io/alpine:3.18 AS builder WORKDIR /vector @@ -7,7 +7,7 @@ RUN tar -xvf vector-0*-"$(cat /etc/apk/arch)"-unknown-linux-musl*.tar.gz --strip RUN mkdir -p /var/lib/vector -FROM docker.io/alpine:3.14 +FROM docker.io/alpine:3.18 RUN apk --no-cache add ca-certificates tzdata COPY --from=builder /vector/bin/* /usr/local/bin/ diff --git a/distribution/docker/distroless-static/Dockerfile b/distribution/docker/distroless-static/Dockerfile index dc5ef9cd955bd..874c165d64457 100644 --- a/distribution/docker/distroless-static/Dockerfile +++ b/distribution/docker/distroless-static/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/alpine:3.14 AS builder +FROM docker.io/alpine:3.18 AS builder WORKDIR /vector From 079d895ebffeb62cf51cb11144b17fd481292510 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Thu, 15 Jun 2023 15:55:48 -0400 Subject: [PATCH 146/236] chore: Add docker config to dependabot (#17696) --- .github/dependabot.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 102567ca0e6a5..8b1f14d85576a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -10,6 +10,16 @@ updates: commit-message: prefix: "chore(deps)" open-pull-requests-limit: 100 + - package-ecosystem: "docker" + directory: "/distribution/docker/" + schedule: + interval: "daily" + time: "04:00" # UTC + labels: + - "domain: deps" + commit-message: + prefix: "chore(deps)" + open-pull-requests-limit: 100 - package-ecosystem: "github-actions" directory: "/" schedule: From 960635387235ea270d748038a3a0ddd615813f29 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Thu, 15 Jun 2023 14:00:58 -0600 Subject: [PATCH 147/236] chore(config): Make config schema output ordered (#17694) Key/value maps in the config schema currently use an `IndexMap`. This provides for stable ordering of the output between runs. Unfortunately, due to the use of initializers via `inventory`, that ordering is dependent on the order in which those initializers are run, which is in turn dependent on the choices the linker (and link-time optimization) makes when building Vector. The result is that config schemas can be very hard to compare between builds due to large blocks of it moving around in the output file. This change changes this type to a `BTreeMap`, which is fully ordered internally. This will result in a consistent ordering of maps, hopefully producing config schema output that is more consistent. 
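
As a standalone illustration of the ordering property this change relies on (not part of the patch; the component names below are invented for the example), a `BTreeMap` iterates in key order regardless of the order in which entries were inserted, so two runs that register the same definitions in different orders still serialize identically:

    use std::collections::BTreeMap;

    fn main() {
        // Two hypothetical runs where initializers register the same schema
        // definitions in different orders (e.g. due to linker/LTO choices).
        let run_a = ["sinks::console", "sources::kafka", "transforms::remap"];
        let run_b = ["transforms::remap", "sinks::console", "sources::kafka"];

        let map_a: BTreeMap<&str, ()> = run_a.iter().map(|k| (*k, ())).collect();
        let map_b: BTreeMap<&str, ()> = run_b.iter().map(|k| (*k, ())).collect();

        // A BTreeMap always iterates in key order, so both runs yield the same
        // sequence of definitions, keeping the generated schema stable between builds.
        assert!(map_a.keys().eq(map_b.keys()));
        assert_eq!(
            map_a.keys().copied().collect::<Vec<_>>(),
            vec!["sinks::console", "sources::kafka", "transforms::remap"]
        );
    }
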
--- Cargo.lock | 1 - lib/vector-config-common/Cargo.toml | 1 - lib/vector-config-common/src/schema/mod.rs | 4 +++- .../src/schema/visitors/inline_single.rs | 12 ++---------- lib/vector-config/src/schema/visitors/merge.rs | 4 ++-- lib/vector-config/src/schema/visitors/unevaluated.rs | 4 ++-- 6 files changed, 9 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aa907ba7203e4..8a3b47031e3b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9408,7 +9408,6 @@ version = "0.1.0" dependencies = [ "convert_case 0.6.0", "darling 0.13.4", - "indexmap", "once_cell", "proc-macro2 1.0.60", "quote 1.0.28", diff --git a/lib/vector-config-common/Cargo.toml b/lib/vector-config-common/Cargo.toml index 9e29eb538f98a..0d4ddaed219a0 100644 --- a/lib/vector-config-common/Cargo.toml +++ b/lib/vector-config-common/Cargo.toml @@ -7,7 +7,6 @@ license = "MPL-2.0" [dependencies] convert_case = { version = "0.6", default-features = false } darling = { version = "0.13", default-features = false, features = ["suggestions"] } -indexmap = { version = "1.9", default-features = false, features = ["serde"] } once_cell = { version = "1", default-features = false, features = ["std"] } proc-macro2 = { version = "1.0", default-features = false } serde = { version = "1.0", default-features = false, features = ["derive"] } diff --git a/lib/vector-config-common/src/schema/mod.rs b/lib/vector-config-common/src/schema/mod.rs index 2f4c6876de35e..763cdd217e8fc 100644 --- a/lib/vector-config-common/src/schema/mod.rs +++ b/lib/vector-config-common/src/schema/mod.rs @@ -8,7 +8,9 @@ pub mod visit; pub(crate) const DEFINITIONS_PREFIX: &str = "#/definitions/"; -pub type Map = indexmap::IndexMap; +// We have chosen the `BTree*` types here instead of hash tables to provide for a consistent +// ordering of the output elements between runs and changes to the configuration. +pub type Map = std::collections::BTreeMap; pub type Set = std::collections::BTreeSet; pub use self::gen::{SchemaGenerator, SchemaSettings}; diff --git a/lib/vector-config/src/schema/visitors/inline_single.rs b/lib/vector-config/src/schema/visitors/inline_single.rs index 68347e36937dd..20035118b4470 100644 --- a/lib/vector-config/src/schema/visitors/inline_single.rs +++ b/lib/vector-config/src/schema/visitors/inline_single.rs @@ -43,17 +43,11 @@ impl Visitor for InlineSingleUseReferencesVisitor { occurrence_visitor.visit_root_schema(root); let occurrence_map = occurrence_visitor.into_occurrences(); - let eligible_to_inline = occurrence_map + self.eligible_to_inline = occurrence_map .into_iter() // Filter out any schemas which have more than one occurrence, as naturally, we're // trying to inline single-use schema references. :) - .filter_map(|(def_name, occurrences)| { - if occurrences == 1 { - Some(def_name) - } else { - None - } - }) + .filter_map(|(def_name, occurrences)| (occurrences == 1).then_some(def_name)) // However, we'll also filter out some specific schema definitions which are only // referenced once, specifically: component base types and component types themselves. // @@ -72,8 +66,6 @@ impl Visitor for InlineSingleUseReferencesVisitor { .map(|s| s.as_ref().to_string()) .collect::>(); - self.eligible_to_inline = eligible_to_inline; - // Now run our own visitor logic, which will use the inline eligibility to determine if a // schema reference in a being-visited schema should be replaced inline with the original // referenced schema, in turn removing the schema definition. 
diff --git a/lib/vector-config/src/schema/visitors/merge.rs b/lib/vector-config/src/schema/visitors/merge.rs index 741b74157e137..a05478caef168 100644 --- a/lib/vector-config/src/schema/visitors/merge.rs +++ b/lib/vector-config/src/schema/visitors/merge.rs @@ -95,7 +95,7 @@ impl Mergeable for serde_json::Map { impl Mergeable for Map where - K: std::hash::Hash + Eq + Clone, + K: Clone + Eq + Ord, V: Clone + Mergeable, { fn merge(&mut self, other: &Self) { @@ -261,7 +261,7 @@ where fn merge_map(destination: &mut Map, source: &Map) where - K: std::hash::Hash + Eq + Clone, + K: Clone + Eq + Ord, V: Clone + Mergeable, { destination.merge(source); diff --git a/lib/vector-config/src/schema/visitors/unevaluated.rs b/lib/vector-config/src/schema/visitors/unevaluated.rs index 1619465cde24a..105c3dbfcaac4 100644 --- a/lib/vector-config/src/schema/visitors/unevaluated.rs +++ b/lib/vector-config/src/schema/visitors/unevaluated.rs @@ -361,9 +361,9 @@ fn is_markable_schema(definitions: &Map, schema: &SchemaObject) .as_ref() .and_then(|reference| { let reference = get_cleaned_schema_reference(reference); - definitions.get_full(reference) + definitions.get_key_value(reference) }) - .and_then(|(_, name, schema)| schema.as_object().map(|schema| (name, schema))) + .and_then(|(name, schema)| schema.as_object().map(|schema| (name, schema))) .map_or(false, |(name, schema)| { debug!( "Following schema reference '{}' for subschema markability.", From 2ad964d43b9a47808104eced885cebf6541f4a72 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Fri, 16 Jun 2023 09:48:28 -0400 Subject: [PATCH 148/236] docs: Additional notes on proposing new integrations (#17658) Signed-off-by: Spencer Gilbert --- docs/CONTRIBUTING.md | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index bc87acb59fec1..7f4a52ed229f9 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -56,18 +56,40 @@ Vector team member will find this document useful. ### New sources, sinks, and transforms -If you're contributing a new source, sink, or transform to Vector, thank you that's way cool! There's a few steps you -need to think about if you want to make sure we can merge your contribution. We're here to help you along with these steps, -but they are a blocker to getting a new integration released. - -To merge a new source, sink, or transform, you need to: +If you're thinking of contributing a new source, sink, or transform to Vector, thank you that's way cool! The answers to +the below questions are required for each newly proposed component and depending on the answers, we may elect to not +include the proposed component. If you're having trouble with any of the questions, we're available to help you. + +**Prior to beginning work on a new source or sink if a GitHub Issue does not already exist, please open one to discuss +the introduction of the new integration.** Maintainers will review the proposal with the following checklist in mind, +try and consider them when sharing your proposal to reduce the amount of time it takes to review your proposal. This +list is not exhaustive, and may be updated over time. + +- [ ] Can the proposed component’s functionality be replicated by an existing component, with a specific configuration? +(ex: Azure Event Hub as a `kafka` sink configuration) + - [ ] Alternatively implemented as a wrapper around an existing component. (ex. 
`axiom` wrapping `elasticsearch`) +- [ ] Can an existing component replicate the proposed component’s functionality, with non-breaking changes? +- [ ] Can an existing component be rewritten in a more generic fashion to cover both the existing and proposed functions? +- [ ] Is the proposed component generically usable or is it specific to a particular service? + - [ ] How established is the target of the integration, what is the relative market share of the integrated service? +- [ ] Is there sufficient demand for the component? + - [ ] If the integration can be served with a workaround or more generic component, how painful is this for users? +- [ ] Is the contribution from an individual or the organization owning the integrated service? (examples of +organization backed integrations: `databend` sink, `axiom` sink) + - [ ] Is the contributor committed to maintaining the integration if it is accepted? +- [ ] What is the overall complexity of the proposed design of this integration from a technical and functional +standpoint, and what is the expected ongoing maintenance burden? +- [ ] How will this integration be tested and QA’d for any changes and fixes? + - [ ] Will we have access to an account with the service if the integration is not open source? + +To merge a new source, sink, or transform, the pull request is required to: - [ ] Add tests, especially integration tests if your contribution connects to an external service. - [ ] Add instrumentation so folks using your integration can get insight into how it's working and performing. You can see some [example of instrumentation in existing integrations](https://github.com/vectordotdev/vector/tree/master/src/internal_events). - [ ] Add documentation. You can see [examples in the `docs` directory](https://github.com/vectordotdev/vector/blob/master/docs). -When adding new integration tests, the following changes are needed in the github workflows: +When adding new integration tests, the following changes are needed in the GitHub Workflows: - in `.github/workflows/integration.yml`, add another entry in the matrix definition for the new integration. - in `.github/workflows/integration-comment.yml`, add another entry in the matrix definition for the new integration. From bebac21cb699be64d1b009d3619d5af5c5be20ec Mon Sep 17 00:00:00 2001 From: dengmingtong Date: Fri, 16 Jun 2023 22:32:09 +0800 Subject: [PATCH 149/236] feat(kinesis sinks): implement full retry of partial failures in firehose/streams (#17535) This PR is from https://github.com/vectordotdev/vector/pull/16771 PR. Refactor some action checking. 
closes: #17424 --------- Signed-off-by: Spencer Gilbert Co-authored-by: Jason Goodwin Co-authored-by: Jason Goodwin Co-authored-by: Spencer Gilbert --- src/sinks/aws_kinesis/config.rs | 8 ++++++- src/sinks/aws_kinesis/firehose/config.rs | 17 ++++++++++++++- .../aws_kinesis/firehose/integration_tests.rs | 1 + src/sinks/aws_kinesis/firehose/record.rs | 21 ++++++++++++++++--- src/sinks/aws_kinesis/firehose/tests.rs | 2 ++ src/sinks/aws_kinesis/record.rs | 7 ++++++- src/sinks/aws_kinesis/service.rs | 20 +++++++----------- src/sinks/aws_kinesis/streams/config.rs | 17 ++++++++++++++- .../aws_kinesis/streams/integration_tests.rs | 1 + src/sinks/aws_kinesis/streams/record.rs | 21 ++++++++++++++++--- .../components/sinks/aws_kinesis_firehose.cue | 1 + .../sinks/base/aws_kinesis_firehose.cue | 5 +++++ .../sinks/base/aws_kinesis_streams.cue | 5 +++++ 13 files changed, 103 insertions(+), 23 deletions(-) diff --git a/src/sinks/aws_kinesis/config.rs b/src/sinks/aws_kinesis/config.rs index 4e2d136a054ff..31c35e74c8fdc 100644 --- a/src/sinks/aws_kinesis/config.rs +++ b/src/sinks/aws_kinesis/config.rs @@ -52,6 +52,11 @@ pub struct KinesisSinkBaseConfig { #[serde(default)] pub auth: AwsAuthentication, + /// Whether or not to retry successful requests containing partial failures. + #[serde(default)] + #[configurable(metadata(docs::advanced))] + pub request_retry_partial: bool, + #[configurable(derived)] #[serde( default, @@ -77,6 +82,7 @@ pub fn build_sink( partition_key_field: Option, batch_settings: BatcherSettings, client: C, + retry_logic: RT, ) -> crate::Result where C: SendRecord + Clone + Send + Sync + 'static, @@ -92,7 +98,7 @@ where let region = config.region.region(); let service = ServiceBuilder::new() - .settings::>(request_limits, RT::default()) + .settings::>(request_limits, retry_logic) .service(KinesisService:: { client, stream_name: config.stream_name.clone(), diff --git a/src/sinks/aws_kinesis/firehose/config.rs b/src/sinks/aws_kinesis/firehose/config.rs index c8080e0711da3..aca15796d87c9 100644 --- a/src/sinks/aws_kinesis/firehose/config.rs +++ b/src/sinks/aws_kinesis/firehose/config.rs @@ -8,6 +8,7 @@ use futures::FutureExt; use snafu::Snafu; use vector_config::configurable_component; +use crate::sinks::util::retries::RetryAction; use crate::{ aws::{create_client, is_retriable_error, ClientBuilder}, config::{AcknowledgementsConfig, GenerateConfig, Input, ProxyConfig, SinkConfig, SinkContext}, @@ -141,6 +142,9 @@ impl SinkConfig for KinesisFirehoseSinkConfig { None, batch_settings, KinesisFirehoseClient { client }, + KinesisRetryLogic { + retry_partial: self.base.request_retry_partial, + }, )?; Ok((sink, healthcheck)) @@ -166,7 +170,9 @@ impl GenerateConfig for KinesisFirehoseSinkConfig { } #[derive(Clone, Default)] -struct KinesisRetryLogic; +struct KinesisRetryLogic { + retry_partial: bool, +} impl RetryLogic for KinesisRetryLogic { type Error = SdkError; @@ -180,4 +186,13 @@ impl RetryLogic for KinesisRetryLogic { } is_retriable_error(error) } + + fn should_retry_response(&self, response: &Self::Response) -> RetryAction { + if response.failure_count > 0 && self.retry_partial { + let msg = format!("partial error count {}", response.failure_count); + RetryAction::Retry(msg.into()) + } else { + RetryAction::Successful + } + } } diff --git a/src/sinks/aws_kinesis/firehose/integration_tests.rs b/src/sinks/aws_kinesis/firehose/integration_tests.rs index 8a46c57f83e14..9a4b903811ba7 100644 --- a/src/sinks/aws_kinesis/firehose/integration_tests.rs +++ 
b/src/sinks/aws_kinesis/firehose/integration_tests.rs @@ -57,6 +57,7 @@ async fn firehose_put_records() { tls: None, auth: Default::default(), acknowledgements: Default::default(), + request_retry_partial: Default::default(), }; let config = KinesisFirehoseSinkConfig { batch, base }; diff --git a/src/sinks/aws_kinesis/firehose/record.rs b/src/sinks/aws_kinesis/firehose/record.rs index 49d1ee821f5c3..52a656240282c 100644 --- a/src/sinks/aws_kinesis/firehose/record.rs +++ b/src/sinks/aws_kinesis/firehose/record.rs @@ -1,8 +1,11 @@ +use aws_sdk_firehose::output::PutRecordBatchOutput; use aws_sdk_firehose::types::{Blob, SdkError}; use bytes::Bytes; use tracing::Instrument; -use super::{KinesisClient, KinesisError, KinesisRecord, Record, SendRecord}; +use crate::sinks::prelude::*; + +use super::{KinesisClient, KinesisError, KinesisRecord, KinesisResponse, Record, SendRecord}; #[derive(Clone)] pub struct KinesisFirehoseRecord { @@ -46,7 +49,15 @@ impl SendRecord for KinesisFirehoseClient { type T = KinesisRecord; type E = KinesisError; - async fn send(&self, records: Vec, stream_name: String) -> Option> { + async fn send( + &self, + records: Vec, + stream_name: String, + ) -> Result> { + let rec_count = records.len(); + let total_size = records.iter().fold(0, |acc, record| { + acc + record.data().map(|v| v.as_ref().len()).unwrap_or_default() + }); self.client .put_record_batch() .set_records(Some(records)) @@ -54,6 +65,10 @@ impl SendRecord for KinesisFirehoseClient { .send() .instrument(info_span!("request").or_current()) .await - .err() + .map(|output: PutRecordBatchOutput| KinesisResponse { + count: rec_count, + failure_count: output.failed_put_count().unwrap_or(0) as usize, + events_byte_size: JsonSize::new(total_size), + }) } } diff --git a/src/sinks/aws_kinesis/firehose/tests.rs b/src/sinks/aws_kinesis/firehose/tests.rs index a15fb47a4e794..54c55d9efee1d 100644 --- a/src/sinks/aws_kinesis/firehose/tests.rs +++ b/src/sinks/aws_kinesis/firehose/tests.rs @@ -33,6 +33,7 @@ async fn check_batch_size() { request: Default::default(), tls: None, auth: Default::default(), + request_retry_partial: false, acknowledgements: Default::default(), }; @@ -62,6 +63,7 @@ async fn check_batch_events() { request: Default::default(), tls: None, auth: Default::default(), + request_retry_partial: false, acknowledgements: Default::default(), }; diff --git a/src/sinks/aws_kinesis/record.rs b/src/sinks/aws_kinesis/record.rs index 03ad11c710416..a244f028cb78d 100644 --- a/src/sinks/aws_kinesis/record.rs +++ b/src/sinks/aws_kinesis/record.rs @@ -2,6 +2,7 @@ use async_trait::async_trait; use aws_smithy_client::SdkError; use bytes::Bytes; +use super::KinesisResponse; /// An AWS Kinesis record type primarily to store the underlying aws crates' actual record `T`, and /// to abstract the encoded length calculation. pub trait Record { @@ -24,5 +25,9 @@ pub trait SendRecord { type E; /// Sends the records. 
- async fn send(&self, records: Vec, stream_name: String) -> Option>; + async fn send( + &self, + records: Vec, + stream_name: String, + ) -> Result>; } diff --git a/src/sinks/aws_kinesis/service.rs b/src/sinks/aws_kinesis/service.rs index 3539fee4e2eab..4ebc53f0d746a 100644 --- a/src/sinks/aws_kinesis/service.rs +++ b/src/sinks/aws_kinesis/service.rs @@ -37,8 +37,9 @@ where } pub struct KinesisResponse { - count: usize, - events_byte_size: JsonSize, + pub(crate) count: usize, + pub(crate) failure_count: usize, + pub(crate) events_byte_size: JsonSize, } impl DriverResponse for KinesisResponse { @@ -72,7 +73,6 @@ where let events_byte_size = requests .get_metadata() .events_estimated_json_encoded_byte_size(); - let count = requests.get_metadata().event_count(); let records = requests .events @@ -84,16 +84,10 @@ where let stream_name = self.stream_name.clone(); Box::pin(async move { - // Returning a Result (a trait that implements Try) is not a stable feature, - // so instead we have to explicitly check for error and return. - // https://github.com/rust-lang/rust/issues/84277 - if let Some(e) = client.send(records, stream_name).await { - return Err(e); - } - - Ok(KinesisResponse { - count, - events_byte_size, + client.send(records, stream_name).await.map(|mut r| { + // augment the response + r.events_byte_size = events_byte_size; + r }) }) } diff --git a/src/sinks/aws_kinesis/streams/config.rs b/src/sinks/aws_kinesis/streams/config.rs index 673ab7b4d212a..515c5fa66ab0e 100644 --- a/src/sinks/aws_kinesis/streams/config.rs +++ b/src/sinks/aws_kinesis/streams/config.rs @@ -6,6 +6,7 @@ use futures::FutureExt; use snafu::Snafu; use vector_config::{component::GenerateConfig, configurable_component}; +use crate::sinks::util::retries::RetryAction; use crate::{ aws::{create_client, is_retriable_error, ClientBuilder}, config::{AcknowledgementsConfig, Input, ProxyConfig, SinkConfig, SinkContext}, @@ -148,6 +149,9 @@ impl SinkConfig for KinesisStreamsSinkConfig { self.partition_key_field.clone(), batch_settings, KinesisStreamClient { client }, + KinesisRetryLogic { + retry_partial: self.base.request_retry_partial, + }, )?; Ok((sink, healthcheck)) @@ -173,7 +177,9 @@ impl GenerateConfig for KinesisStreamsSinkConfig { } } #[derive(Default, Clone)] -struct KinesisRetryLogic; +struct KinesisRetryLogic { + retry_partial: bool, +} impl RetryLogic for KinesisRetryLogic { type Error = SdkError; @@ -193,6 +199,15 @@ impl RetryLogic for KinesisRetryLogic { } is_retriable_error(error) } + + fn should_retry_response(&self, response: &Self::Response) -> RetryAction { + if response.failure_count > 0 && self.retry_partial { + let msg = format!("partial error count {}", response.failure_count); + RetryAction::Retry(msg.into()) + } else { + RetryAction::Successful + } + } } #[cfg(test)] diff --git a/src/sinks/aws_kinesis/streams/integration_tests.rs b/src/sinks/aws_kinesis/streams/integration_tests.rs index a9a66804e3729..8793aa520c024 100644 --- a/src/sinks/aws_kinesis/streams/integration_tests.rs +++ b/src/sinks/aws_kinesis/streams/integration_tests.rs @@ -98,6 +98,7 @@ async fn kinesis_put_records_without_partition_key() { tls: Default::default(), auth: Default::default(), acknowledgements: Default::default(), + request_retry_partial: Default::default(), }; let config = KinesisStreamsSinkConfig { diff --git a/src/sinks/aws_kinesis/streams/record.rs b/src/sinks/aws_kinesis/streams/record.rs index 67eba50d9aff2..339d6997af63a 100644 --- a/src/sinks/aws_kinesis/streams/record.rs +++ 
b/src/sinks/aws_kinesis/streams/record.rs @@ -1,8 +1,11 @@ +use aws_sdk_kinesis::output::PutRecordsOutput; use aws_sdk_kinesis::types::{Blob, SdkError}; use bytes::Bytes; use tracing::Instrument; -use super::{KinesisClient, KinesisError, KinesisRecord, Record, SendRecord}; +use crate::sinks::prelude::*; + +use super::{KinesisClient, KinesisError, KinesisRecord, KinesisResponse, Record, SendRecord}; #[derive(Clone)] pub struct KinesisStreamRecord { @@ -62,7 +65,15 @@ impl SendRecord for KinesisStreamClient { type T = KinesisRecord; type E = KinesisError; - async fn send(&self, records: Vec, stream_name: String) -> Option> { + async fn send( + &self, + records: Vec, + stream_name: String, + ) -> Result> { + let rec_count = records.len(); + let total_size = records.iter().fold(0, |acc, record| { + acc + record.data().map(|v| v.as_ref().len()).unwrap_or_default() + }); self.client .put_records() .set_records(Some(records)) @@ -70,6 +81,10 @@ impl SendRecord for KinesisStreamClient { .send() .instrument(info_span!("request").or_current()) .await - .err() + .map(|output: PutRecordsOutput| KinesisResponse { + count: rec_count, + failure_count: output.failed_record_count().unwrap_or(0) as usize, + events_byte_size: JsonSize::new(total_size), + }) } } diff --git a/website/cue/reference/components/sinks/aws_kinesis_firehose.cue b/website/cue/reference/components/sinks/aws_kinesis_firehose.cue index b06766ee167cc..35847e6db84df 100644 --- a/website/cue/reference/components/sinks/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sinks/aws_kinesis_firehose.cue @@ -75,6 +75,7 @@ components: sinks: aws_kinesis_firehose: components._aws & { configuration: base.components.sinks.aws_kinesis_firehose.configuration & { _aws_include: false + request_retry_partial: warnings: ["This can cause duplicate logs to be published."] } input: { diff --git a/website/cue/reference/components/sinks/base/aws_kinesis_firehose.cue b/website/cue/reference/components/sinks/base/aws_kinesis_firehose.cue index 2a4c55bf3e8ee..19a70860692e5 100644 --- a/website/cue/reference/components/sinks/base/aws_kinesis_firehose.cue +++ b/website/cue/reference/components/sinks/base/aws_kinesis_firehose.cue @@ -480,6 +480,11 @@ base: components: sinks: aws_kinesis_firehose: configuration: { } } } + request_retry_partial: { + description: "Whether or not to retry successful requests containing partial failures." + required: false + type: bool: default: false + } stream_name: { description: """ The [stream name][stream_name] of the target Kinesis Firehose delivery stream. diff --git a/website/cue/reference/components/sinks/base/aws_kinesis_streams.cue b/website/cue/reference/components/sinks/base/aws_kinesis_streams.cue index 180d54f1b9a96..40164b9d0e292 100644 --- a/website/cue/reference/components/sinks/base/aws_kinesis_streams.cue +++ b/website/cue/reference/components/sinks/base/aws_kinesis_streams.cue @@ -489,6 +489,11 @@ base: components: sinks: aws_kinesis_streams: configuration: { } } } + request_retry_partial: { + description: "Whether or not to retry successful requests containing partial failures." + required: false + type: bool: default: false + } stream_name: { description: """ The [stream name][stream_name] of the target Kinesis Firehose delivery stream. 
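For orientation, here is a self-contained sketch (simplified names, not the actual Vector types) of the retry decision this patch adds: a batch request that the service accepted but that reports partially failed records is retried only when the new `request_retry_partial` option is enabled. As the cue documentation above warns, retrying such requests can republish records that already succeeded, which is why the option defaults to `false`.

```rust
/// Simplified stand-in for `KinesisResponse`: a request that was accepted
/// by the service but may contain individually failed records.
struct BatchResponse {
    failure_count: usize,
}

#[derive(Debug, PartialEq)]
enum RetryAction {
    Successful,
    Retry(String),
}

/// Mirrors the logic added to `KinesisRetryLogic::should_retry_response`:
/// partial failures trigger a retry only when the operator has opted in.
fn should_retry_response(retry_partial: bool, response: &BatchResponse) -> RetryAction {
    if response.failure_count > 0 && retry_partial {
        RetryAction::Retry(format!("partial error count {}", response.failure_count))
    } else {
        RetryAction::Successful
    }
}

fn main() {
    let response = BatchResponse { failure_count: 2 };
    assert_eq!(
        should_retry_response(true, &response),
        RetryAction::Retry("partial error count 2".to_string())
    );
    assert_eq!(should_retry_response(false, &response), RetryAction::Successful);
}
```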
From c21f892e574579e323742da009f15a39c43555af Mon Sep 17 00:00:00 2001 From: Dominic Burkart Date: Fri, 16 Jun 2023 18:03:47 +0200 Subject: [PATCH 150/236] chore(flush on shutdown): validate s3 sink flushes (#17667) Adding regression test for related issue: https://github.com/vectordotdev/vector/issues/11405 --- src/sinks/aws_s3/integration_tests.rs | 74 +++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/src/sinks/aws_s3/integration_tests.rs b/src/sinks/aws_s3/integration_tests.rs index a86d647fb3c6c..4b5c082b7aa91 100644 --- a/src/sinks/aws_s3/integration_tests.rs +++ b/src/sinks/aws_s3/integration_tests.rs @@ -406,6 +406,80 @@ async fn s3_healthchecks_invalid_bucket() { .is_err()); } +#[tokio::test] +async fn s3_flush_on_exhaustion() { + let cx = SinkContext::new_test(); + + let bucket = uuid::Uuid::new_v4().to_string(); + create_bucket(&bucket, false).await; + + // batch size of ten events, timeout of ten seconds + let config = { + let mut batch = BatchConfig::default(); + batch.max_events = Some(10); + batch.timeout_secs = Some(10.0); + + S3SinkConfig { + bucket: bucket.to_string(), + key_prefix: random_string(10) + "/date=%F", + filename_time_format: default_filename_time_format(), + filename_append_uuid: true, + filename_extension: None, + options: S3Options::default(), + region: RegionOrEndpoint::with_both("minio", s3_address()), + encoding: (None::, TextSerializerConfig::default()).into(), + compression: Compression::None, + batch, + request: TowerRequestConfig::default(), + tls: Default::default(), + auth: Default::default(), + acknowledgements: Default::default(), + } + }; + let prefix = config.key_prefix.clone(); + let service = config.create_service(&cx.globals.proxy).await.unwrap(); + let sink = config.build_processor(service).unwrap(); + + let (lines, _events) = random_lines_with_stream(100, 2, None); // only generate two events (less than batch size) + + let events = lines.clone().into_iter().enumerate().map(|(i, line)| { + let mut e = LogEvent::from(line); + let i = if i < 10 { + 1 + } else if i < 20 { + 2 + } else { + 3 + }; + e.insert("i", i.to_string()); + Event::from(e) + }); + + // Here, we validate that the s3 sink flushes when its source stream is exhausted + // by giving it a number of inputs less than the batch size, verifying that the + // outputs for the in-flight batch are flushed. By timing out in 3 seconds with a + // flush period of ten seconds, we verify that the flush is triggered *at stream + // completion* and not because of periodic flushing. + assert!(tokio::time::timeout( + Duration::from_secs(3), + run_and_assert_sink_compliance(sink, stream::iter(events), &AWS_SINK_TAGS) + ) + .await + .is_ok()); + + let keys = get_keys(&bucket, prefix).await; + assert_eq!(keys.len(), 1); + + let mut response_lines: Vec = Vec::new(); + let mut key_stream = stream::iter(keys); + while let Some(key) = key_stream.next().await { + let obj = get_object(&bucket, key).await; + response_lines.append(&mut get_lines(obj).await); + } + + assert_eq!(lines, response_lines); // if all events are received, and lines.len() < batch size, then a flush was performed. 
+} + async fn client() -> S3Client { let auth = AwsAuthentication::test_auth(); let region = RegionOrEndpoint::with_both("minio", s3_address()); From d122d32b8c83133b753c9e31d19be6c6609fb9a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 13:43:56 +0100 Subject: [PATCH 151/236] chore(deps): Bump sha2 from 0.10.6 to 0.10.7 (#17698) Bumps [sha2](https://github.com/RustCrypto/hashes) from 0.10.6 to 0.10.7.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=sha2&package-manager=cargo&previous-version=0.10.6&new-version=0.10.7)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 44 ++++++++++++++++++++++---------------------- Cargo.toml | 2 +- vdev/Cargo.toml | 2 +- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a3b47031e3b4..250a6af25914a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -191,7 +191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cf4144857f9e4d7dd6cc4ba4c78efd2a46bad682b029bd0d91e76a021af1b2a" dependencies = [ "byteorder", - "digest 0.10.6", + "digest 0.10.7", "lazy_static", "libflate", "log", @@ -952,7 +952,7 @@ dependencies = [ "once_cell", "percent-encoding", "regex", - "sha2 0.10.6", + "sha2 0.10.7", "time", "tracing 0.1.37", ] @@ -986,7 +986,7 @@ dependencies = [ "md-5", "pin-project-lite", "sha1", - "sha2 0.10.6", + "sha2 0.10.7", "tracing 0.1.37", ] @@ -1307,7 +1307,7 @@ dependencies = [ "serde-xml-rs", "serde_derive", "serde_json", - "sha2 0.10.6", + "sha2 0.10.7", "time", "url", "uuid", @@ -2687,9 +2687,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.3", "crypto-common", @@ -3819,7 +3819,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4835,7 +4835,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -5030,7 +5030,7 @@ dependencies = [ "serde_bytes", "serde_with 1.14.0", "sha-1", - "sha2 0.10.6", + "sha2 0.10.7", "socket2 0.4.9", "stringprep", "strsim 0.10.0", @@ -5458,7 +5458,7 @@ dependencies = [ "serde", "serde_json", "serde_path_to_error", - "sha2 0.10.6", + "sha2 0.10.7", "thiserror", "url", ] @@ -5771,7 +5771,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -5845,7 +5845,7 @@ checksum = "5e3b284b1f13a20dc5ebc90aff59a51b8d7137c221131b52a7260c08cbc1cc80" dependencies = [ "once_cell", "pest", - "sha2 0.10.6", + "sha2 0.10.7", ] [[package]] @@ -6055,7 +6055,7 @@ dependencies = [ "md-5", "memchr", "rand 0.8.5", - "sha2 0.10.6", + "sha2 0.10.7", "stringprep", ] @@ -7495,7 +7495,7 @@ checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7506,7 +7506,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7524,13 +7524,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" dependencies = [ 
"cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7539,7 +7539,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -9075,7 +9075,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml 0.9.21", - "sha2 0.10.6", + "sha2 0.10.7", "tempfile", "toml 0.7.4", ] @@ -9226,7 +9226,7 @@ dependencies = [ "serde_json", "serde_with 2.3.2", "serde_yaml 0.9.21", - "sha2 0.10.6", + "sha2 0.10.7", "similar-asserts", "smallvec", "smpl_jwt", @@ -9648,7 +9648,7 @@ dependencies = [ "serde", "serde_json", "sha-1", - "sha2 0.10.6", + "sha2 0.10.7", "sha3", "snafu", "strip-ansi-escapes", diff --git a/Cargo.toml b/Cargo.toml index 9d0dfe2fb4e81..ab186fd63eb05 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -223,7 +223,7 @@ tui = { version = "0.19.0", optional = true, default-features = false, features # Datadog Pipelines hex = { version = "0.4.3", default-features = false, optional = true } -sha2 = { version = "0.10.6", default-features = false, optional = true } +sha2 = { version = "0.10.7", default-features = false, optional = true } # VRL Lang vrl = { package = "vrl", version = "0.4.0", features = ["cli", "test"] } diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 1e713e44137db..bf59e597785bc 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -35,6 +35,6 @@ reqwest = { version = "0.11", features = ["json", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.96" serde_yaml = "0.9.21" -sha2 = "0.10.6" +sha2 = "0.10.7" tempfile = "3.6.0" toml = { version = "0.7.4", default-features = false, features = ["parse"] } From cd6d1540bf74d13ad6bc9c90fc3fe2affb11e6dc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 10:06:24 +0100 Subject: [PATCH 152/236] chore(deps): Bump notify from 6.0.0 to 6.0.1 (#17700) Bumps [notify](https://github.com/notify-rs/notify) from 6.0.0 to 6.0.1.
Release notes

Sourced from notify's releases.

notify 6.0.1 (2023-06-16)

  • DOCS: fix swapped debouncer-full / -mini links in the readme/crates.io 4be6bde
Changelog

Sourced from notify's changelog.

notify 6.0.1 (2023-06-16)

  • DOCS: fix swapped debouncer-full / -mini links in the readme/crates.io 4be6bde

debouncer-full 0.2.0 (2023-06-16)

  • CHANGE: emit events as DebouncedEvents, each containing the original notify event and the time at which it occurred #488

#488: notify-rs/notify#488

Commits
  • 5f8cbeb more things to check for releases
  • 0367500 prepare notify v6.0.1
  • c7b0756 add missing link for PR 488
  • 40ec37f cargo fmt over everything
  • 0611b06 move fs import so it won't emit warnings
  • 58a4c13 Add DebouncedEvent to debouncer-full
  • 4bce637 fixup debouncer-mini version for examples
  • 4be6bde fix mixed up debouncer mini/full link
  • 936794f actually increase debouncer-mini version 0.3.0

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=notify&package-manager=cargo&previous-version=6.0.0&new-version=6.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 250a6af25914a..618b63e2d805a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5271,9 +5271,9 @@ checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" [[package]] name = "notify" -version = "6.0.0" +version = "6.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d9ba6c734de18ca27c8cef5cd7058aa4ac9f63596131e4c7e41e579319032a2" +checksum = "5738a2795d57ea20abec2d6d76c6081186709c0024187cd5977265eda6598b51" dependencies = [ "bitflags", "filetime", diff --git a/Cargo.toml b/Cargo.toml index ab186fd63eb05..ec4c1c1a6cf56 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -279,7 +279,7 @@ mongodb = { version = "2.5.0", default-features = false, features = ["tokio-runt nats = { version = "0.24.0", default-features = false, optional = true } nkeys = { version = "0.3.0", default-features = false, optional = true } nom = { version = "7.1.3", default-features = false, optional = true } -notify = { version = "6.0.0", default-features = false, features = ["macos_fsevent"] } +notify = { version = "6.0.1", default-features = false, features = ["macos_fsevent"] } once_cell = { version = "1.18", default-features = false } openssl = { version = "0.10.54", default-features = false, features = ["vendored"] } openssl-probe = { version = "0.1.5", default-features = false } From 53e178570b5b87bc2124f4299865cbb00916fe20 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 11:55:02 +0000 Subject: [PATCH 153/236] chore(deps): Bump gloo-utils from 0.1.6 to 0.1.7 (#17707) Bumps [gloo-utils](https://github.com/rustwasm/gloo) from 0.1.6 to 0.1.7.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=gloo-utils&package-manager=cargo&previous-version=0.1.6&new-version=0.1.7)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 618b63e2d805a..4b76e9fe96194 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3441,9 +3441,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-utils" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8e8fc851e9c7b9852508bc6e3f690f452f474417e8545ec9857b7f7377036b5" +checksum = "037fcb07216cb3a30f7292bd0176b050b7b9a052ba830ef7d5d65f6dc64ba58e" dependencies = [ "js-sys", "serde", From 9cd54043fab1e82722adaeeaee290d7084074439 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Tue, 20 Jun 2023 11:48:48 -0600 Subject: [PATCH 154/236] chore(config): Convert top-level sinks enum to typetag (#17710) Closes #16702 --- docs/tutorials/sinks/1_basic_sink.md | 59 +-- src/api/schema/components/mod.rs | 1 - src/components/validation/mod.rs | 6 +- src/components/validation/runner/config.rs | 8 +- src/config/builder.rs | 17 +- src/config/graph.rs | 4 +- src/config/mod.rs | 2 +- src/config/sink.rs | 48 ++- src/config/unit_test/unit_test_components.rs | 6 +- src/sinks/amqp/config.rs | 6 +- src/sinks/appsignal/mod.rs | 3 +- src/sinks/aws_cloudwatch_logs/config.rs | 6 +- src/sinks/aws_cloudwatch_metrics/mod.rs | 6 +- src/sinks/aws_kinesis/firehose/config.rs | 6 +- src/sinks/aws_kinesis/streams/config.rs | 6 +- src/sinks/aws_s3/config.rs | 6 +- src/sinks/aws_sqs/config.rs | 6 +- src/sinks/axiom.rs | 3 +- src/sinks/azure_blob/config.rs | 6 +- src/sinks/azure_monitor_logs.rs | 6 +- src/sinks/blackhole/config.rs | 6 +- src/sinks/clickhouse/config.rs | 3 +- src/sinks/console/config.rs | 6 +- src/sinks/databend/config.rs | 3 +- src/sinks/datadog/events/config.rs | 6 +- src/sinks/datadog/logs/config.rs | 5 +- src/sinks/datadog/logs/integration_tests.rs | 2 +- src/sinks/datadog/logs/mod.rs | 2 +- src/sinks/datadog/logs/tests.rs | 4 +- src/sinks/datadog/metrics/config.rs | 3 +- src/sinks/datadog/traces/config.rs | 3 +- src/sinks/datadog_archives.rs | 1 + src/sinks/elasticsearch/config.rs | 3 +- src/sinks/file/mod.rs | 3 +- src/sinks/gcp/chronicle_unstructured.rs | 6 +- src/sinks/gcp/cloud_storage.rs | 6 +- src/sinks/gcp/pubsub.rs | 6 +- src/sinks/gcp/stackdriver_logs.rs | 6 +- src/sinks/gcp/stackdriver_metrics.rs | 6 +- src/sinks/honeycomb.rs | 3 +- src/sinks/http.rs | 3 +- src/sinks/humio/logs.rs | 3 +- src/sinks/humio/metrics.rs | 3 +- src/sinks/influxdb/logs.rs | 3 +- src/sinks/influxdb/metrics.rs | 3 +- src/sinks/kafka/config.rs | 6 +- src/sinks/loki/config.rs | 3 +- src/sinks/mezmo.rs | 6 +- src/sinks/mod.rs | 389 ------------------ src/sinks/nats.rs | 6 +- src/sinks/new_relic/config.rs | 3 +- src/sinks/papertrail.rs | 3 +- src/sinks/prometheus/exporter.rs | 6 +- src/sinks/prometheus/remote_write.rs | 6 +- src/sinks/pulsar/config.rs | 3 +- src/sinks/redis.rs | 3 +- src/sinks/sematext/logs.rs | 3 +- src/sinks/sematext/metrics.rs | 3 +- src/sinks/socket.rs | 3 +- src/sinks/splunk_hec/logs/config.rs | 6 +- src/sinks/splunk_hec/metrics/config.rs | 6 +- src/sinks/statsd/config.rs | 3 +- src/sinks/util/adaptive_concurrency/tests.rs | 3 +- src/sinks/vector/config.rs | 3 +- src/sinks/webhdfs/config.rs | 3 +- src/sinks/websocket/config.rs | 6 +- src/test_util/mock/sinks/backpressure.rs | 3 +- src/test_util/mock/sinks/basic.rs | 3 +- src/test_util/mock/sinks/error.rs | 3 +- 
src/test_util/mock/sinks/oneshot.rs | 3 +- src/test_util/mock/sinks/panic.rs | 3 +- src/topology/builder.rs | 3 +- src/topology/schema.rs | 2 +- .../components/sinks/base/aws_s3.cue | 8 +- 74 files changed, 270 insertions(+), 540 deletions(-) diff --git a/docs/tutorials/sinks/1_basic_sink.md b/docs/tutorials/sinks/1_basic_sink.md index ca91266925e80..18194636eb469 100644 --- a/docs/tutorials/sinks/1_basic_sink.md +++ b/docs/tutorials/sinks/1_basic_sink.md @@ -33,7 +33,7 @@ is deserialized to the fields in this struct so the user can customise the sink's behaviour. ```rust -#[configurable_component(sink("basic"))] +#[configurable_component(sink("basic", "Basic sink."))] #[derive(Clone, Debug)] /// A basic sink that dumps its output to stdout. pub struct BasicConfig { @@ -75,10 +75,12 @@ configuration for the sink. # SinkConfig We need to implement the [`SinkConfig`][sink_config] trait. This is used by -Vector to generate the main Sink from the configuration. +Vector to generate the main Sink from the configuration. Note that type name +given to `typetag` below must match the name of the configurable component above. ```rust #[async_trait::async_trait] +#[typetag::serde(name = "basic")] impl SinkConfig for BasicConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let healthcheck = Box::pin(async move { Ok(()) }); @@ -198,59 +200,6 @@ sinks-logs = [ "sinks-chronicle", ``` -## Module - -Import this module into Vector. In `src/sinks/mod.rs` add the lines: - - -```diff - #[cfg(feature = "sinks-azure_monitor_logs")] - pub mod azure_monitor_logs; -+ #[cfg(feature = "sinks-basic")] -+ pub mod basic; - #[cfg(feature = "sinks-blackhole")] - pub mod blackhole; -``` - -All sinks are feature gated, this allows us to build custom versions of Vector -with only the components required. We will ignore the feature flag for now with -our new basic sink. - -Next, each sink needs to be added to the [`Sinks`][sinks_enum] enum. Find the -enum in `mod.rs` and add our new sink to it. - -```diff -#[configurable_component] -#[allow(clippy::large_enum_variant)] -#[derive(Clone, Debug)] -#[serde(tag = "type", rename_all = "snake_case")] -#[enum_dispatch(SinkConfig)] -pub enum Sinks { - ... - -+ /// Basic -+ #[cfg(feature = "sinks-basic")] -+ Basic(#[configurable(derived)] basic::BasicConfig), - - ... - -``` - -Then we need to add this to the `get_component_name` function defined below. - -```diff - - fn get_component_name(&self) -> &'static str { - match self { - ... - -+ #[cfg(feature = "sinks-basic")] -+ Self::Basic(config) => config.get_component_name(), - - ... 
- -``` - # Acknowledgements When our sink finishes processing the event, it needs to acknowledge this so diff --git a/src/api/schema/components/mod.rs b/src/api/schema/components/mod.rs index 6f096034ad87a..20880f3a06698 100644 --- a/src/api/schema/components/mod.rs +++ b/src/api/schema/components/mod.rs @@ -11,7 +11,6 @@ use std::{ use async_graphql::{Enum, InputObject, Interface, Object, Subscription}; use once_cell::sync::Lazy; use tokio_stream::{wrappers::BroadcastStream, Stream, StreamExt}; -use vector_config::NamedComponent; use vector_core::internal_event::DEFAULT_OUTPUT; use crate::{ diff --git a/src/components/validation/mod.rs b/src/components/validation/mod.rs index a7687c33264be..7457cd6548ad9 100644 --- a/src/components/validation/mod.rs +++ b/src/components/validation/mod.rs @@ -6,7 +6,7 @@ mod test_case; pub mod util; mod validators; -use crate::{config::BoxedSource, config::BoxedTransform, sinks::Sinks}; +use crate::config::{BoxedSink, BoxedSource, BoxedTransform}; pub use self::resources::*; #[cfg(feature = "component-validation-runner")] @@ -46,7 +46,7 @@ pub enum ComponentConfiguration { Transform(BoxedTransform), /// A sink component. - Sink(Sinks), + Sink(BoxedSink), } /// Configuration for validating a component. @@ -88,7 +88,7 @@ impl ValidationConfiguration { } /// Creates a new `ValidationConfiguration` for a sink. - pub fn from_sink>( + pub fn from_sink>( component_name: &'static str, config: C, external_resource: Option, diff --git a/src/components/validation/runner/config.rs b/src/components/validation/runner/config.rs index f526583d5282c..c087fd8bf809a 100644 --- a/src/components/validation/runner/config.rs +++ b/src/components/validation/runner/config.rs @@ -4,8 +4,8 @@ use crate::{ util::GrpcAddress, ComponentConfiguration, ComponentType, ValidationConfiguration, }, - config::{BoxedSource, BoxedTransform, ConfigBuilder}, - sinks::{vector::VectorConfig as VectorSinkConfig, Sinks}, + config::{BoxedSink, BoxedSource, BoxedTransform, ConfigBuilder}, + sinks::vector::VectorConfig as VectorSinkConfig, sources::vector::VectorConfig as VectorSourceConfig, test_util::next_addr, }; @@ -78,7 +78,7 @@ impl TopologyBuilder { } } - fn from_sink(sink: Sinks) -> Self { + fn from_sink(sink: BoxedSink) -> Self { let (input_edge, input_source) = build_input_edge(); let mut config_builder = ConfigBuilder::default(); @@ -130,7 +130,7 @@ fn build_input_edge() -> (InputEdge, impl Into) { (input_edge, input_source) } -fn build_output_edge() -> (OutputEdge, impl Into) { +fn build_output_edge() -> (OutputEdge, impl Into) { let output_listen_addr = GrpcAddress::from(next_addr()); debug!(endpoint = %output_listen_addr, "Creating controlled output edge."); diff --git a/src/config/builder.rs b/src/config/builder.rs index 301e239627e42..3645ef3755987 100644 --- a/src/config/builder.rs +++ b/src/config/builder.rs @@ -8,18 +8,16 @@ use serde_json::Value; use vector_config::configurable_component; use vector_core::config::GlobalOptions; -use crate::{ - enrichment_tables::EnrichmentTables, providers::Providers, secrets::SecretBackends, - sinks::Sinks, -}; +use crate::{enrichment_tables::EnrichmentTables, providers::Providers, secrets::SecretBackends}; #[cfg(feature = "api")] use super::api; #[cfg(feature = "enterprise")] use super::enterprise; use super::{ - compiler, schema, BoxedSource, BoxedTransform, ComponentKey, Config, EnrichmentTableOuter, - HealthcheckOptions, SinkOuter, SourceOuter, TestDefinition, TransformOuter, + compiler, schema, BoxedSink, BoxedSource, BoxedTransform, 
ComponentKey, Config, + EnrichmentTableOuter, HealthcheckOptions, SinkOuter, SourceOuter, TestDefinition, + TransformOuter, }; /// A complete Vector configuration. @@ -269,7 +267,12 @@ impl ConfigBuilder { .insert(ComponentKey::from(key.into()), SourceOuter::new(source)); } - pub fn add_sink, S: Into>(&mut self, key: K, inputs: &[&str], sink: S) { + pub fn add_sink, S: Into>( + &mut self, + key: K, + inputs: &[&str], + sink: S, + ) { let inputs = inputs .iter() .map(|value| value.to_string()) diff --git a/src/config/graph.rs b/src/config/graph.rs index 859b590257de2..6f4cdb81d4451 100644 --- a/src/config/graph.rs +++ b/src/config/graph.rs @@ -2,8 +2,8 @@ use indexmap::{set::IndexSet, IndexMap}; use std::collections::{HashMap, HashSet, VecDeque}; use super::{ - schema, ComponentKey, DataType, OutputId, SinkConfig, SinkOuter, SourceOuter, SourceOutput, - TransformOuter, TransformOutput, + schema, ComponentKey, DataType, OutputId, SinkOuter, SourceOuter, SourceOutput, TransformOuter, + TransformOutput, }; #[derive(Debug, Clone)] diff --git a/src/config/mod.rs b/src/config/mod.rs index 59a26d367d3de..61c1a219d9eed 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -54,7 +54,7 @@ pub use loading::{ }; pub use provider::ProviderConfig; pub use secret::SecretBackend; -pub use sink::{SinkConfig, SinkContext, SinkHealthcheckOptions, SinkOuter}; +pub use sink::{BoxedSink, SinkConfig, SinkContext, SinkHealthcheckOptions, SinkOuter}; pub use source::{BoxedSource, SourceConfig, SourceContext, SourceOuter}; pub use transform::{ get_transform_output_ids, BoxedTransform, TransformConfig, TransformContext, TransformOuter, diff --git a/src/config/sink.rs b/src/config/sink.rs index aa25a0e614f08..c0d6aba694b07 100644 --- a/src/config/sink.rs +++ b/src/config/sink.rs @@ -1,15 +1,47 @@ +use std::cell::RefCell; + use async_trait::async_trait; -use enum_dispatch::enum_dispatch; +use dyn_clone::DynClone; use serde::Serialize; use vector_buffers::{BufferConfig, BufferType}; -use vector_config::{configurable_component, Configurable, NamedComponent}; +use vector_config::{ + configurable_component, Configurable, GenerateError, Metadata, NamedComponent, +}; +use vector_config_common::attributes::CustomAttribute; +use vector_config_common::schema::{SchemaGenerator, SchemaObject}; use vector_core::{ config::{AcknowledgementsConfig, GlobalOptions, Input}, sink::VectorSink, }; use super::{id::Inputs, schema, ComponentKey, ProxyConfig, Resource}; -use crate::sinks::{util::UriSerde, Healthcheck, Sinks}; +use crate::sinks::{util::UriSerde, Healthcheck}; + +pub type BoxedSink = Box; + +impl Configurable for BoxedSink { + fn referenceable_name() -> Option<&'static str> { + Some("vector::sinks::Sinks") + } + + fn metadata() -> Metadata { + let mut metadata = Metadata::default(); + metadata.set_description("Configurable sinks in Vector."); + metadata.add_custom_attribute(CustomAttribute::kv("docs::enum_tagging", "internal")); + metadata.add_custom_attribute(CustomAttribute::kv("docs::enum_tag_field", "type")); + metadata + } + + fn generate_schema(gen: &RefCell) -> Result { + vector_config::component::SinkDescription::generate_schemas(gen) + } +} + +impl From for BoxedSink { + fn from(value: T) -> Self { + Box::new(value) + } +} /// Fully resolved sink component. 
#[configurable_component] @@ -49,7 +81,7 @@ where #[serde(flatten)] #[configurable(metadata(docs::hidden))] - pub inner: Sinks, + pub inner: BoxedSink, } impl SinkOuter @@ -59,7 +91,7 @@ where pub fn new(inputs: I, inner: IS) -> SinkOuter where I: IntoIterator, - IS: Into, + IS: Into, { SinkOuter { inputs: Inputs::from_iter(inputs), @@ -170,8 +202,8 @@ impl From for SinkHealthcheckOptions { /// Generalized interface for describing and building sink components. #[async_trait] -#[enum_dispatch] -pub trait SinkConfig: NamedComponent + core::fmt::Debug + Send + Sync { +#[typetag::serde(tag = "type")] +pub trait SinkConfig: DynClone + NamedComponent + core::fmt::Debug + Send + Sync { /// Builds the sink with the given context. /// /// If the sink is built successfully, `Ok(...)` is returned containing the sink and the sink's @@ -201,6 +233,8 @@ pub trait SinkConfig: NamedComponent + core::fmt::Debug + Send + Sync { fn acknowledgements(&self) -> &AcknowledgementsConfig; } +dyn_clone::clone_trait_object!(SinkConfig); + #[derive(Debug, Clone)] pub struct SinkContext { pub healthcheck: SinkHealthcheckOptions, diff --git a/src/config/unit_test/unit_test_components.rs b/src/config/unit_test/unit_test_components.rs index 157c2306abe3a..3166e396a17e4 100644 --- a/src/config/unit_test/unit_test_components.rs +++ b/src/config/unit_test/unit_test_components.rs @@ -135,7 +135,7 @@ pub struct UnitTestSinkResult { } /// Configuration for the `unit_test` sink. -#[configurable_component(sink("unit_test"))] +#[configurable_component(sink("unit_test", "Unit test."))] #[derive(Clone, Default, Derivative)] #[derivative(Debug)] pub struct UnitTestSinkConfig { @@ -158,6 +158,7 @@ pub struct UnitTestSinkConfig { impl_generate_config_from_default!(UnitTestSinkConfig); #[async_trait::async_trait] +#[typetag::serde(name = "unit_test")] impl SinkConfig for UnitTestSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let tx = self.result_tx.lock().await.take(); @@ -272,7 +273,7 @@ impl StreamSink for UnitTestSink { } /// Configuration for the `unit_test_stream` sink. -#[configurable_component(sink("unit_test_stream"))] +#[configurable_component(sink("unit_test_stream", "Unit test stream."))] #[derive(Clone, Default)] pub struct UnitTestStreamSinkConfig { /// Sink that receives the processed events. @@ -297,6 +298,7 @@ impl std::fmt::Debug for UnitTestStreamSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "unit_test_stream")] impl SinkConfig for UnitTestStreamSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let sink = self.sink.lock().await.take().unwrap(); diff --git a/src/sinks/amqp/config.rs b/src/sinks/amqp/config.rs index a83cb7c1f2aad..b266bb1892370 100644 --- a/src/sinks/amqp/config.rs +++ b/src/sinks/amqp/config.rs @@ -36,7 +36,10 @@ impl AmqpPropertiesConfig { /// Configuration for the `amqp` sink. /// /// Supports AMQP version 0.9.1 -#[configurable_component(sink("amqp"))] +#[configurable_component(sink( + "amqp", + "Send events to AMQP 0.9.1 compatible brokers like RabbitMQ." +))] #[derive(Clone, Debug)] pub struct AmqpSinkConfig { /// The exchange to publish messages to. 
@@ -89,6 +92,7 @@ impl GenerateConfig for AmqpSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "amqp")] impl SinkConfig for AmqpSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let sink = AmqpSink::new(self.clone()).await?; diff --git a/src/sinks/appsignal/mod.rs b/src/sinks/appsignal/mod.rs index 9d24731f01c9a..32c1247192368 100644 --- a/src/sinks/appsignal/mod.rs +++ b/src/sinks/appsignal/mod.rs @@ -45,7 +45,7 @@ enum FinishError { } /// Configuration for the `appsignal` sink. -#[configurable_component(sink("appsignal"))] +#[configurable_component(sink("appsignal", "Send events to AppSignal."))] #[derive(Clone, Debug, Default)] pub struct AppsignalSinkConfig { /// The URI for the AppSignal API to send data to. @@ -106,6 +106,7 @@ impl SinkBatchSettings for AppsignalDefaultBatchSettings { impl_generate_config_from_default!(AppsignalSinkConfig); #[async_trait::async_trait] +#[typetag::serde(name = "appsignal")] impl SinkConfig for AppsignalSinkConfig { async fn build( &self, diff --git a/src/sinks/aws_cloudwatch_logs/config.rs b/src/sinks/aws_cloudwatch_logs/config.rs index 2197d5b96746c..a03811e4612ba 100644 --- a/src/sinks/aws_cloudwatch_logs/config.rs +++ b/src/sinks/aws_cloudwatch_logs/config.rs @@ -49,7 +49,10 @@ impl ClientBuilder for CloudwatchLogsClientBuilder { } /// Configuration for the `aws_cloudwatch_logs` sink. -#[configurable_component(sink("aws_cloudwatch_logs"))] +#[configurable_component(sink( + "aws_cloudwatch_logs", + "Publish log events to AWS CloudWatch Logs." +))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct CloudwatchLogsSinkConfig { @@ -161,6 +164,7 @@ impl CloudwatchLogsSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "aws_cloudwatch_logs")] impl SinkConfig for CloudwatchLogsSinkConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let batcher_settings = self.batch.into_batcher_settings()?; diff --git a/src/sinks/aws_cloudwatch_metrics/mod.rs b/src/sinks/aws_cloudwatch_metrics/mod.rs index e169dde9b01fb..a1f22ca212e53 100644 --- a/src/sinks/aws_cloudwatch_metrics/mod.rs +++ b/src/sinks/aws_cloudwatch_metrics/mod.rs @@ -46,7 +46,10 @@ impl SinkBatchSettings for CloudWatchMetricsDefaultBatchSettings { } /// Configuration for the `aws_cloudwatch_metrics` sink. -#[configurable_component(sink("aws_cloudwatch_metrics"))] +#[configurable_component(sink( + "aws_cloudwatch_metrics", + "Publish metric events to AWS CloudWatch Metrics." +))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct CloudWatchMetricsSinkConfig { @@ -120,6 +123,7 @@ impl ClientBuilder for CloudwatchMetricsClientBuilder { } #[async_trait::async_trait] +#[typetag::serde(name = "aws_cloudwatch_metrics")] impl SinkConfig for CloudWatchMetricsSinkConfig { async fn build( &self, diff --git a/src/sinks/aws_kinesis/firehose/config.rs b/src/sinks/aws_kinesis/firehose/config.rs index aca15796d87c9..d5bbd93f06c78 100644 --- a/src/sinks/aws_kinesis/firehose/config.rs +++ b/src/sinks/aws_kinesis/firehose/config.rs @@ -66,7 +66,10 @@ impl SinkBatchSettings for KinesisFirehoseDefaultBatchSettings { } /// Configuration for the `aws_kinesis_firehose` sink. -#[configurable_component(sink("aws_kinesis_firehose"))] +#[configurable_component(sink( + "aws_kinesis_firehose", + "Publish logs to AWS Kinesis Data Firehose topics." 
+))] #[derive(Clone, Debug)] pub struct KinesisFirehoseSinkConfig { #[serde(flatten)] @@ -119,6 +122,7 @@ impl KinesisFirehoseSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "aws_kinesis_firehose")] impl SinkConfig for KinesisFirehoseSinkConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let client = self.create_client(&cx.proxy).await?; diff --git a/src/sinks/aws_kinesis/streams/config.rs b/src/sinks/aws_kinesis/streams/config.rs index 515c5fa66ab0e..8949f90d40352 100644 --- a/src/sinks/aws_kinesis/streams/config.rs +++ b/src/sinks/aws_kinesis/streams/config.rs @@ -67,7 +67,10 @@ impl SinkBatchSettings for KinesisDefaultBatchSettings { } /// Configuration for the `aws_kinesis_streams` sink. -#[configurable_component(sink("aws_kinesis_streams"))] +#[configurable_component(sink( + "aws_kinesis_streams", + "Publish logs to AWS Kinesis Streams topics." +))] #[derive(Clone, Debug)] pub struct KinesisStreamsSinkConfig { #[serde(flatten)] @@ -126,6 +129,7 @@ impl KinesisStreamsSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "aws_kinesis_streams")] impl SinkConfig for KinesisStreamsSinkConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let client = self.create_client(&cx.proxy).await?; diff --git a/src/sinks/aws_s3/config.rs b/src/sinks/aws_s3/config.rs index 27afbcd706d21..6123ee737d22f 100644 --- a/src/sinks/aws_s3/config.rs +++ b/src/sinks/aws_s3/config.rs @@ -33,7 +33,10 @@ use crate::{ }; /// Configuration for the `aws_s3` sink. -#[configurable_component(sink("aws_s3"))] +#[configurable_component(sink( + "aws_s3", + "Store observability events in the AWS S3 object storage system." +))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct S3SinkConfig { @@ -166,6 +169,7 @@ impl GenerateConfig for S3SinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "aws_s3")] impl SinkConfig for S3SinkConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let service = self.create_service(&cx.proxy).await?; diff --git a/src/sinks/aws_sqs/config.rs b/src/sinks/aws_sqs/config.rs index e303c28d5a45c..45579f52925bc 100644 --- a/src/sinks/aws_sqs/config.rs +++ b/src/sinks/aws_sqs/config.rs @@ -32,7 +32,10 @@ pub(super) enum BuildError { } /// Configuration for the `aws_sqs` sink. -#[configurable_component(sink("aws_sqs"))] +#[configurable_component(sink( + "aws_sqs", + "Publish observability events to AWS Simple Queue Service topics." +))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct SqsSinkConfig { @@ -111,6 +114,7 @@ impl GenerateConfig for SqsSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "aws_sqs")] impl SinkConfig for SqsSinkConfig { async fn build( &self, diff --git a/src/sinks/axiom.rs b/src/sinks/axiom.rs index a8a73a52f7c40..b94c7f6e7f69c 100644 --- a/src/sinks/axiom.rs +++ b/src/sinks/axiom.rs @@ -16,7 +16,7 @@ use crate::{ static CLOUD_URL: &str = "https://api.axiom.co"; /// Configuration for the `axiom` sink. -#[configurable_component(sink("axiom"))] +#[configurable_component(sink("axiom", "Deliver log events to Axiom."))] #[derive(Clone, Debug, Default)] pub struct AxiomConfig { /// URI of the Axiom endpoint to send data to. 
@@ -77,6 +77,7 @@ impl GenerateConfig for AxiomConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "axiom")] impl SinkConfig for AxiomConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let mut request = self.request.clone(); diff --git a/src/sinks/azure_blob/config.rs b/src/sinks/azure_blob/config.rs index 507633ff54165..2fb5287fa9ca3 100644 --- a/src/sinks/azure_blob/config.rs +++ b/src/sinks/azure_blob/config.rs @@ -25,7 +25,10 @@ use crate::{ }; /// Configuration for the `azure_blob` sink. -#[configurable_component(sink("azure_blob"))] +#[configurable_component(sink( + "azure_blob", + "Store your observability data in Azure Blob Storage." +))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct AzureBlobSinkConfig { @@ -164,6 +167,7 @@ impl GenerateConfig for AzureBlobSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "azure_blob")] impl SinkConfig for AzureBlobSinkConfig { async fn build(&self, _cx: SinkContext) -> Result<(VectorSink, Healthcheck)> { let client = azure_common::config::build_client( diff --git a/src/sinks/azure_monitor_logs.rs b/src/sinks/azure_monitor_logs.rs index 77e675daf3074..aef40146c8d2a 100644 --- a/src/sinks/azure_monitor_logs.rs +++ b/src/sinks/azure_monitor_logs.rs @@ -39,7 +39,10 @@ fn default_host() -> String { } /// Configuration for the `azure_monitor_logs` sink. -#[configurable_component(sink("azure_monitor_logs"))] +#[configurable_component(sink( + "azure_monitor_logs", + "Publish log events to the Azure Monitor Logs service." +))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct AzureMonitorLogsConfig { @@ -177,6 +180,7 @@ const SHARED_KEY: &str = "SharedKey"; const API_VERSION: &str = "2016-04-01"; #[async_trait::async_trait] +#[typetag::serde(name = "azure_monitor_logs")] impl SinkConfig for AzureMonitorLogsConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let batch_settings = self diff --git a/src/sinks/blackhole/config.rs b/src/sinks/blackhole/config.rs index 070f32a99c0f2..d2d69e75cc44f 100644 --- a/src/sinks/blackhole/config.rs +++ b/src/sinks/blackhole/config.rs @@ -15,7 +15,10 @@ const fn default_print_interval_secs() -> Duration { /// Configuration for the `blackhole` sink. #[serde_as] -#[configurable_component(sink("blackhole"))] +#[configurable_component(sink( + "blackhole", + "Send observability events nowhere, which can be useful for debugging purposes." +))] #[derive(Clone, Debug, Derivative)] #[serde(deny_unknown_fields, default)] #[derivative(Default)] @@ -46,6 +49,7 @@ pub struct BlackholeConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "blackhole")] impl SinkConfig for BlackholeConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let sink = BlackholeSink::new(self.clone()); diff --git a/src/sinks/clickhouse/config.rs b/src/sinks/clickhouse/config.rs index a5ae3763f2ad5..08299eb541099 100644 --- a/src/sinks/clickhouse/config.rs +++ b/src/sinks/clickhouse/config.rs @@ -17,7 +17,7 @@ use crate::{ use super::http_sink::build_http_sink; /// Configuration for the `clickhouse` sink. 
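Taken together, the hunks in this patch make the same two-part change to every sink: the `configurable_component(sink(...))` attribute gains a second argument carrying the sink's human-readable description (the same wording as the doc comments on the `Sinks` enum variants removed from `src/sinks/mod.rs` later in the patch), and each `impl SinkConfig` gains `#[typetag::serde(name = "...")]`, which registers the concrete config type under its component name so a boxed `dyn SinkConfig` can be deserialized from its type tag without going through a central enum. Below is a minimal sketch of the typetag mechanism, assuming only the `serde`, `serde_json`, and `typetag` crates; the trait and struct are illustrative stand-ins, not Vector's real `SinkConfig`.

use serde::{Deserialize, Serialize};

// Internally tagged trait object: the `type` field selects the implementation.
#[typetag::serde(tag = "type")]
trait ExampleSinkConfig {
    fn component_name(&self) -> &'static str;
}

#[derive(Serialize, Deserialize)]
struct BlackholeExample {
    print_interval_secs: u64,
}

// Registers this impl under the name "blackhole" (name reused here purely for illustration).
#[typetag::serde(name = "blackhole")]
impl ExampleSinkConfig for BlackholeExample {
    fn component_name(&self) -> &'static str {
        "blackhole"
    }
}

fn main() {
    // Deserialization picks the impl registered under the value of `type`.
    let json = r#"{ "type": "blackhole", "print_interval_secs": 1 }"#;
    let config: Box<dyn ExampleSinkConfig> = serde_json::from_str(json).unwrap();
    assert_eq!(config.component_name(), "blackhole");
}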
-#[configurable_component(sink("clickhouse"))] +#[configurable_component(sink("clickhouse", "Deliver log data to a ClickHouse database."))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct ClickhouseConfig { @@ -79,6 +79,7 @@ pub struct ClickhouseConfig { impl_generate_config_from_default!(ClickhouseConfig); #[async_trait::async_trait] +#[typetag::serde(name = "clickhouse")] impl SinkConfig for ClickhouseConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { // later we can build different sink(http, native) here diff --git a/src/sinks/console/config.rs b/src/sinks/console/config.rs index dcdefb12fb949..3aa1c46bb62af 100644 --- a/src/sinks/console/config.rs +++ b/src/sinks/console/config.rs @@ -33,7 +33,10 @@ pub enum Target { } /// Configuration for the `console` sink. -#[configurable_component(sink("console"))] +#[configurable_component(sink( + "console", + "Display observability events in the console, which can be useful for debugging purposes." +))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct ConsoleSinkConfig { @@ -69,6 +72,7 @@ impl GenerateConfig for ConsoleSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "console")] impl SinkConfig for ConsoleSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let transformer = self.encoding.transformer(); diff --git a/src/sinks/databend/config.rs b/src/sinks/databend/config.rs index 4a47adb01de8b..69a464090c445 100644 --- a/src/sinks/databend/config.rs +++ b/src/sinks/databend/config.rs @@ -30,7 +30,7 @@ use super::{ }; /// Configuration for the `databend` sink. -#[configurable_component(sink("databend"))] +#[configurable_component(sink("databend", "Deliver log data to a Databend database."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct DatabendConfig { @@ -103,6 +103,7 @@ impl DatabendConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "databend")] impl SinkConfig for DatabendConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let auth = self.auth.choose_one(&self.endpoint.auth)?; diff --git a/src/sinks/datadog/events/config.rs b/src/sinks/datadog/events/config.rs index 8ca941d1cf0cb..606d364b32ec8 100644 --- a/src/sinks/datadog/events/config.rs +++ b/src/sinks/datadog/events/config.rs @@ -24,7 +24,10 @@ use crate::{ }; /// Configuration for the `datadog_events` sink. -#[configurable_component(sink("datadog_events"))] +#[configurable_component(sink( + "datadog_events", + "Publish observability events to the Datadog Events API." +))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct DatadogEventsConfig { @@ -89,6 +92,7 @@ impl DatadogEventsConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "datadog_events")] impl SinkConfig for DatadogEventsConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let client = self.build_client(cx.proxy())?; diff --git a/src/sinks/datadog/logs/config.rs b/src/sinks/datadog/logs/config.rs index 5e1d6cc150962..80fb5293257a1 100644 --- a/src/sinks/datadog/logs/config.rs +++ b/src/sinks/datadog/logs/config.rs @@ -46,7 +46,7 @@ impl SinkBatchSettings for DatadogLogsDefaultBatchSettings { } /// Configuration for the `datadog_logs` sink. 
-#[configurable_component(sink("datadog_logs"))] +#[configurable_component(sink("datadog_logs", "Publish log events to Datadog."))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct DatadogLogsConfig { @@ -160,6 +160,7 @@ impl DatadogLogsConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "datadog_logs")] impl SinkConfig for DatadogLogsConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let client = self.create_client(&cx.proxy)?; @@ -193,7 +194,7 @@ impl SinkConfig for DatadogLogsConfig { #[cfg(test)] mod test { - use crate::sinks::datadog::logs::DatadogLogsConfig; + use super::super::config::DatadogLogsConfig; #[test] fn generate_config() { diff --git a/src/sinks/datadog/logs/integration_tests.rs b/src/sinks/datadog/logs/integration_tests.rs index 78156c1cf5c77..0d34486ec0897 100644 --- a/src/sinks/datadog/logs/integration_tests.rs +++ b/src/sinks/datadog/logs/integration_tests.rs @@ -1,9 +1,9 @@ use indoc::indoc; use vector_core::event::{BatchNotifier, BatchStatus}; +use super::config::DatadogLogsConfig; use crate::{ config::SinkConfig, - sinks::datadog::logs::DatadogLogsConfig, sinks::util::test::load_sink, test_util::{ components::{run_and_assert_sink_compliance, SINK_TAGS}, diff --git a/src/sinks/datadog/logs/mod.rs b/src/sinks/datadog/logs/mod.rs index 02b857a160181..0f99ee8dc8947 100644 --- a/src/sinks/datadog/logs/mod.rs +++ b/src/sinks/datadog/logs/mod.rs @@ -28,4 +28,4 @@ mod config; mod service; mod sink; -pub(crate) use config::DatadogLogsConfig; +pub use self::config::DatadogLogsConfig; diff --git a/src/sinks/datadog/logs/tests.rs b/src/sinks/datadog/logs/tests.rs index 86430fd5890bf..efe366120100e 100644 --- a/src/sinks/datadog/logs/tests.rs +++ b/src/sinks/datadog/logs/tests.rs @@ -17,8 +17,6 @@ use crate::{ config::SinkConfig, http::HttpError, sinks::{ - datadog::logs::DatadogLogsConfig, - datadog::DatadogApiError, util::retries::RetryLogic, util::test::{build_test_server_status, load_sink}, }, @@ -32,7 +30,7 @@ use crate::{ tls::TlsError, }; -use super::service::LogApiRetry; +use super::{super::DatadogApiError, config::DatadogLogsConfig, service::LogApiRetry}; // The sink must support v1 and v2 API endpoints which have different codes for // signaling status. This enum allows us to signal which API endpoint and what diff --git a/src/sinks/datadog/metrics/config.rs b/src/sinks/datadog/metrics/config.rs index 4de73c5cf8ed6..1acedc003c079 100644 --- a/src/sinks/datadog/metrics/config.rs +++ b/src/sinks/datadog/metrics/config.rs @@ -86,7 +86,7 @@ impl DatadogMetricsEndpointConfiguration { } /// Configuration for the `datadog_metrics` sink. 
-#[configurable_component(sink("datadog_metrics"))] +#[configurable_component(sink("datadog_metrics", "Publish metric events to Datadog."))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct DatadogMetricsConfig { @@ -118,6 +118,7 @@ pub struct DatadogMetricsConfig { impl_generate_config_from_default!(DatadogMetricsConfig); #[async_trait::async_trait] +#[typetag::serde(name = "datadog_metrics")] impl SinkConfig for DatadogMetricsConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let client = self.build_client(&cx.proxy)?; diff --git a/src/sinks/datadog/traces/config.rs b/src/sinks/datadog/traces/config.rs index 533b874a539da..bb8f4d183d63d 100644 --- a/src/sinks/datadog/traces/config.rs +++ b/src/sinks/datadog/traces/config.rs @@ -54,7 +54,7 @@ impl SinkBatchSettings for DatadogTracesDefaultBatchSettings { } /// Configuration for the `datadog_traces` sink. -#[configurable_component(sink("datadog_traces"))] +#[configurable_component(sink("datadog_traces", "Publish trace events to Datadog."))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct DatadogTracesConfig { @@ -211,6 +211,7 @@ impl DatadogTracesConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "datadog_traces")] impl SinkConfig for DatadogTracesConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let client = self.build_client(&cx.proxy)?; diff --git a/src/sinks/datadog_archives.rs b/src/sinks/datadog_archives.rs index e439481c076a1..d92265ae05a42 100644 --- a/src/sinks/datadog_archives.rs +++ b/src/sinks/datadog_archives.rs @@ -865,6 +865,7 @@ impl NamedComponent for DatadogArchivesSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "datadog_archives")] impl SinkConfig for DatadogArchivesSinkConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, super::Healthcheck)> { let sink_and_healthcheck = self.build_sink(cx).await?; diff --git a/src/sinks/elasticsearch/config.rs b/src/sinks/elasticsearch/config.rs index fbfc88edcc3c8..1e6c9d708894a 100644 --- a/src/sinks/elasticsearch/config.rs +++ b/src/sinks/elasticsearch/config.rs @@ -40,7 +40,7 @@ use vrl::value::Kind; pub const DATA_STREAM_TIMESTAMP_KEY: &str = "@timestamp"; /// Configuration for the `elasticsearch` sink. -#[configurable_component(sink("elasticsearch"))] +#[configurable_component(sink("elasticsearch", "Index observability events in Elasticsearch."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct ElasticsearchConfig { @@ -466,6 +466,7 @@ impl DataStreamConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "elasticsearch")] impl SinkConfig for ElasticsearchConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let commons = ElasticsearchCommon::parse_many(self, cx.proxy()).await?; diff --git a/src/sinks/file/mod.rs b/src/sinks/file/mod.rs index c8b0784ddd435..04f166aa986fb 100644 --- a/src/sinks/file/mod.rs +++ b/src/sinks/file/mod.rs @@ -40,7 +40,7 @@ use bytes_path::BytesPath; /// Configuration for the `file` sink. 
#[serde_as] -#[configurable_component(sink("file"))] +#[configurable_component(sink("file", "Output observability events into files."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct FileSinkConfig { @@ -170,6 +170,7 @@ impl OutFile { } #[async_trait::async_trait] +#[typetag::serde(name = "file")] impl SinkConfig for FileSinkConfig { async fn build( &self, diff --git a/src/sinks/gcp/chronicle_unstructured.rs b/src/sinks/gcp/chronicle_unstructured.rs index a9390b4335d37..3f7b3d4494d25 100644 --- a/src/sinks/gcp/chronicle_unstructured.rs +++ b/src/sinks/gcp/chronicle_unstructured.rs @@ -97,7 +97,10 @@ impl SinkBatchSettings for ChronicleUnstructuredDefaultBatchSettings { } /// Configuration for the `gcp_chronicle_unstructured` sink. -#[configurable_component(sink("gcp_chronicle_unstructured"))] +#[configurable_component(sink( + "gcp_chronicle_unstructured", + "Store unstructured log events in Google Chronicle." +))] #[derive(Clone, Debug)] pub struct ChronicleUnstructuredConfig { /// The endpoint to send data to. @@ -190,6 +193,7 @@ pub enum ChronicleError { } #[async_trait::async_trait] +#[typetag::serde(name = "gcp_chronicle_unstructured")] impl SinkConfig for ChronicleUnstructuredConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let creds = self.auth.build(Scope::MalachiteIngestion).await?; diff --git a/src/sinks/gcp/cloud_storage.rs b/src/sinks/gcp/cloud_storage.rs index 13983ad90bc6e..5b172586ee0cf 100644 --- a/src/sinks/gcp/cloud_storage.rs +++ b/src/sinks/gcp/cloud_storage.rs @@ -49,7 +49,10 @@ pub enum GcsHealthcheckError { } /// Configuration for the `gcp_cloud_storage` sink. -#[configurable_component(sink("gcp_cloud_storage"))] +#[configurable_component(sink( + "gcp_cloud_storage", + "Store observability events in GCP Cloud Storage." +))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct GcsSinkConfig { @@ -201,6 +204,7 @@ impl GenerateConfig for GcsSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "gcp_cloud_storage")] impl SinkConfig for GcsSinkConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let auth = self.auth.build(Scope::DevStorageReadWrite).await?; diff --git a/src/sinks/gcp/pubsub.rs b/src/sinks/gcp/pubsub.rs index dded23587438f..a950c3dc46dab 100644 --- a/src/sinks/gcp/pubsub.rs +++ b/src/sinks/gcp/pubsub.rs @@ -45,7 +45,10 @@ impl SinkBatchSettings for PubsubDefaultBatchSettings { } /// Configuration for the `gcp_pubsub` sink. -#[configurable_component(sink("gcp_pubsub"))] +#[configurable_component(sink( + "gcp_pubsub", + "Publish observability events to GCP's Pub/Sub messaging system." +))] #[derive(Clone, Debug)] pub struct PubsubConfig { /// The project name to which to publish events. @@ -111,6 +114,7 @@ impl GenerateConfig for PubsubConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "gcp_pubsub")] impl SinkConfig for PubsubConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let sink = PubsubSink::from_config(self).await?; diff --git a/src/sinks/gcp/stackdriver_logs.rs b/src/sinks/gcp/stackdriver_logs.rs index 8e7c43276bd20..e79287429071d 100644 --- a/src/sinks/gcp/stackdriver_logs.rs +++ b/src/sinks/gcp/stackdriver_logs.rs @@ -36,7 +36,10 @@ enum HealthcheckError { } /// Configuration for the `gcp_stackdriver_logs` sink. 
-#[configurable_component(sink("gcp_stackdriver_logs"))] +#[configurable_component(sink( + "gcp_stackdriver_logs", + "Deliver logs to GCP's Cloud Operations suite." +))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct StackdriverConfig { @@ -202,6 +205,7 @@ fn label_examples() -> HashMap { impl_generate_config_from_default!(StackdriverConfig); #[async_trait::async_trait] +#[typetag::serde(name = "gcp_stackdriver_logs")] impl SinkConfig for StackdriverConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let auth = self.auth.build(Scope::LoggingWrite).await?; diff --git a/src/sinks/gcp/stackdriver_metrics.rs b/src/sinks/gcp/stackdriver_metrics.rs index a68140e2bced8..c18a8318b7f79 100644 --- a/src/sinks/gcp/stackdriver_metrics.rs +++ b/src/sinks/gcp/stackdriver_metrics.rs @@ -36,7 +36,10 @@ impl SinkBatchSettings for StackdriverMetricsDefaultBatchSettings { } /// Configuration for the `gcp_stackdriver_metrics` sink. -#[configurable_component(sink("gcp_stackdriver_metrics"))] +#[configurable_component(sink( + "gcp_stackdriver_metrics", + "Deliver metrics to GCP's Cloud Monitoring system." +))] #[derive(Clone, Debug, Default)] pub struct StackdriverConfig { #[serde(skip, default = "default_endpoint")] @@ -93,6 +96,7 @@ fn default_endpoint() -> String { impl_generate_config_from_default!(StackdriverConfig); #[async_trait::async_trait] +#[typetag::serde(name = "gcp_stackdriver_metrics")] impl SinkConfig for StackdriverConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let auth = self.auth.build(Scope::MonitoringWrite).await?; diff --git a/src/sinks/honeycomb.rs b/src/sinks/honeycomb.rs index f9a05c2a7a5b0..559a1f4a5bf85 100644 --- a/src/sinks/honeycomb.rs +++ b/src/sinks/honeycomb.rs @@ -19,7 +19,7 @@ use crate::{ }; /// Configuration for the `honeycomb` sink. -#[configurable_component(sink("honeycomb"))] +#[configurable_component(sink("honeycomb", "Deliver log events to Honeycomb."))] #[derive(Clone, Debug)] pub struct HoneycombConfig { // This endpoint is not user-configurable and only exists for testing purposes @@ -85,6 +85,7 @@ impl GenerateConfig for HoneycombConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "honeycomb")] impl SinkConfig for HoneycombConfig { async fn build( &self, diff --git a/src/sinks/http.rs b/src/sinks/http.rs index 1d21b052c2c82..8f7d0fdf8b633 100644 --- a/src/sinks/http.rs +++ b/src/sinks/http.rs @@ -29,7 +29,7 @@ use crate::{ }; /// Configuration for the `http` sink. -#[configurable_component(sink("http"))] +#[configurable_component(sink("http", "Deliver observability event data to an HTTP server."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct HttpSinkConfig { @@ -201,6 +201,7 @@ fn default_sink(encoding: EncodingConfigWithFraming) -> HttpSink { } #[async_trait::async_trait] +#[typetag::serde(name = "http")] impl SinkConfig for HttpSinkConfig { async fn build( &self, diff --git a/src/sinks/humio/logs.rs b/src/sinks/humio/logs.rs index 6af56db905115..6e30e66f29ee9 100644 --- a/src/sinks/humio/logs.rs +++ b/src/sinks/humio/logs.rs @@ -26,7 +26,7 @@ use crate::{ pub(super) const HOST: &str = "https://cloud.humio.com"; /// Configuration for the `humio_logs` sink. 
-#[configurable_component(sink("humio_logs"))] +#[configurable_component(sink("humio_logs", "Deliver log event data to Humio."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct HumioLogsConfig { @@ -168,6 +168,7 @@ impl GenerateConfig for HumioLogsConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "humio_logs")] impl SinkConfig for HumioLogsConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { self.build_hec_config().build(cx).await diff --git a/src/sinks/humio/metrics.rs b/src/sinks/humio/metrics.rs index c79aa6b852d1a..a336f60590aa9 100644 --- a/src/sinks/humio/metrics.rs +++ b/src/sinks/humio/metrics.rs @@ -38,7 +38,7 @@ use crate::{ // `humio_logs` config here. // // [1]: https://github.com/serde-rs/serde/issues/1504 -#[configurable_component(sink("humio_metrics"))] +#[configurable_component(sink("humio_metrics", "Deliver metric event data to Humio."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct HumioMetricsConfig { @@ -151,6 +151,7 @@ impl GenerateConfig for HumioMetricsConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "humio_metrics")] impl SinkConfig for HumioMetricsConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let transform = self diff --git a/src/sinks/influxdb/logs.rs b/src/sinks/influxdb/logs.rs index 96c5be07ee96f..fe2e44b950368 100644 --- a/src/sinks/influxdb/logs.rs +++ b/src/sinks/influxdb/logs.rs @@ -41,7 +41,7 @@ impl SinkBatchSettings for InfluxDbLogsDefaultBatchSettings { } /// Configuration for the `influxdb_logs` sink. -#[configurable_component(sink("influxdb_logs"))] +#[configurable_component(sink("influxdb_logs", "Deliver log event data to InfluxDB."))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct InfluxDbLogsConfig { @@ -157,6 +157,7 @@ impl GenerateConfig for InfluxDbLogsConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "influxdb_logs")] impl SinkConfig for InfluxDbLogsConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let measurement = self.get_measurement()?; diff --git a/src/sinks/influxdb/metrics.rs b/src/sinks/influxdb/metrics.rs index e8f8d281ca106..5c6f98856198a 100644 --- a/src/sinks/influxdb/metrics.rs +++ b/src/sinks/influxdb/metrics.rs @@ -52,7 +52,7 @@ impl SinkBatchSettings for InfluxDbDefaultBatchSettings { } /// Configuration for the `influxdb_metrics` sink. -#[configurable_component(sink("influxdb_metrics"))] +#[configurable_component(sink("influxdb_metrics", "Deliver metric event data to InfluxDB."))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct InfluxDbConfig { @@ -122,6 +122,7 @@ struct InfluxDbRequest { impl_generate_config_from_default!(InfluxDbConfig); #[async_trait::async_trait] +#[typetag::serde(name = "influxdb_metrics")] impl SinkConfig for InfluxDbConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let tls_settings = TlsSettings::from_options(&self.tls)?; diff --git a/src/sinks/kafka/config.rs b/src/sinks/kafka/config.rs index 9615c22ec7280..57cb86c2001dd 100644 --- a/src/sinks/kafka/config.rs +++ b/src/sinks/kafka/config.rs @@ -20,7 +20,10 @@ pub(crate) const QUEUED_MIN_MESSAGES: u64 = 100000; /// Configuration for the `kafka` sink. #[serde_as] -#[configurable_component(sink("kafka"))] +#[configurable_component(sink( + "kafka", + "Publish observability event data to Apache Kafka topics." 
+))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct KafkaSinkConfig { @@ -262,6 +265,7 @@ impl GenerateConfig for KafkaSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "kafka")] impl SinkConfig for KafkaSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let sink = KafkaSink::new(self.clone())?; diff --git a/src/sinks/loki/config.rs b/src/sinks/loki/config.rs index 6cb74c426ec08..f72c68c964007 100644 --- a/src/sinks/loki/config.rs +++ b/src/sinks/loki/config.rs @@ -52,7 +52,7 @@ fn default_loki_path() -> String { } /// Configuration for the `loki` sink. -#[configurable_component(sink("loki"))] +#[configurable_component(sink("loki", "Deliver log event data to the Loki aggregation system."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct LokiConfig { @@ -211,6 +211,7 @@ impl LokiConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "loki")] impl SinkConfig for LokiConfig { async fn build( &self, diff --git a/src/sinks/mezmo.rs b/src/sinks/mezmo.rs index 00a8bf09278bc..a79179ef3f0d0 100644 --- a/src/sinks/mezmo.rs +++ b/src/sinks/mezmo.rs @@ -25,7 +25,7 @@ use crate::{ const PATH: &str = "/logs/ingest"; /// Configuration for the `logdna` sink. -#[configurable_component(sink("logdna"))] +#[configurable_component(sink("logdna", "Deliver log event data to LogDNA."))] #[configurable(metadata( deprecated = "The `logdna` sink has been renamed. Please use `mezmo` instead." ))] @@ -39,6 +39,7 @@ impl GenerateConfig for LogdnaConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "logdna")] impl SinkConfig for LogdnaConfig { async fn build( &self, @@ -58,7 +59,7 @@ impl SinkConfig for LogdnaConfig { } /// Configuration for the `mezmo` (formerly `logdna`) sink. -#[configurable_component(sink("mezmo"))] +#[configurable_component(sink("mezmo", "Deliver log event data to Mezmo."))] #[derive(Clone, Debug)] pub struct MezmoConfig { /// The Ingestion API key. @@ -155,6 +156,7 @@ impl GenerateConfig for MezmoConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "mezmo")] impl SinkConfig for MezmoConfig { async fn build( &self, diff --git a/src/sinks/mod.rs b/src/sinks/mod.rs index b21c5749841c6..d64d92e37664b 100644 --- a/src/sinks/mod.rs +++ b/src/sinks/mod.rs @@ -1,5 +1,4 @@ #![allow(missing_docs)] -use enum_dispatch::enum_dispatch; use futures::future::BoxFuture; use snafu::Snafu; @@ -104,14 +103,8 @@ pub mod webhdfs; #[cfg(feature = "sinks-websocket")] pub mod websocket; -use vector_config::{configurable_component, NamedComponent}; pub use vector_core::{config::Input, sink::VectorSink}; -use crate::config::{ - unit_test::{UnitTestSinkConfig, UnitTestStreamSinkConfig}, - AcknowledgementsConfig, Resource, SinkConfig, SinkContext, -}; - pub type Healthcheck = BoxFuture<'static, crate::Result<()>>; /// Common build errors @@ -133,385 +126,3 @@ pub enum HealthcheckError { #[snafu(display("Unexpected status: {}", status))] UnexpectedStatus { status: ::http::StatusCode }, } - -/// Configurable sinks in Vector. -#[configurable_component] -#[allow(clippy::large_enum_variant)] -#[derive(Clone, Debug)] -#[serde(tag = "type", rename_all = "snake_case")] -#[enum_dispatch(SinkConfig)] -pub enum Sinks { - /// Send events to AMQP 0.9.1 compatible brokers like RabbitMQ. - #[cfg(feature = "sinks-amqp")] - Amqp(amqp::AmqpSinkConfig), - - /// Send events to AppSignal. 
- #[cfg(feature = "sinks-appsignal")] - Appsignal(appsignal::AppsignalSinkConfig), - - /// Publish log events to AWS CloudWatch Logs. - #[cfg(feature = "sinks-aws_cloudwatch_logs")] - AwsCloudwatchLogs(aws_cloudwatch_logs::CloudwatchLogsSinkConfig), - - /// Publish metric events to AWS CloudWatch Metrics. - #[cfg(feature = "sinks-aws_cloudwatch_metrics")] - AwsCloudwatchMetrics(aws_cloudwatch_metrics::CloudWatchMetricsSinkConfig), - - /// Publish logs to AWS Kinesis Data Firehose topics. - #[cfg(feature = "sinks-aws_kinesis_firehose")] - #[configurable(metadata(docs::human_name = "AWS Kinesis Data Firehose Logs"))] - AwsKinesisFirehose(aws_kinesis::firehose::KinesisFirehoseSinkConfig), - - /// Publish logs to AWS Kinesis Streams topics. - #[cfg(feature = "sinks-aws_kinesis_streams")] - #[configurable(metadata(docs::human_name = "AWS Kinesis Streams Logs"))] - AwsKinesisStreams(aws_kinesis::streams::KinesisStreamsSinkConfig), - - /// Store observability events in the AWS S3 object storage system. - #[cfg(feature = "sinks-aws_s3")] - AwsS3(aws_s3::S3SinkConfig), - - /// Publish observability events to AWS Simple Queue Service topics. - #[cfg(feature = "sinks-aws_sqs")] - AwsSqs(aws_sqs::SqsSinkConfig), - - /// Deliver log events to Axiom. - #[cfg(feature = "sinks-axiom")] - Axiom(axiom::AxiomConfig), - - /// Store your observability data in Azure Blob Storage. - #[cfg(feature = "sinks-azure_blob")] - #[configurable(metadata(docs::human_name = "Azure Blob Storage"))] - AzureBlob(azure_blob::AzureBlobSinkConfig), - - /// Publish log events to the Azure Monitor Logs service. - #[cfg(feature = "sinks-azure_monitor_logs")] - AzureMonitorLogs(azure_monitor_logs::AzureMonitorLogsConfig), - - /// Send observability events nowhere, which can be useful for debugging purposes. - #[cfg(feature = "sinks-blackhole")] - Blackhole(blackhole::BlackholeConfig), - - /// Deliver log data to a ClickHouse database. - #[cfg(feature = "sinks-clickhouse")] - Clickhouse(clickhouse::ClickhouseConfig), - - /// Display observability events in the console, which can be useful for debugging purposes. - #[cfg(feature = "sinks-console")] - Console(console::ConsoleSinkConfig), - - /// Deliver log data to a Databend database. - #[cfg(feature = "sinks-databend")] - Databend(databend::DatabendConfig), - - /// Send events to Datadog Archives. - #[cfg(feature = "sinks-datadog_archives")] - DatadogArchives(datadog_archives::DatadogArchivesSinkConfig), - - /// Publish observability events to the Datadog Events API. - #[cfg(feature = "sinks-datadog_events")] - DatadogEvents(datadog::events::DatadogEventsConfig), - - /// Publish log events to Datadog. - #[cfg(feature = "sinks-datadog_logs")] - DatadogLogs(datadog::logs::DatadogLogsConfig), - - /// Publish metric events to Datadog. - #[cfg(feature = "sinks-datadog_metrics")] - DatadogMetrics(datadog::metrics::DatadogMetricsConfig), - - /// Publish traces to Datadog. - #[cfg(feature = "sinks-datadog_traces")] - DatadogTraces(datadog::traces::DatadogTracesConfig), - - /// Index observability events in Elasticsearch. - #[cfg(feature = "sinks-elasticsearch")] - Elasticsearch(elasticsearch::ElasticsearchConfig), - - /// Output observability events into files. - #[cfg(feature = "sinks-file")] - File(file::FileSinkConfig), - - /// Store unstructured log events in Google Chronicle. - #[cfg(feature = "sinks-gcp")] - GcpChronicleUnstructured(gcp::chronicle_unstructured::ChronicleUnstructuredConfig), - - /// Deliver logs to GCP's Cloud Operations suite. 
- #[cfg(feature = "sinks-gcp")] - #[configurable(metadata(docs::human_name = "GCP Operations (Stackdriver)"))] - GcpStackdriverLogs(gcp::stackdriver_logs::StackdriverConfig), - - /// Deliver metrics to GCP's Cloud Monitoring system. - #[cfg(feature = "sinks-gcp")] - #[configurable(metadata(docs::human_name = "GCP Cloud Monitoring (Stackdriver)"))] - GcpStackdriverMetrics(gcp::stackdriver_metrics::StackdriverConfig), - - /// Store observability events in GCP Cloud Storage. - #[cfg(feature = "sinks-gcp")] - GcpCloudStorage(gcp::cloud_storage::GcsSinkConfig), - - /// Publish observability events to GCP's Pub/Sub messaging system. - #[cfg(feature = "sinks-gcp")] - GcpPubsub(gcp::pubsub::PubsubConfig), - - /// WebHDFS. - #[cfg(feature = "sinks-webhdfs")] - Webhdfs(webhdfs::WebHdfsConfig), - - /// Deliver log events to Honeycomb. - #[cfg(feature = "sinks-honeycomb")] - Honeycomb(honeycomb::HoneycombConfig), - - /// Deliver observability event data to an HTTP server. - #[cfg(feature = "sinks-http")] - Http(http::HttpSinkConfig), - - /// Deliver log event data to Humio. - #[cfg(feature = "sinks-humio")] - HumioLogs(humio::logs::HumioLogsConfig), - - /// Deliver metric event data to Humio. - #[cfg(feature = "sinks-humio")] - HumioMetrics(humio::metrics::HumioMetricsConfig), - - /// Deliver log event data to InfluxDB. - #[cfg(any(feature = "sinks-influxdb", feature = "prometheus-integration-tests"))] - InfluxdbLogs(influxdb::logs::InfluxDbLogsConfig), - - /// Deliver metric event data to InfluxDB. - #[cfg(any(feature = "sinks-influxdb", feature = "prometheus-integration-tests"))] - InfluxdbMetrics(influxdb::metrics::InfluxDbConfig), - - /// Publish observability event data to Apache Kafka topics. - #[cfg(feature = "sinks-kafka")] - Kafka(kafka::KafkaSinkConfig), - - /// Deliver log event data to Mezmo. - #[cfg(feature = "sinks-mezmo")] - Mezmo(mezmo::MezmoConfig), - - /// Deliver log event data to LogDNA. - #[cfg(feature = "sinks-mezmo")] - Logdna(mezmo::LogdnaConfig), - - /// Deliver log event data to the Loki aggregation system. - #[cfg(feature = "sinks-loki")] - Loki(loki::LokiConfig), - - /// Publish observability data to subjects on the NATS messaging system. - #[cfg(feature = "sinks-nats")] - Nats(self::nats::NatsSinkConfig), - - /// Deliver events to New Relic. - #[cfg(feature = "sinks-new_relic")] - NewRelic(new_relic::NewRelicConfig), - - /// Deliver log events to Papertrail from SolarWinds. - #[cfg(feature = "sinks-papertrail")] - Papertrail(papertrail::PapertrailConfig), - - /// Expose metric events on a Prometheus compatible endpoint. - #[cfg(feature = "sinks-prometheus")] - PrometheusExporter(prometheus::exporter::PrometheusExporterConfig), - - /// Deliver metric data to a Prometheus remote write endpoint. - #[cfg(feature = "sinks-prometheus")] - PrometheusRemoteWrite(prometheus::remote_write::RemoteWriteConfig), - - /// Publish observability events to Apache Pulsar topics. - #[cfg(feature = "sinks-pulsar")] - Pulsar(pulsar::config::PulsarSinkConfig), - - /// Publish observability data to Redis. - #[cfg(feature = "sinks-redis")] - Redis(redis::RedisSinkConfig), - - /// Publish log events to Sematext. - #[cfg(feature = "sinks-sematext")] - SematextLogs(sematext::logs::SematextLogsConfig), - - /// Publish metric events to Sematext. - #[cfg(feature = "sinks-sematext")] - SematextMetrics(sematext::metrics::SematextMetricsConfig), - - /// Deliver logs to a remote socket endpoint. 
- #[cfg(feature = "sinks-socket")] - Socket(socket::SocketSinkConfig), - - /// Deliver log data to Splunk's HTTP Event Collector. - #[cfg(feature = "sinks-splunk_hec")] - SplunkHecLogs(splunk_hec::logs::config::HecLogsSinkConfig), - - /// Deliver metric data to Splunk's HTTP Event Collector. - #[cfg(feature = "sinks-splunk_hec")] - SplunkHecMetrics(splunk_hec::metrics::config::HecMetricsSinkConfig), - - /// Deliver metric data to a StatsD aggregator. - #[cfg(feature = "sinks-statsd")] - Statsd(statsd::StatsdSinkConfig), - - /// Test (adaptive concurrency). - #[cfg(all(test, feature = "sources-demo_logs"))] - TestArc(self::util::adaptive_concurrency::tests::TestConfig), - - /// Test (backpressure). - #[cfg(test)] - TestBackpressure(crate::test_util::mock::sinks::BackpressureSinkConfig), - - /// Test (basic). - #[cfg(test)] - TestBasic(crate::test_util::mock::sinks::BasicSinkConfig), - - /// Test (error). - #[cfg(test)] - TestError(crate::test_util::mock::sinks::ErrorSinkConfig), - - /// Test (oneshot). - #[cfg(test)] - TestOneshot(crate::test_util::mock::sinks::OneshotSinkConfig), - - /// Test (panic). - #[cfg(test)] - TestPanic(crate::test_util::mock::sinks::PanicSinkConfig), - - /// Unit test. - UnitTest(UnitTestSinkConfig), - - /// Unit test stream. - UnitTestStream(UnitTestStreamSinkConfig), - - /// Relay observability data to a Vector instance. - #[cfg(feature = "sinks-vector")] - Vector(vector::VectorConfig), - - /// Deliver observability event data to a websocket listener. - #[cfg(feature = "sinks-websocket")] - Websocket(websocket::WebSocketSinkConfig), -} - -impl NamedComponent for Sinks { - fn get_component_name(&self) -> &'static str { - match self { - #[cfg(feature = "sinks-amqp")] - Self::Amqp(config) => config.get_component_name(), - #[cfg(feature = "sinks-appsignal")] - Self::Appsignal(config) => config.get_component_name(), - #[cfg(feature = "sinks-aws_cloudwatch_logs")] - Self::AwsCloudwatchLogs(config) => config.get_component_name(), - #[cfg(feature = "sinks-aws_cloudwatch_metrics")] - Self::AwsCloudwatchMetrics(config) => config.get_component_name(), - #[cfg(feature = "sinks-aws_kinesis_firehose")] - Self::AwsKinesisFirehose(config) => config.get_component_name(), - #[cfg(feature = "sinks-aws_kinesis_streams")] - Self::AwsKinesisStreams(config) => config.get_component_name(), - #[cfg(feature = "sinks-aws_s3")] - Self::AwsS3(config) => config.get_component_name(), - #[cfg(feature = "sinks-aws_sqs")] - Self::AwsSqs(config) => config.get_component_name(), - #[cfg(feature = "sinks-axiom")] - Self::Axiom(config) => config.get_component_name(), - #[cfg(feature = "sinks-azure_blob")] - Self::AzureBlob(config) => config.get_component_name(), - #[cfg(feature = "sinks-azure_monitor_logs")] - Self::AzureMonitorLogs(config) => config.get_component_name(), - #[cfg(feature = "sinks-blackhole")] - Self::Blackhole(config) => config.get_component_name(), - #[cfg(feature = "sinks-clickhouse")] - Self::Clickhouse(config) => config.get_component_name(), - #[cfg(feature = "sinks-console")] - Self::Console(config) => config.get_component_name(), - #[cfg(feature = "sinks-databend")] - Self::Databend(config) => config.get_component_name(), - #[cfg(feature = "sinks-datadog_archives")] - Self::DatadogArchives(config) => config.get_component_name(), - #[cfg(feature = "sinks-datadog_events")] - Self::DatadogEvents(config) => config.get_component_name(), - #[cfg(feature = "sinks-datadog_logs")] - Self::DatadogLogs(config) => config.get_component_name(), - #[cfg(feature = "sinks-datadog_metrics")] - 
Self::DatadogMetrics(config) => config.get_component_name(), - #[cfg(feature = "sinks-datadog_traces")] - Self::DatadogTraces(config) => config.get_component_name(), - #[cfg(feature = "sinks-elasticsearch")] - Self::Elasticsearch(config) => config.get_component_name(), - #[cfg(feature = "sinks-file")] - Self::File(config) => config.get_component_name(), - #[cfg(feature = "sinks-gcp")] - Self::GcpChronicleUnstructured(config) => config.get_component_name(), - #[cfg(feature = "sinks-gcp")] - Self::GcpStackdriverLogs(config) => config.get_component_name(), - #[cfg(feature = "sinks-gcp")] - Self::GcpStackdriverMetrics(config) => config.get_component_name(), - #[cfg(feature = "sinks-gcp")] - Self::GcpCloudStorage(config) => config.get_component_name(), - #[cfg(feature = "sinks-gcp")] - Self::GcpPubsub(config) => config.get_component_name(), - #[cfg(feature = "sinks-webhdfs")] - Self::Webhdfs(config) => config.get_component_name(), - #[cfg(feature = "sinks-honeycomb")] - Self::Honeycomb(config) => config.get_component_name(), - #[cfg(feature = "sinks-http")] - Self::Http(config) => config.get_component_name(), - #[cfg(feature = "sinks-humio")] - Self::HumioLogs(config) => config.get_component_name(), - #[cfg(feature = "sinks-humio")] - Self::HumioMetrics(config) => config.get_component_name(), - #[cfg(any(feature = "sinks-influxdb", feature = "prometheus-integration-tests"))] - Self::InfluxdbLogs(config) => config.get_component_name(), - #[cfg(any(feature = "sinks-influxdb", feature = "prometheus-integration-tests"))] - Self::InfluxdbMetrics(config) => config.get_component_name(), - #[cfg(feature = "sinks-kafka")] - Self::Kafka(config) => config.get_component_name(), - #[cfg(feature = "sinks-mezmo")] - Self::Mezmo(config) => config.get_component_name(), - #[cfg(feature = "sinks-mezmo")] - Self::Logdna(config) => config.get_component_name(), - #[cfg(feature = "sinks-loki")] - Self::Loki(config) => config.get_component_name(), - #[cfg(feature = "sinks-nats")] - Self::Nats(config) => config.get_component_name(), - #[cfg(feature = "sinks-new_relic")] - Self::NewRelic(config) => config.get_component_name(), - #[cfg(feature = "sinks-papertrail")] - Self::Papertrail(config) => config.get_component_name(), - #[cfg(feature = "sinks-prometheus")] - Self::PrometheusExporter(config) => config.get_component_name(), - #[cfg(feature = "sinks-prometheus")] - Self::PrometheusRemoteWrite(config) => config.get_component_name(), - #[cfg(feature = "sinks-pulsar")] - Self::Pulsar(config) => config.get_component_name(), - #[cfg(feature = "sinks-redis")] - Self::Redis(config) => config.get_component_name(), - #[cfg(feature = "sinks-sematext")] - Self::SematextLogs(config) => config.get_component_name(), - #[cfg(feature = "sinks-sematext")] - Self::SematextMetrics(config) => config.get_component_name(), - #[cfg(feature = "sinks-socket")] - Self::Socket(config) => config.get_component_name(), - #[cfg(feature = "sinks-splunk_hec")] - Self::SplunkHecLogs(config) => config.get_component_name(), - #[cfg(feature = "sinks-splunk_hec")] - Self::SplunkHecMetrics(config) => config.get_component_name(), - #[cfg(feature = "sinks-statsd")] - Self::Statsd(config) => config.get_component_name(), - #[cfg(all(test, feature = "sources-demo_logs"))] - Self::TestArc(config) => config.get_component_name(), - #[cfg(test)] - Self::TestBackpressure(config) => config.get_component_name(), - #[cfg(test)] - Self::TestBasic(config) => config.get_component_name(), - #[cfg(test)] - Self::TestError(config) => config.get_component_name(), - 
#[cfg(test)] - Self::TestOneshot(config) => config.get_component_name(), - #[cfg(test)] - Self::TestPanic(config) => config.get_component_name(), - Self::UnitTest(config) => config.get_component_name(), - Self::UnitTestStream(config) => config.get_component_name(), - #[cfg(feature = "sinks-vector")] - Self::Vector(config) => config.get_component_name(), - #[cfg(feature = "sinks-websocket")] - Self::Websocket(config) => config.get_component_name(), - } - } -} diff --git a/src/sinks/nats.rs b/src/sinks/nats.rs index 37295fa96e100..e80f28245a631 100644 --- a/src/sinks/nats.rs +++ b/src/sinks/nats.rs @@ -41,7 +41,10 @@ enum BuildError { */ /// Configuration for the `nats` sink. -#[configurable_component(sink("nats"))] +#[configurable_component(sink( + "nats", + "Publish observability data to subjects on the NATS messaging system." +))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct NatsSinkConfig { @@ -114,6 +117,7 @@ impl GenerateConfig for NatsSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "nats")] impl SinkConfig for NatsSinkConfig { async fn build( &self, diff --git a/src/sinks/new_relic/config.rs b/src/sinks/new_relic/config.rs index 9399c80383b09..5900755b12207 100644 --- a/src/sinks/new_relic/config.rs +++ b/src/sinks/new_relic/config.rs @@ -75,7 +75,7 @@ impl RetryLogic for NewRelicApiRetry { } /// Configuration for the `new_relic` sink. -#[configurable_component(sink("new_relic"))] +#[configurable_component(sink("new_relic", "Deliver events to New Relic."))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct NewRelicConfig { @@ -139,6 +139,7 @@ impl NewRelicConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "new_relic")] impl SinkConfig for NewRelicConfig { async fn build( &self, diff --git a/src/sinks/papertrail.rs b/src/sinks/papertrail.rs index 7dbd82459e422..f3d3afb55ace0 100644 --- a/src/sinks/papertrail.rs +++ b/src/sinks/papertrail.rs @@ -16,7 +16,7 @@ use crate::{ }; /// Configuration for the `papertrail` sink. -#[configurable_component(sink("papertrail"))] +#[configurable_component(sink("papertrail", "Deliver log events to Papertrail from SolarWinds."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct PapertrailConfig { @@ -65,6 +65,7 @@ impl GenerateConfig for PapertrailConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "papertrail")] impl SinkConfig for PapertrailConfig { async fn build( &self, diff --git a/src/sinks/prometheus/exporter.rs b/src/sinks/prometheus/exporter.rs index 98bc9a504c014..bb7e6c775b629 100644 --- a/src/sinks/prometheus/exporter.rs +++ b/src/sinks/prometheus/exporter.rs @@ -61,7 +61,10 @@ enum BuildError { /// Configuration for the `prometheus_exporter` sink. #[serde_as] -#[configurable_component(sink("prometheus_exporter"))] +#[configurable_component(sink( + "prometheus_exporter", + "Expose metric events on a Prometheus compatible endpoint." 
+))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct PrometheusExporterConfig { @@ -189,6 +192,7 @@ impl GenerateConfig for PrometheusExporterConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "prometheus_exporter")] impl SinkConfig for PrometheusExporterConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { if self.flush_period_secs.as_secs() < MIN_FLUSH_PERIOD_SECS { diff --git a/src/sinks/prometheus/remote_write.rs b/src/sinks/prometheus/remote_write.rs index 6451e591f11e0..28c043080bc0a 100644 --- a/src/sinks/prometheus/remote_write.rs +++ b/src/sinks/prometheus/remote_write.rs @@ -53,7 +53,10 @@ enum Errors { } /// Configuration for the `prometheus_remote_write` sink. -#[configurable_component(sink("prometheus_remote_write"))] +#[configurable_component(sink( + "prometheus_remote_write", + "Deliver metric data to a Prometheus remote write endpoint." +))] #[derive(Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct RemoteWriteConfig { @@ -159,6 +162,7 @@ const fn convert_compression_to_content_encoding(compression: Compression) -> &' } #[async_trait::async_trait] +#[typetag::serde(name = "prometheus_remote_write")] impl SinkConfig for RemoteWriteConfig { async fn build( &self, diff --git a/src/sinks/pulsar/config.rs b/src/sinks/pulsar/config.rs index 7ec5ef601f32e..ab53a350f32b5 100644 --- a/src/sinks/pulsar/config.rs +++ b/src/sinks/pulsar/config.rs @@ -22,7 +22,7 @@ use vector_core::config::DataType; use vrl::value::Kind; /// Configuration for the `pulsar` sink. -#[configurable_component(sink("pulsar"))] +#[configurable_component(sink("pulsar", "Publish observability events to Apache Pulsar topics."))] #[derive(Clone, Debug)] pub struct PulsarSinkConfig { /// The endpoint to which the Pulsar client should connect to. @@ -281,6 +281,7 @@ impl GenerateConfig for PulsarSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "pulsar")] impl SinkConfig for PulsarSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let client = self diff --git a/src/sinks/redis.rs b/src/sinks/redis.rs index 5a53831bebd3c..402ad28aee906 100644 --- a/src/sinks/redis.rs +++ b/src/sinks/redis.rs @@ -109,7 +109,7 @@ impl SinkBatchSettings for RedisDefaultBatchSettings { } /// Configuration for the `redis` sink. -#[configurable_component(sink("redis"))] +#[configurable_component(sink("redis", "Publish observability data to Redis."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct RedisSinkConfig { @@ -171,6 +171,7 @@ impl GenerateConfig for RedisSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "redis")] impl SinkConfig for RedisSinkConfig { async fn build( &self, diff --git a/src/sinks/sematext/logs.rs b/src/sinks/sematext/logs.rs index a5dcb0da9434c..e4a3d05b00452 100644 --- a/src/sinks/sematext/logs.rs +++ b/src/sinks/sematext/logs.rs @@ -21,7 +21,7 @@ use crate::{ }; /// Configuration for the `sematext_logs` sink. 
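The large deletion from `src/sinks/mod.rs` above drops the hand-maintained `Sinks` enum, its `enum_dispatch(SinkConfig)` wiring, and the `NamedComponent` match arm for every variant. With the per-impl `#[typetag::serde(name = "...")]` registrations in place, that central registry becomes redundant: each sink announces itself at its own definition site, in the style of the distributed registration offered by crates such as `inventory`. A rough sketch of that style, assuming the `inventory` crate; the `SinkDescription` type is illustrative only.

// Each sink module submits its own entry; nothing central enumerates them.
pub struct SinkDescription {
    pub name: &'static str,
    pub description: &'static str,
}

inventory::collect!(SinkDescription);

// In a hypothetical blackhole module:
inventory::submit! {
    SinkDescription {
        name: "blackhole",
        description: "Send observability events nowhere, which can be useful for debugging purposes.",
    }
}

fn main() {
    for sink in inventory::iter::<SinkDescription> {
        println!("{}: {}", sink.name, sink.description);
    }
}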
-#[configurable_component(sink("sematext_logs"))] +#[configurable_component(sink("sematext_logs", "Publish log events to Sematext."))] #[derive(Clone, Debug)] pub struct SematextLogsConfig { #[serde(default = "super::default_region")] @@ -79,6 +79,7 @@ const US_ENDPOINT: &str = "https://logsene-receiver.sematext.com"; const EU_ENDPOINT: &str = "https://logsene-receiver.eu.sematext.com"; #[async_trait::async_trait] +#[typetag::serde(name = "sematext_logs")] impl SinkConfig for SematextLogsConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let endpoint = match (&self.endpoint, &self.region) { diff --git a/src/sinks/sematext/metrics.rs b/src/sinks/sematext/metrics.rs index 29a51c9309638..3e09630d556f6 100644 --- a/src/sinks/sematext/metrics.rs +++ b/src/sinks/sematext/metrics.rs @@ -47,7 +47,7 @@ impl SinkBatchSettings for SematextMetricsDefaultBatchSettings { } /// Configuration for the `sematext_metrics` sink. -#[configurable_component(sink("sematext_metrics"))] +#[configurable_component(sink("sematext_metrics", "Publish metric events to Sematext."))] #[derive(Clone, Debug)] pub struct SematextMetricsConfig { /// Sets the default namespace for any metrics sent. @@ -121,6 +121,7 @@ const US_ENDPOINT: &str = "https://spm-receiver.sematext.com"; const EU_ENDPOINT: &str = "https://spm-receiver.eu.sematext.com"; #[async_trait::async_trait] +#[typetag::serde(name = "sematext_metrics")] impl SinkConfig for SematextMetricsConfig { async fn build(&self, cx: SinkContext) -> Result<(VectorSink, Healthcheck)> { let client = HttpClient::new(None, cx.proxy())?; diff --git a/src/sinks/socket.rs b/src/sinks/socket.rs index 1919ba4b2fa9f..f22339497b47e 100644 --- a/src/sinks/socket.rs +++ b/src/sinks/socket.rs @@ -13,7 +13,7 @@ use crate::{ }; /// Configuration for the `socket` sink. -#[configurable_component(sink("socket"))] +#[configurable_component(sink("socket", "Deliver logs to a remote socket endpoint."))] #[derive(Clone, Debug)] pub struct SocketSinkConfig { #[serde(flatten)] @@ -113,6 +113,7 @@ impl SocketSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "socket")] impl SinkConfig for SocketSinkConfig { async fn build( &self, diff --git a/src/sinks/splunk_hec/logs/config.rs b/src/sinks/splunk_hec/logs/config.rs index 1e0c6675db193..64299669e388b 100644 --- a/src/sinks/splunk_hec/logs/config.rs +++ b/src/sinks/splunk_hec/logs/config.rs @@ -31,7 +31,10 @@ use crate::{ }; /// Configuration for the `splunk_hec_logs` sink. -#[configurable_component(sink("splunk_hec_logs"))] +#[configurable_component(sink( + "splunk_hec_logs", + "Deliver log data to Splunk's HTTP Event Collector." +))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct HecLogsSinkConfig { @@ -183,6 +186,7 @@ impl GenerateConfig for HecLogsSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "splunk_hec_logs")] impl SinkConfig for HecLogsSinkConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { if self.auto_extract_timestamp.is_some() && self.endpoint_target == EndpointTarget::Raw { diff --git a/src/sinks/splunk_hec/metrics/config.rs b/src/sinks/splunk_hec/metrics/config.rs index c989beb4977e4..c93519352868e 100644 --- a/src/sinks/splunk_hec/metrics/config.rs +++ b/src/sinks/splunk_hec/metrics/config.rs @@ -27,7 +27,10 @@ use crate::{ }; /// Configuration of the `splunk_hec_metrics` sink. 
-#[configurable_component(sink("splunk_hec_metrics"))] +#[configurable_component(sink( + "splunk_hec_metrics", + "Deliver metric data to Splunk's HTTP Event Collector." +))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct HecMetricsSinkConfig { @@ -138,6 +141,7 @@ impl GenerateConfig for HecMetricsSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "splunk_hec_metrics")] impl SinkConfig for HecMetricsSinkConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let client = create_client(&self.tls, cx.proxy())?; diff --git a/src/sinks/statsd/config.rs b/src/sinks/statsd/config.rs index 5e1052d59a8bd..340daa247c4dc 100644 --- a/src/sinks/statsd/config.rs +++ b/src/sinks/statsd/config.rs @@ -35,7 +35,7 @@ impl SinkBatchSettings for StatsdDefaultBatchSettings { } /// Configuration for the `statsd` sink. -#[configurable_component(sink("statsd"))] +#[configurable_component(sink("statsd", "Deliver metric data to a StatsD aggregator."))] #[derive(Clone, Debug)] pub struct StatsdSinkConfig { /// Sets the default namespace for any metrics sent. @@ -121,6 +121,7 @@ impl GenerateConfig for StatsdSinkConfig { } #[async_trait] +#[typetag::serde(name = "statsd")] impl SinkConfig for StatsdSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let batcher_settings = self.batch.into_batcher_settings()?; diff --git a/src/sinks/util/adaptive_concurrency/tests.rs b/src/sinks/util/adaptive_concurrency/tests.rs index 69c52bc6ca236..9c2cd0272215a 100644 --- a/src/sinks/util/adaptive_concurrency/tests.rs +++ b/src/sinks/util/adaptive_concurrency/tests.rs @@ -147,7 +147,7 @@ const fn default_concurrency() -> Concurrency { } /// Configuration for the `test_arc` sink. -#[configurable_component(sink("test_arc"))] +#[configurable_component(sink("test_arc", "Test (adaptive concurrency)."))] #[derive(Clone, Debug, Default)] pub struct TestConfig { #[configurable(derived)] @@ -170,6 +170,7 @@ pub struct TestConfig { impl_generate_config_from_default!(TestConfig); #[async_trait::async_trait] +#[typetag::serde(name = "test_arc")] impl SinkConfig for TestConfig { async fn build(&self, _cx: SinkContext) -> Result<(VectorSink, Healthcheck), crate::Error> { let mut batch_settings = BatchSettings::default(); diff --git a/src/sinks/vector/config.rs b/src/sinks/vector/config.rs index 7c2ebfea410c9..74450c117df01 100644 --- a/src/sinks/vector/config.rs +++ b/src/sinks/vector/config.rs @@ -28,7 +28,7 @@ use crate::{ }; /// Configuration for the `vector` sink. -#[configurable_component(sink("vector"))] +#[configurable_component(sink("vector", "Relay observability data to a Vector instance."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct VectorConfig { @@ -105,6 +105,7 @@ fn default_config(address: &str) -> VectorConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "vector")] impl SinkConfig for VectorConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSinkType, Healthcheck)> { let tls = MaybeTlsSettings::from_config(&self.tls, false)?; diff --git a/src/sinks/webhdfs/config.rs b/src/sinks/webhdfs/config.rs index 86518a8692f64..05e4df4cbf901 100644 --- a/src/sinks/webhdfs/config.rs +++ b/src/sinks/webhdfs/config.rs @@ -21,7 +21,7 @@ use crate::{ }; /// Configuration for the `webhdfs` sink. 
-#[configurable_component(sink("webhdfs"))] +#[configurable_component(sink("webhdfs", "WebHDFS."))] #[derive(Clone, Debug)] #[serde(deny_unknown_fields)] pub struct WebHdfsConfig { @@ -97,6 +97,7 @@ impl GenerateConfig for WebHdfsConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "webhdfs")] impl SinkConfig for WebHdfsConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let op = self.build_operator()?; diff --git a/src/sinks/websocket/config.rs b/src/sinks/websocket/config.rs index 64f6087079902..6027273ee4275 100644 --- a/src/sinks/websocket/config.rs +++ b/src/sinks/websocket/config.rs @@ -16,7 +16,10 @@ use crate::{ }; /// Configuration for the `websocket` sink. -#[configurable_component(sink("websocket"))] +#[configurable_component(sink( + "websocket", + "Deliver observability event data to a websocket listener." +))] #[derive(Clone, Debug)] pub struct WebSocketSinkConfig { /// The WebSocket URI to connect to. @@ -78,6 +81,7 @@ impl GenerateConfig for WebSocketSinkConfig { } #[async_trait::async_trait] +#[typetag::serde(name = "websocket")] impl SinkConfig for WebSocketSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let connector = self.build_connector()?; diff --git a/src/test_util/mock/sinks/backpressure.rs b/src/test_util/mock/sinks/backpressure.rs index f84b5c6f3f07c..1c3974a71e168 100644 --- a/src/test_util/mock/sinks/backpressure.rs +++ b/src/test_util/mock/sinks/backpressure.rs @@ -23,7 +23,7 @@ impl StreamSink for BackpressureSink { } /// Configuration for the `test_backpressure` sink. -#[configurable_component(sink("test_backpressure"))] +#[configurable_component(sink("test_backpressure", "Test (backpressure)."))] #[derive(Clone, Debug, Default)] pub struct BackpressureSinkConfig { /// Number of events to consume before stopping. @@ -33,6 +33,7 @@ pub struct BackpressureSinkConfig { impl_generate_config_from_default!(BackpressureSinkConfig); #[async_trait] +#[typetag::serde(name = "test_backpressure")] impl SinkConfig for BackpressureSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { let sink = BackpressureSink { diff --git a/src/test_util/mock/sinks/basic.rs b/src/test_util/mock/sinks/basic.rs index 0182b56d77bf0..a7463aace4f14 100644 --- a/src/test_util/mock/sinks/basic.rs +++ b/src/test_util/mock/sinks/basic.rs @@ -17,7 +17,7 @@ use crate::{ }; /// Configuration for the `test_basic` sink. -#[configurable_component(sink("test_basic"))] +#[configurable_component(sink("test_basic", "Test (basic)."))] #[derive(Clone, Debug, Default)] pub struct BasicSinkConfig { #[serde(skip)] @@ -65,6 +65,7 @@ enum HealthcheckError { } #[async_trait] +#[typetag::serde(name = "test_basic")] impl SinkConfig for BasicSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { // If this sink is set to not be healthy, just send the healthcheck error immediately over diff --git a/src/test_util/mock/sinks/error.rs b/src/test_util/mock/sinks/error.rs index b1a662fa6694f..bb6c7d6a9e323 100644 --- a/src/test_util/mock/sinks/error.rs +++ b/src/test_util/mock/sinks/error.rs @@ -18,7 +18,7 @@ use crate::{ }; /// Configuration for the `test_error` sink. -#[configurable_component(sink("test_error"))] +#[configurable_component(sink("test_error", "Test (error)."))] #[derive(Clone, Debug, Default)] pub struct ErrorSinkConfig { /// Dummy field used for generating unique configurations to trigger reloads. 
@@ -28,6 +28,7 @@ pub struct ErrorSinkConfig { impl_generate_config_from_default!(ErrorSinkConfig); #[async_trait] +#[typetag::serde(name = "test_error")] impl SinkConfig for ErrorSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { #[allow(deprecated)] diff --git a/src/test_util/mock/sinks/oneshot.rs b/src/test_util/mock/sinks/oneshot.rs index 084efdb107e83..3ebc5a27e4b9e 100644 --- a/src/test_util/mock/sinks/oneshot.rs +++ b/src/test_util/mock/sinks/oneshot.rs @@ -16,7 +16,7 @@ use crate::{ }; /// Configurable for the `test_oneshot` sink. -#[configurable_component(sink("test_oneshot"))] +#[configurable_component(sink("test_oneshot", "Test (oneshot)."))] #[derive(Clone, Debug, Default)] pub struct OneshotSinkConfig { #[serde(skip)] @@ -34,6 +34,7 @@ impl OneshotSinkConfig { } #[async_trait] +#[typetag::serde(name = "test_oneshot")] impl SinkConfig for OneshotSinkConfig { fn input(&self) -> Input { Input::all() diff --git a/src/test_util/mock/sinks/panic.rs b/src/test_util/mock/sinks/panic.rs index 34079fc30f4ec..22f56157679ed 100644 --- a/src/test_util/mock/sinks/panic.rs +++ b/src/test_util/mock/sinks/panic.rs @@ -18,7 +18,7 @@ use crate::{ }; /// Configuration for the `test_panic` sink. -#[configurable_component(sink("test_panic"))] +#[configurable_component(sink("test_panic", "Test (panic)."))] #[derive(Clone, Debug, Default)] pub struct PanicSinkConfig { /// Dummy field used for generating unique configurations to trigger reloads. @@ -28,6 +28,7 @@ pub struct PanicSinkConfig { impl_generate_config_from_default!(PanicSinkConfig); #[async_trait] +#[typetag::serde(name = "test_panic")] impl SinkConfig for PanicSinkConfig { async fn build(&self, _cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { #[allow(deprecated)] diff --git a/src/topology/builder.rs b/src/topology/builder.rs index a02dddd13f3d3..ba0d2dda8a761 100644 --- a/src/topology/builder.rs +++ b/src/topology/builder.rs @@ -19,7 +19,6 @@ use tracing::Instrument; use vector_common::internal_event::{ self, CountByteSize, EventsSent, InternalEventHandle as _, Registered, }; -use vector_config::NamedComponent; use vector_core::config::LogNamespace; use vector_core::{ buffers::{ @@ -42,7 +41,7 @@ use super::{ use crate::{ config::{ ComponentKey, DataType, EnrichmentTableConfig, Input, Inputs, OutputId, ProxyConfig, - SinkConfig, SinkContext, SourceContext, TransformContext, TransformOuter, TransformOutput, + SinkContext, SourceContext, TransformContext, TransformOuter, TransformOutput, }, event::{EventArray, EventContainer}, internal_events::EventsReceived, diff --git a/src/topology/schema.rs b/src/topology/schema.rs index b0256a22178b0..bd0974a5adeaf 100644 --- a/src/topology/schema.rs +++ b/src/topology/schema.rs @@ -6,7 +6,7 @@ use vector_core::config::SourceOutput; pub(super) use crate::schema::Definition; use crate::{ - config::{ComponentKey, Config, OutputId, SinkConfig, SinkOuter, TransformOutput}, + config::{ComponentKey, Config, OutputId, SinkOuter, TransformOutput}, topology, }; diff --git a/website/cue/reference/components/sinks/base/aws_s3.cue b/website/cue/reference/components/sinks/base/aws_s3.cue index a7fa8c1eb2216..11265897c80fc 100644 --- a/website/cue/reference/components/sinks/base/aws_s3.cue +++ b/website/cue/reference/components/sinks/base/aws_s3.cue @@ -727,8 +727,12 @@ base: components: sinks: aws_s3: configuration: { } } server_side_encryption: { - description: "The Server-side Encryption algorithm used when storing these objects." 
- required: false + description: """ + AWS S3 Server-Side Encryption algorithms. + + The Server-side Encryption algorithm used when storing these objects. + """ + required: false type: string: enum: { AES256: """ Each object is encrypted with AES-256 using a unique key. From 6705bdde058b1a532eda9398c9610dff46bb783b Mon Sep 17 00:00:00 2001 From: Sergey Yedrikov <48031344+syedriko@users.noreply.github.com> Date: Tue, 20 Jun 2023 15:30:41 -0400 Subject: [PATCH 155/236] fix(auth): Vector does not put the Proxy-Authorization header on the wire (#17353) (#17363) fix(auth): Vector does not put the Proxy-Authorization header on the wire (#17353) closes: #17353 --- lib/vector-core/src/config/proxy.rs | 29 +++++++++++++++-------------- src/http.rs | 27 +++++++++++++++++++++++---- 2 files changed, 38 insertions(+), 18 deletions(-) diff --git a/lib/vector-core/src/config/proxy.rs b/lib/vector-core/src/config/proxy.rs index 4f107db571960..afc4d58a56f21 100644 --- a/lib/vector-core/src/config/proxy.rs +++ b/lib/vector-core/src/config/proxy.rs @@ -201,7 +201,12 @@ impl ProxyConfig { mod tests { use base64::prelude::{Engine as _, BASE64_STANDARD}; use env_test_util::TempEnvVar; - use http::{HeaderValue, Uri}; + use http::{ + header::{AUTHORIZATION, PROXY_AUTHORIZATION}, + HeaderName, HeaderValue, Uri, + }; + + const PROXY_HEADERS: [HeaderName; 2] = [AUTHORIZATION, PROXY_AUTHORIZATION]; use super::*; @@ -341,20 +346,18 @@ mod tests { Some(first.uri()), Uri::try_from("http://user:pass@1.2.3.4:5678").as_ref().ok() ); - assert_eq!( - first.headers().get("authorization"), - expected_header_value.as_ref().ok() - ); + for h in &PROXY_HEADERS { + assert_eq!(first.headers().get(h), expected_header_value.as_ref().ok()); + } assert_eq!( Some(second.uri()), Uri::try_from("https://user:pass@2.3.4.5:9876") .as_ref() .ok() ); - assert_eq!( - second.headers().get("authorization"), - expected_header_value.as_ref().ok() - ); + for h in &PROXY_HEADERS { + assert_eq!(second.headers().get(h), expected_header_value.as_ref().ok()); + } } #[ignore] @@ -371,10 +374,8 @@ mod tests { .expect("should not be None"); let encoded_header = format!("Basic {}", BASE64_STANDARD.encode("user:P@ssw0rd")); let expected_header_value = HeaderValue::from_str(encoded_header.as_str()); - - assert_eq!( - first.headers().get("authorization"), - expected_header_value.as_ref().ok() - ); + for h in &PROXY_HEADERS { + assert_eq!(first.headers().get(h), expected_header_value.as_ref().ok()); + } } } diff --git a/src/http.rs b/src/http.rs index 71777df3c296c..7eee5646f0ae3 100644 --- a/src/http.rs +++ b/src/http.rs @@ -53,10 +53,12 @@ impl HttpError { } pub type HttpClientFuture = >>::Future; +type HttpProxyConnector = ProxyConnector>; pub struct HttpClient { - client: Client>, B>, + client: Client, user_agent: HeaderValue, + proxy_connector: HttpProxyConnector, } impl HttpClient @@ -77,14 +79,18 @@ where proxy_config: &ProxyConfig, client_builder: &mut client::Builder, ) -> Result, HttpError> { - let proxy = build_proxy_connector(tls_settings.into(), proxy_config)?; - let client = client_builder.build(proxy); + let proxy_connector = build_proxy_connector(tls_settings.into(), proxy_config)?; + let client = client_builder.build(proxy_connector.clone()); let version = crate::get_version(); let user_agent = HeaderValue::from_str(&format!("Vector/{}", version)) .expect("Invalid header value for version!"); - Ok(HttpClient { client, user_agent }) + Ok(HttpClient { + client, + user_agent, + proxy_connector, + }) } pub fn send( @@ -95,6 +101,7 @@ where let 
_enter = span.enter(); default_request_headers(&mut request, &self.user_agent); + self.maybe_add_proxy_headers(&mut request); emit!(http_client::AboutToSendHttpRequest { request: &request }); @@ -135,6 +142,17 @@ where Box::pin(fut) } + + fn maybe_add_proxy_headers(&self, request: &mut Request) { + if let Some(proxy_headers) = self.proxy_connector.http_headers(request.uri()) { + for (k, v) in proxy_headers { + let request_headers = request.headers_mut(); + if !request_headers.contains_key(k) { + request_headers.insert(k, v.into()); + } + } + } + } } pub fn build_proxy_connector( @@ -216,6 +234,7 @@ impl Clone for HttpClient { Self { client: self.client.clone(), user_agent: self.user_agent.clone(), + proxy_connector: self.proxy_connector.clone(), } } } From 12bc4a7d116273cda322fccf41b4e3ea6c333be3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 14:17:53 -0700 Subject: [PATCH 156/236] chore(ci): Bump aws-actions/configure-aws-credentials from 2.1.0 to 2.2.0 (#17697) Bumps [aws-actions/configure-aws-credentials](https://github.com/aws-actions/configure-aws-credentials) from 2.1.0 to 2.2.0.
Release notes

Sourced from aws-actions/configure-aws-credentials's releases.

v2.2.0

See the changelog for details about the changes included in this release.

Changelog

Sourced from aws-actions/configure-aws-credentials's changelog.

2.2.0 (2023-05-31)

Features

  • inline-session-policy prop enables assuming a role with inline session policies (d00f6c6)
  • managed-session-policies prop enables assuming a role with managed policy arns (d00f6c6)
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=aws-actions/configure-aws-credentials&package-manager=github_actions&previous-version=2.1.0&new-version=2.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/regression.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index e78de733eb234..d1cea2f64a737 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -359,7 +359,7 @@ jobs: - compute-metadata steps: - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.1.0 + uses: aws-actions/configure-aws-credentials@v2.2.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -391,7 +391,7 @@ jobs: docker load --input baseline-image.tar - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.1.0 + uses: aws-actions/configure-aws-credentials@v2.2.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -429,7 +429,7 @@ jobs: docker load --input comparison-image.tar - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.1.0 + uses: aws-actions/configure-aws-credentials@v2.2.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -475,7 +475,7 @@ jobs: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.1.0 + uses: aws-actions/configure-aws-credentials@v2.2.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -594,7 +594,7 @@ jobs: - uses: actions/checkout@v3 - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.1.0 + uses: aws-actions/configure-aws-credentials@v2.2.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} @@ -685,7 +685,7 @@ jobs: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2.1.0 + uses: aws-actions/configure-aws-credentials@v2.2.0 with: aws-access-key-id: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.SINGLE_MACHINE_PERFORMANCE_BOT_SECRET_ACCESS_KEY }} From dd2527dcea295f4f9f6eb617306a822892e08a59 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Jun 2023 17:33:19 +0000 Subject: [PATCH 157/236] chore(deps): Bump openssl from 0.10.54 to 0.10.55 (#17716) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.54 to 0.10.55.
Release notes

Sourced from openssl's releases.

openssl-v0.10.55

What's Changed

New Contributors

Full Changelog: https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.54...openssl-v0.10.55

Commits
  • d7dae6f Merge pull request #1970 from alex/bump-for-release
  • 983b9e2 Release openssl v0.10.55 and openssl-sys v0.9.89
  • 28b3925 Merge pull request #1966 from tesuji/tidy-old-msrv
  • f03a2dc Merge pull request #1968 from sfackler/empty-domain-segfault
  • 155b3dc Fix handling of empty host strings
  • 9784356 chore: simplify cfg attributes
  • 8ab3c3f update min-version passed to bindgen
  • 8587ff8 chore: use pre-existing clean APIs instead
  • b1e16e9 clippy: use strip_prefix instead of manually strip
  • fb5ae60 clippy: remove unused allow attributes
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=openssl&package-manager=cargo&previous-version=0.10.54&new-version=0.10.55)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b76e9fe96194..132e547f43b48 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5578,9 +5578,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.54" +version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b3f656a17a6cbc115b5c7a40c616947d213ba182135b014d6051b73ab6f019" +checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ "bitflags", "cfg-if", @@ -5619,9 +5619,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.88" +version = "0.9.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2ce0f250f34a308dcfdbb351f511359857d4ed2134ba715a4eadd46e1ffd617" +checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index ec4c1c1a6cf56..2615e080e739c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -281,7 +281,7 @@ nkeys = { version = "0.3.0", default-features = false, optional = true } nom = { version = "7.1.3", default-features = false, optional = true } notify = { version = "6.0.1", default-features = false, features = ["macos_fsevent"] } once_cell = { version = "1.18", default-features = false } -openssl = { version = "0.10.54", default-features = false, features = ["vendored"] } +openssl = { version = "0.10.55", default-features = false, features = ["vendored"] } openssl-probe = { version = "0.1.5", default-features = false } ordered-float = { version = "3.7.0", default-features = false } paste = "1.0.12" diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 99a53e604b3bc..1013b663ba464 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -31,7 +31,7 @@ mlua = { version = "0.8.9", default-features = false, features = ["lua54", "send no-proxy = { version = "0.3.2", default-features = false, features = ["serialize"] } once_cell = { version = "1.18", default-features = false } ordered-float = { version = "3.7.0", default-features = false } -openssl = { version = "0.10.54", default-features = false, features = ["vendored"] } +openssl = { version = "0.10.55", default-features = false, features = ["vendored"] } parking_lot = { version = "0.12.1", default-features = false } pin-project = { version = "1.1.0", default-features = false } proptest = { version = "1.2", optional = true } From e8e7e0448f51ed9646c484123fd4953442545c86 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Wed, 21 Jun 2023 10:00:00 -0700 Subject: [PATCH 158/236] chore(ci): Retry `make check-component-docs` check (#17718) This check seems to be flakey but we haven't been able to reliably reproduce. Just retry for now. In the future we hope to rewrite this script into Rust. 
Signed-off-by: Jesse Szwedko --------- Signed-off-by: Jesse Szwedko --- .github/workflows/test.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 76ad3ce2e27db..23497b73cf967 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -129,7 +129,11 @@ jobs: run: make check-markdown - name: Check Component Docs if: needs.changes.outputs.source == 'true' || needs.changes.outputs.component_docs == 'true' - run: make check-component-docs + uses: nick-fields/retry@v2 + with: + max_attempts: 10 + timeout_seconds: 900 + command: make check-component-docs - name: Check Rust Docs if: needs.changes.outputs.source == 'true' run: cd rust-doc && make docs From ddebde97bac79eaecb7feb286bfe5a25591e7d13 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Wed, 21 Jun 2023 13:29:50 -0700 Subject: [PATCH 159/236] chore(deps): Upgrade Ruby version to 3.1.4 (#17722) Seems to remove flakiness from component docs check so I removed the retry. I manually verified the other Ruby scripts still run on Ruby 3. I added a `.ruby-version` file to ensure CI and local Ruby use uses the same version. 3.1.4 should already be installed on the runner image per https://github.com/actions/runner-images/blob/ubuntu20/20230611.1/images/linux/Ubuntu2004-Readme.md. Signed-off-by: Jesse Szwedko --------- Signed-off-by: Jesse Szwedko --- .github/workflows/test.yml | 7 ++----- .ruby-version | 1 + scripts/Gemfile | 2 +- scripts/Gemfile.lock | 4 ++-- scripts/environment/prepare.sh | 4 ---- 5 files changed, 6 insertions(+), 12 deletions(-) create mode 100644 .ruby-version diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 23497b73cf967..0badffbcdcc90 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -89,6 +89,7 @@ jobs: # check-version needs tags fetch-depth: 0 # fetch everything - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh + - uses: ruby/setup-ruby@v1 - run: bash scripts/environment/prepare.sh - uses: actions/cache@v3 name: Cache Cargo registry + index @@ -129,11 +130,7 @@ jobs: run: make check-markdown - name: Check Component Docs if: needs.changes.outputs.source == 'true' || needs.changes.outputs.component_docs == 'true' - uses: nick-fields/retry@v2 - with: - max_attempts: 10 - timeout_seconds: 900 - command: make check-component-docs + run: make check-component-docs - name: Check Rust Docs if: needs.changes.outputs.source == 'true' run: cd rust-doc && make docs diff --git a/.ruby-version b/.ruby-version new file mode 100644 index 0000000000000..0aec50e6ede78 --- /dev/null +++ b/.ruby-version @@ -0,0 +1 @@ +3.1.4 diff --git a/scripts/Gemfile b/scripts/Gemfile index 3547dd6311eca..8fb1de071f728 100644 --- a/scripts/Gemfile +++ b/scripts/Gemfile @@ -1,4 +1,4 @@ -ruby '~> 2.7.0' +ruby '~> 3.1.0' # !!! # Please try not to add more dependencies here. diff --git a/scripts/Gemfile.lock b/scripts/Gemfile.lock index bac6a4ecd3493..051a61c1e83da 100644 --- a/scripts/Gemfile.lock +++ b/scripts/Gemfile.lock @@ -30,7 +30,7 @@ DEPENDENCIES toml-rb (~> 2.0) RUBY VERSION - ruby 2.7.1p83 + ruby 3.1.4p223 BUNDLED WITH - 2.1.4 + 2.4.14 diff --git a/scripts/environment/prepare.sh b/scripts/environment/prepare.sh index d00924206114c..73d51c951b1b4 100755 --- a/scripts/environment/prepare.sh +++ b/scripts/environment/prepare.sh @@ -21,10 +21,6 @@ if ! 
rust-license-tool --help >& /dev/null ; then cargo install --git https://github.com/DataDog/rust-license-tool fi -cd scripts -bundle install -cd .. - # Currently fixing this to version 0.30 since version 0.31 has introduced # a change that means it only works with versions of node > 10. # https://github.com/igorshubovych/markdownlint-cli/issues/258 From bc6925592f8d954212efb99f2f17bcac8a454169 Mon Sep 17 00:00:00 2001 From: neuronull Date: Wed, 21 Jun 2023 16:41:01 -0600 Subject: [PATCH 160/236] enhancement(ci): reduce billable time of Test Suite (#17714) - save time by running unit tests and vrl test not in separate jobs --- .github/workflows/test.yml | 87 ++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 51 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 0badffbcdcc90..76d8dc2e27f47 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -33,50 +33,6 @@ jobs: base_ref: ${{ github.event.merge_group.base_ref || github.event.pull_request.base.ref }} head_ref: ${{ github.event.merge_group.head_ref || github.event.pull_request.head.ref }} - # Remove this once https://github.com/vectordotdev/vector/issues/3771 is closed. - # Then, modify the `cross-linux` job to run `test` instead of `build`. - test-linux: - name: Unit - x86_64-unknown-linux-gnu - runs-on: [linux, ubuntu-20.04-8core] - needs: changes - env: - CARGO_INCREMENTAL: 0 - if: ${{ needs.changes.outputs.source == 'true' }} - steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 - name: Cache Cargo registry + index - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - restore-keys: | - ${{ runner.os }}-cargo- - - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - - run: bash scripts/environment/prepare.sh - - run: echo "::add-matcher::.github/matchers/rust.json" - - run: make test - env: - CARGO_BUILD_JOBS: 5 - - name: Upload test results - run: scripts/upload-test-results.sh - if: always() - - test-vrl: - name: VRL - Linux - continue-on-error: true - runs-on: [linux, ubuntu-20.04-8core] - needs: changes - if: ${{ needs.changes.outputs.source == 'true' || needs.changes.outputs.cue == 'true' }} - steps: - - uses: actions/checkout@v3 - - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - - run: bash scripts/environment/prepare.sh - - run: cargo vdev test-vrl - checks: name: Checks runs-on: [linux, ubuntu-20.04-8core] @@ -88,9 +44,7 @@ jobs: with: # check-version needs tags fetch-depth: 0 # fetch everything - - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - - uses: ruby/setup-ruby@v1 - - run: bash scripts/environment/prepare.sh + - uses: actions/cache@v3 name: Cache Cargo registry + index with: @@ -102,38 +56,72 @@ jobs: key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ runner.os }}-cargo- + + - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh + + - uses: ruby/setup-ruby@v1 + + - run: bash scripts/environment/prepare.sh + - name: Enable Rust matcher run: echo "::add-matcher::.github/matchers/rust.json" + - name: Check code format run: make check-fmt + - name: Check clippy if: needs.changes.outputs.source == 'true' run: make check-clippy + + # Remove this once https://github.com/vectordotdev/vector/issues/3771 is closed. + # Then, modify the `cross-linux` job to run `test` instead of `build`. 
+ - name: Unit - x86_64-unknown-linux-gnu + run: make test + env: + CARGO_BUILD_JOBS: 5 + + - name: Upload test results + run: scripts/upload-test-results.sh + if: always() + - name: Check version run: make check-version + - name: Check scripts run: make check-scripts + - name: Check events if: needs.changes.outputs.source == 'true' run: make check-events + - name: Check cargo deny advisories/licenses if: needs.changes.outputs.dependencies == 'true' || needs.changes.outputs.deny == 'true' run: make check-deny + - name: Check that the 3rd-party license file is up to date if: needs.changes.outputs.dependencies == 'true' run: make check-licenses + - name: Check Cue docs if: needs.changes.outputs.cue == 'true' run: make check-docs + - name: Check Markdown if: needs.changes.outputs.markdown == 'true' run: make check-markdown + - name: Check Component Docs if: needs.changes.outputs.source == 'true' || needs.changes.outputs.component_docs == 'true' run: make check-component-docs + - name: Check Rust Docs if: needs.changes.outputs.source == 'true' run: cd rust-doc && make docs + + - name: VRL - Linux + if: needs.changes.outputs.source == 'true' || needs.changes.outputs.cue == 'true' + run: cargo vdev test-vrl + - uses: actions/upload-artifact@v3 with: name: "config-schema.json" @@ -145,10 +133,7 @@ jobs: name: Test Suite runs-on: ubuntu-20.04 if: always() - needs: - - checks - - test-vrl - - test-linux + needs: checks env: FAILED: ${{ contains(needs.*.result, 'failure') }} steps: From 25131efdbe855a8f4d2491bd68fb76c58f7f8ad4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Jun 2023 09:54:09 +0100 Subject: [PATCH 161/236] chore(deps): Bump serde_json from 1.0.96 to 1.0.97 (#17701) Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.96 to 1.0.97.
Release notes

Sourced from serde_json's releases.

v1.0.97

  • Add io_error_kind() method to serde_json::Error: fn io_error_kind(&self) -> Option<std::io::ErrorKind> (#1026)
Commits
  • a0ddb25 Release 1.0.97
  • 8b681ff Merge pull request #1026 from dtolnay/errorkind
  • 9308d97 Add Error::io_error_kind
  • 136b773 Merge pull request #1025 from dtolnay/io
  • 207a57b Standardize on "I/O" instead of "IO"
  • 6fda385 Merge pull request 1021 from ndmitchell/patch-2
  • 009a53b Switch to using null
  • 931ee23 Show error details during miri setup in CI
  • 0d7b0d3 Add an example of producing a Null in a json! literal
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=serde_json&package-manager=cargo&previous-version=1.0.96&new-version=1.0.97)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- lib/vector-api-client/Cargo.toml | 2 +- lib/vector-common/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- vdev/Cargo.toml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 132e547f43b48..66715e82f0c93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7341,9 +7341,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" +checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ "indexmap", "itoa", diff --git a/Cargo.toml b/Cargo.toml index 2615e080e739c..7ed63a800e2a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -190,7 +190,7 @@ tower-http = { version = "0.4.0", default-features = false, features = ["decompr serde = { version = "1.0.164", default-features = false, features = ["derive"] } serde-toml-merge = { version = "0.3.0", default-features = false } serde_bytes = { version = "0.11.9", default-features = false, features = ["std"], optional = true } -serde_json = { version = "1.0.96", default-features = false, features = ["raw_value"] } +serde_json = { version = "1.0.97", default-features = false, features = ["raw_value"] } serde_with = { version = "2.3.2", default-features = false, features = ["macros", "std"] } serde_yaml = { version = "0.9.21", default-features = false } diff --git a/lib/vector-api-client/Cargo.toml b/lib/vector-api-client/Cargo.toml index 8703e66edf1db..06c39deff3810 100644 --- a/lib/vector-api-client/Cargo.toml +++ b/lib/vector-api-client/Cargo.toml @@ -10,7 +10,7 @@ license = "MPL-2.0" # Serde serde = { version = "1.0.164", default-features = false, features = ["derive"] } -serde_json = { version = "1.0.96", default-features = false, features = ["raw_value"] } +serde_json = { version = "1.0.97", default-features = false, features = ["raw_value"] } # Error handling anyhow = { version = "1.0.71", default-features = false, features = ["std"] } diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 19aaa92d33137..1fde826696bad 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -55,7 +55,7 @@ ordered-float = { version = "3.7.0", default-features = false } paste = "1.0.12" pin-project = { version = "1.1.0", default-features = false } ryu = { version = "1", default-features = false } -serde_json = { version = "1.0.96", default-features = false, features = ["std", "raw_value"] } +serde_json = { version = "1.0.97", default-features = false, features = ["std", "raw_value"] } serde = { version = "1.0.164", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false } snafu = { version = "0.7", optional = true } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 1013b663ba464..211442af80960 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -41,7 +41,7 @@ quanta = { version = "0.11.1", default-features = false } regex = { version = "1.8.4", default-features = false, features = ["std", "perf"] } ryu = { version = "1", default-features = false } serde = { version = "1.0.164", default-features = false, features = ["derive", "rc"] } -serde_json = { version = "1.0.96", default-features = false } +serde_json = { version = "1.0.97", 
default-features = false } serde_with = { version = "2.3.2", default-features = false, features = ["std", "macros"] } smallvec = { version = "1", default-features = false, features = ["serde", "const_generics"] } snafu = { version = "0.7.4", default-features = false } diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index bf59e597785bc..c7f306b0e3783 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -33,7 +33,7 @@ paste = "1.0.12" regex = { version = "1.8.4", default-features = false, features = ["std", "perf"] } reqwest = { version = "0.11", features = ["json", "blocking"] } serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0.96" +serde_json = "1.0.97" serde_yaml = "0.9.21" sha2 = "0.10.7" tempfile = "3.6.0" From e5e6b9635cf3fd13676d845f184ef3a04167ceef Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Jun 2023 09:54:27 +0100 Subject: [PATCH 162/236] chore(deps): Bump tower-http from 0.4.0 to 0.4.1 (#17711) Bumps [tower-http](https://github.com/tower-rs/tower-http) from 0.4.0 to 0.4.1.
Release notes

Sourced from tower-http's releases.

v0.4.1

Added

  • request_id: Derive Default for MakeRequestUuid (#335)
  • fs: Derive Default for ServeFileSystemResponseBody (#336)
  • compression: Expose compression quality on the CompressionLayer (#333)

Fixed

  • compression: Improve parsing of Accept-Encoding request header (#220)
  • normalize_path: Fix path normalization of index route (#347)
  • decompression: Enable multiple_members for GzipDecoder (#354)

#347: tower-rs/tower-http#347 #333: tower-rs/tower-http#333 #220: tower-rs/tower-http#220 #335: tower-rs/tower-http#335 #336: tower-rs/tower-http#336 #354: tower-rs/tower-http#354

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tower-http&package-manager=cargo&previous-version=0.4.0&new-version=0.4.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 68 +++++++++++++++++++++++++++++------------------------- Cargo.toml | 2 +- 2 files changed, 38 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66715e82f0c93..355627bef094b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1205,7 +1205,7 @@ checksum = "f8175979259124331c1d7bf6586ee7e0da434155e4b2d48ec2c8386281d8df39" dependencies = [ "async-trait", "axum-core", - "bitflags", + "bitflags 1.3.2", "bytes 1.4.0", "futures-util", "http", @@ -1434,6 +1434,12 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbe3c979c178231552ecba20214a8272df4e09f232a87aef4320cf06539aded" + [[package]] name = "bitmask-enum" version = "2.1.0" @@ -1915,7 +1921,7 @@ checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "ansi_term", "atty", - "bitflags", + "bitflags 1.3.2", "strsim 0.8.0", "textwrap", "unicode-width", @@ -1949,7 +1955,7 @@ version = "4.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "351f9ad9688141ed83dfd8f5fb998a06225ef444b48ff4dc43de6d409b7fd10b" dependencies = [ - "bitflags", + "bitflags 1.3.2", "clap_lex", "is-terminal", "strsim 0.10.0", @@ -2349,7 +2355,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e64e6c0fbe2c17357405f7c758c1ef960fce08bdfb2c03d88d2a18d7e09c4b67" dependencies = [ - "bitflags", + "bitflags 1.3.2", "crossterm_winapi", "libc", "mio", @@ -2365,7 +2371,7 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a84cda67535339806297f1b331d6dd6320470d2a0fe65381e79ee9e156dd3d13" dependencies = [ - "bitflags", + "bitflags 1.3.2", "crossterm_winapi", "futures-core", "libc", @@ -3635,7 +3641,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "bytes 1.4.0", "headers-core", "http", @@ -3723,7 +3729,7 @@ name = "heim-disk" version = "0.1.0-rc.1" source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#76fa765c7ed7fbe43d1465bf52da6b8d19f2d2a9" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "core-foundation", "heim-common", @@ -3770,7 +3776,7 @@ name = "heim-net" version = "0.1.0-rc.1" source = "git+https://github.com/vectordotdev/heim.git?branch=update-nix#76fa765c7ed7fbe43d1465bf52da6b8d19f2d2a9" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "heim-common", "heim-runtime", @@ -4125,7 +4131,7 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff" dependencies = [ - "bitflags", + "bitflags 1.3.2", "inotify-sys", "libc", ] @@ -4406,7 +4412,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8367585489f01bc55dd27404dcf56b95e6da061a256a666ab23be9ba96a2e587" dependencies = [ - "bitflags", + "bitflags 1.3.2", "libc", ] @@ -4936,9 +4942,9 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -5007,7 +5013,7 @@ checksum = "ebe15399de63ad4294c80069967736cbb87ebe467a8cd0629df9cab88a6fbde6" dependencies = [ "async-trait", "base64 0.13.1", - "bitflags", + "bitflags 1.3.2", "bson", "chrono", "derivative", @@ -5179,7 +5185,7 @@ version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cc", "cfg-if", "libc", @@ -5191,7 +5197,7 @@ name = "nix" version = "0.26.2" source = "git+https://github.com/vectordotdev/nix.git?branch=memfd/gnu/musl#6c53a918d2d5bf4307fd60a19d9e10913ae71eeb" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", "memoffset 0.7.1", @@ -5275,7 +5281,7 @@ version = "6.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5738a2795d57ea20abec2d6d76c6081186709c0024187cd5977265eda6598b51" dependencies = [ - "bitflags", + "bitflags 1.3.2", "filetime", "fsevent-sys", "inotify", @@ -5493,7 +5499,7 @@ version = "6.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c4b31c8722ad9171c6d77d3557db078cab2bd50afcc9d09c8b315c59df8ca4f" dependencies = [ - "bitflags", + "bitflags 1.3.2", "libc", "once_cell", "onig_sys", @@ -5582,7 +5588,7 @@ version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "foreign-types", "libc", @@ -6242,7 +6248,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e35c06b98bf36aba164cc17cb25f7e232f5c4aeea73baa14b8a9f0d92dbfa65" dependencies = [ "bit-set", - "bitflags", + "bitflags 1.3.2", "byteorder", "lazy_static", "num-traits", @@ -6564,7 +6570,7 @@ version = "10.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6823ea29436221176fe662da99998ad3b4db2c7f31e7b6f5fe43adccd6320bb" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -6679,7 +6685,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -6688,7 +6694,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -6961,7 +6967,7 @@ version = "0.35.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "727a1a6d65f786ec22df8a81ca3121107f235970dc1705ed681d3e6e8b9cd5f9" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno 0.2.8", "io-lifetimes 0.7.5", "libc", @@ -6975,7 +6981,7 @@ version = "0.37.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" dependencies = [ - "bitflags", + "bitflags 1.3.2", "errno 0.3.1", "io-lifetimes 1.0.11", "libc", @@ -7096,7 +7102,7 @@ version = "11.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfc8644681285d1fb67a467fb3021bfea306b99b4146b166a1fe3ada965eece" 
dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "clipboard-win", "libc", @@ -7216,7 +7222,7 @@ version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -8492,13 +8498,13 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1d42a9b3f3ec46ba828e8d376aec14592ea199f70a06a548587ecd1c4ab658" +checksum = "a8bd22a874a2d0b70452d5597b12c537331d49060824a95f49f108994f94aa4c" dependencies = [ "async-compression 0.3.15", "base64 0.20.0", - "bitflags", + "bitflags 2.3.2", "bytes 1.4.0", "futures-core", "futures-util", @@ -8779,7 +8785,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccdd26cbd674007e649a272da4475fb666d3aa0ad0531da7136db6fab0e5bad1" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cassowary", "crossterm 0.25.0", "unicode-segmentation", @@ -9980,7 +9986,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd9db37ecb5b13762d95468a2fc6009d4b2c62801243223aabd44fca13ad13c8" dependencies = [ - "bitflags", + "bitflags 1.3.2", "widestring 1.0.2", "windows-sys 0.45.0", ] diff --git a/Cargo.toml b/Cargo.toml index 7ed63a800e2a0..13f5d2a33e7f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -185,7 +185,7 @@ opendal = {version = "0.37", default-features = false, features = ["native-tls", # Tower tower = { version = "0.4.13", default-features = false, features = ["buffer", "limit", "retry", "timeout", "util", "balance", "discover"] } -tower-http = { version = "0.4.0", default-features = false, features = ["decompression-gzip"]} +tower-http = { version = "0.4.1", default-features = false, features = ["decompression-gzip"]} # Serde serde = { version = "1.0.164", default-features = false, features = ["derive"] } serde-toml-merge = { version = "0.3.0", default-features = false } From c96e3be34c239e94a366f9ced8e0e8b69570a562 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Jun 2023 08:55:03 +0000 Subject: [PATCH 163/236] chore(deps): Bump mongodb from 2.5.0 to 2.6.0 (#17726) Bumps [mongodb](https://github.com/mongodb/mongo-rust-driver) from 2.5.0 to 2.6.0.
Release notes

Sourced from mongodb's releases.

v2.6.0

The MongoDB Rust driver team is pleased to announce the v2.6.0 release of the mongodb crate, now available for download from crates.io.

Included Changes

Below are a selected list of changes with user impact; for a full list of changes see this GitHub query.

New Features

  • RUST-1421 Implement FromStr for Namespace (#889)
  • RUST-906 Add native support for AWS IAM Roles for service accounts, EKS in particular (#885)
  • RUST-1417 Add support for GCP attached service accounts when using GCP KMS (#877)
  • RUST-1442 On-demand Azure KMS credentials (#872)
  • RUST-1571 Add i64 server ID field to ConnectionInfo (#894)

Improvements

  • RUST-1358 Remove most type constraints on cursor values (#891)

Bugfixes

  • RUST-1645 Fix AWS Lambda detection logic (#876)
  • RUST-1370 Only check specific fields for hello equality (#892)

Tasks

  • RUST-1637 Simplify convenient transaction example (#864)
  • RUST-1605 Update to use libmongocrypt fle2v2 (#863)
  • RUST-1620 Bump maxWireVersion for MongoDB 7.0 (#863)
Commits
  • c33d83a release v2.6.0 (#899)
  • acbe2ba RUST-1571 Update ServerId to be int64 (#894)
  • e71aa72 RUST-1358 Remove most type constraints on cursor values (#891)
  • 6902cc3 minor: remove unused ocsp test configuration (#893)
  • cb24f4a RUST-1370 Only check specific fields for hello equality (#892)
  • 9ef3ada RUST-1421 Implement FromStr for Namespace (#889)
  • d9490a2 minor: skip session tests if mongocryptd fails to spawn (#888)
  • b6344d2 RUST-906 Add native support for AWS IAM Roles for service accounts, EKS in pa...
  • 220a3f7 RUST-1673 Pin Rust version in CI to 1.69 (#887)
  • 3e1c14e RUST-1643 Test against MongoDB 7.0 (#881)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=mongodb&package-manager=cargo&previous-version=2.5.0&new-version=2.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 355627bef094b..b40e12810da99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5007,9 +5007,9 @@ checksum = "6c1a54de846c4006b88b1516731cc1f6026eb5dc4bcb186aa071ef66d40524ec" [[package]] name = "mongodb" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebe15399de63ad4294c80069967736cbb87ebe467a8cd0629df9cab88a6fbde6" +checksum = "ebcd85ec209a5b84fd9f54b9e381f6fa17462bc74160d018fc94fd8b9f61faa8" dependencies = [ "async-trait", "base64 0.13.1", diff --git a/Cargo.toml b/Cargo.toml index 13f5d2a33e7f9..d99eca59ae9e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -275,7 +275,7 @@ logfmt = { version = "0.0.2", default-features = false, optional = true } lru = { version = "0.10.0", default-features = false, optional = true } maxminddb = { version = "0.23.0", default-features = false, optional = true } md-5 = { version = "0.10", default-features = false, optional = true } -mongodb = { version = "2.5.0", default-features = false, features = ["tokio-runtime"], optional = true } +mongodb = { version = "2.6.0", default-features = false, features = ["tokio-runtime"], optional = true } nats = { version = "0.24.0", default-features = false, optional = true } nkeys = { version = "0.3.0", default-features = false, optional = true } nom = { version = "7.1.3", default-features = false, optional = true } From 08099a8b567663416d907600e2f9c678482af272 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Thu, 22 Jun 2023 11:28:52 -0700 Subject: [PATCH 164/236] chore(observability): Have `tower_limit` use configured log level (#17715) I wasn't able to trigger this message on 0.30.0 so it may not even be relevant anymore, but at least use the configured log level. 
Closes: #1391 Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- src/app.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/app.rs b/src/app.rs index e599499afe3c9..efd87316b19e2 100644 --- a/src/app.rs +++ b/src/app.rs @@ -378,7 +378,7 @@ fn get_log_levels(default: &str) -> String { format!("codec={}", level), format!("vrl={}", level), format!("file_source={}", level), - "tower_limit=trace".to_owned(), + format!("tower_limit={}", level), format!("rdkafka={}", level), format!("buffers={}", level), format!("lapin={}", level), From a08443c890cc0e3223e4d17c71eb267f0305d50c Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Thu, 22 Jun 2023 14:17:06 -0700 Subject: [PATCH 165/236] chore(dev): Add @dsmith3197 to CODEOWNERS (#17729) Signed-off-by: Jesse Szwedko --------- Signed-off-by: Jesse Szwedko --- .github/CODEOWNERS | 36 ++++++++++++++--------------- .github/actions/spelling/expect.txt | 1 + 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ad64e818f18b0..970d00d30ebc1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -21,11 +21,11 @@ src/sinks/aws_kinesis/ @spencergilbert @vectordotdev/integrations-team # sink_aw src/sinks/aws_s3/ @spencergilbert @vectordotdev/integrations-team src/sinks/aws_sqs/ @spencergilbert @vectordotdev/integrations-team src/sinks/axiom.rs @spencergilbert @vectordotdev/integrations-team -src/sinks/azure_blob/ @vectordotdev/integrations-team -src/sinks/azure_monitor_logs.rs @vectordotdev/integrations-team -src/sinks/blackhole/ @vectordotdev/integrations-team -src/sinks/clickhouse/ @vectordotdev/integrations-team -src/sinks/console/ @vectordotdev/integrations-team +src/sinks/azure_blob/ @dsmith3197 @vectordotdev/integrations-team +src/sinks/azure_monitor_logs.rs @dsmith3197 @vectordotdev/integrations-team +src/sinks/blackhole/ @dsmith3197 @vectordotdev/integrations-team +src/sinks/clickhouse/ @dsmith3197 @vectordotdev/integrations-team +src/sinks/console/ @dsmith3197 @vectordotdev/integrations-team src/sinks/databend/ @spencergilbert @vectordotdev/integrations-team src/sinks/datadog_archives.rs @neuronull @vectordotdev/integrations-team src/sinks/datadog_events/ @neuronull @vectordotdev/integrations-team @@ -38,15 +38,15 @@ src/sinks/gcp/ @StephenWakely @vectordotdev/integrations-team # sink_gcp_chronic src/sinks/honeycomb.rs @spencergilbert @vectordotdev/integrations-team src/sinks/http.rs @neuronull @vectordotdev/integrations-team src/sinks/humio/ @StephenWakely @vectordotdev/integrations-team # sink_humio_logs,sink_humio_metrics -src/sinks/influxdb/ @vectordotdev/integrations-team # sink_influxdb_logs,sink_influxdb_metrics -src/sinks/kafka/ @vectordotdev/integrations-team +src/sinks/influxdb/ @dsmith3197 @vectordotdev/integrations-team # sink_influxdb_logs,sink_influxdb_metrics +src/sinks/kafka/ @dsmith3197 @vectordotdev/integrations-team src/sinks/logdna.rs @neuronull @vectordotdev/integrations-team src/sinks/loki/ @spencergilbert @vectordotdev/integrations-team src/sinks/nats.rs @StephenWakely @vectordotdev/integrations-team -src/sinks/new_relic/ @vectordotdev/integrations-team # sink_newrelix,sink_newrelic_logs +src/sinks/new_relic/ @dsmith3197 @vectordotdev/integrations-team # sink_newrelix,sink_newrelic_logs src/sinks/papertrail.rs @StephenWakely @vectordotdev/integrations-team src/sinks/prometheus/ @StephenWakely @vectordotdev/integrations-team # sink_prometheus_exporter,sink_prometheus_remote_write -src/sinks/pulsar.rs @vectordotdev/integrations-team 
+src/sinks/pulsar.rs @dsmith3197 @vectordotdev/integrations-team src/sinks/redis.rs @StephenWakely @vectordotdev/integrations-team src/sinks/sematext/ @spencergilbert @vectordotdev/integrations-team # sink_sematext_logs,sink_sematext_metrics src/sinks/socket.rs @neuronull @vectordotdev/integrations-team @@ -57,7 +57,7 @@ src/sinks/websocket/ @neuronull @vectordotdev/integrations-team src/source_sender/ @vectordotdev/core-team src/sources/ @vectordotdev/integrations-team src/sources/amqp.rs @StephenWakely @vectordotdev/integrations-team -src/sources/apache_metrics/ @vectordotdev/integrations-team +src/sources/apache_metrics/ @dsmith3197 @vectordotdev/integrations-team src/sources/aws_ecs_metrics/ @spencergilbert @vectordotdev/integrations-team src/sources/aws_kinesis_firehose/ @spencergilbert @vectordotdev/integrations-team src/sources/aws_s3/ @spencergilbert @vectordotdev/integrations-team @@ -66,27 +66,27 @@ src/sources/datadog_agent/ @neuronull @vectordotdev/integrations-team src/sources/demo_logs.rs @StephenWakely @vectordotdev/integrations-team src/sources/dnstap/ @StephenWakely @vectordotdev/integrations-team src/sources/docker_logs/ @spencergilbert @vectordotdev/integrations-team -src/sources/eventstoredb_metrics/ @vectordotdev/integrations-team -src/sources/exec/ @vectordotdev/integrations-team +src/sources/eventstoredb_metrics/ @dsmith3197 @vectordotdev/integrations-team +src/sources/exec/ @dsmith3197 @vectordotdev/integrations-team src/sources/file.rs @spencergilbert @vectordotdev/integrations-team -src/sources/file_descriptors/ @vectordotdev/integrations-team # source_file_descriptor,source_stdin +src/sources/file_descriptors/ @dsmith3197 @vectordotdev/integrations-team # source_file_descriptor,source_stdin src/sources/fluent/ @neuronull @vectordotdev/integrations-team src/sources/gcp_pubsub.rs @StephenWakely @vectordotdev/integrations-team src/sources/heroku_logs.rs @spencergilbert @vectordotdev/integrations-team -src/sources/host_metrics/ @vectordotdev/integrations-team +src/sources/host_metrics/ @dsmith3197 @vectordotdev/integrations-team src/sources/http_client/ @neuronull @vectordotdev/integrations-team src/sources/http_server.rs @neuronull @vectordotdev/integrations-team src/sources/internal_logs.rs @neuronull @vectordotdev/integrations-team src/sources/internal_metrics.rs @neuronull @vectordotdev/integrations-team src/sources/journald.rs @spencergilbert @vectordotdev/integrations-team -src/sources/kafka.rs @vectordotdev/integrations-team +src/sources/kafka.rs @dsmith3197 @vectordotdev/integrations-team src/sources/kubernetes_logs/ @spencergilbert @vectordotdev/integrations-team src/sources/logstash.rs @neuronull @vectordotdev/integrations-team -src/sources/mongodb_metrics/ @vectordotdev/integrations-team +src/sources/mongodb_metrics/ @dsmith3197 @vectordotdev/integrations-team src/sources/nats.rs @StephenWakely @vectordotdev/integrations-team -src/sources/nginx_metrics/ @vectordotdev/integrations-team +src/sources/nginx_metrics/ @dsmith3197 @vectordotdev/integrations-team src/sources/opentelemetry/ @spencergilbert @vectordotdev/integrations-team -src/sources/postgresql_metrics.rs @vectordotdev/integrations-team +src/sources/postgresql_metrics.rs @dsmith3197 @vectordotdev/integrations-team src/sources/prometheus/ @StephenWakely @vectordotdev/integrations-team # source_prometheus_remote_write,source_prometheus_scrape src/sources/redis/ @StephenWakely @vectordotdev/integrations-team src/sources/socket/ @neuronull @vectordotdev/integrations-team diff --git 
a/.github/actions/spelling/expect.txt b/.github/actions/spelling/expect.txt index 9b8f575030f13..ed5c241d38b0f 100644 --- a/.github/actions/spelling/expect.txt +++ b/.github/actions/spelling/expect.txt @@ -287,6 +287,7 @@ Doop downcasted droptest dsl +dsmith dstat dstport dtype From 9a899c5d7c40a271b17eafec2f840c1bfd082b04 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Thu, 22 Jun 2023 14:39:49 -0700 Subject: [PATCH 166/236] chore(datadog_traces sink): Add additional warning around APM stats for `peer.service` (#17733) Tracking issue for enhancement: https://github.com/vectordotdev/vector/issues/17732 Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- .../reference/components/sinks/datadog_traces.cue | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/website/cue/reference/components/sinks/datadog_traces.cue b/website/cue/reference/components/sinks/datadog_traces.cue index d37e9087270f1..c887e0d1976e5 100644 --- a/website/cue/reference/components/sinks/datadog_traces.cue +++ b/website/cue/reference/components/sinks/datadog_traces.cue @@ -64,7 +64,18 @@ components: sinks: datadog_traces: { support: { requirements: [] - warnings: ["APM stats are in Beta. Currently the sink does not support the Datadog Agent sampling feature. This must be disabled in the Agent in order for APM stats output from vector to be accurate."] + warnings: [ + """ + Support for APM statistics is in beta. + + Currently the sink does not support the Datadog Agent sampling feature. Sampling must be + disabled in the Agent in order for APM stats output from vector to be accurate. + + Currently the sink does not calculate statistics aggregated across `peer.service`. Any + functionality in Datadog's APM product that depends on this aggregation will not + function correctly. + """, + ] notices: [] } From 326ad0861215f22c83f681e725abb88b33107e2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 23 Jun 2023 09:24:38 +0000 Subject: [PATCH 167/236] chore(deps): Bump infer from 0.13.0 to 0.14.0 (#17737) Bumps [infer](https://github.com/bojand/infer) from 0.13.0 to 0.14.0.
Release notes

Sourced from infer's releases.

v0.14.0

Changelog

v0.14.0 - 2023-06-22

Bug Fixes

  • effadc4 fix style

Build

  • b871c5d update crate version

Enhancements

  • 12d6fe9 add supposer for zstd skippable frames

Commits

  • b871c5d update crate version
  • 97a4bd3 Merge pull request #88 from marcospb19/enhance_zstd_v2
  • 8c8c835 Replace std by core
  • 56bcbee check if u32 -> usize respect usize boundaries
  • f24856c remove nesting
  • effadc4 fix style
  • 12d6fe9 add supposer for zstd skippable frames
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=infer&package-manager=cargo&previous-version=0.13.0&new-version=0.14.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
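Since the quoted release notes are terse, the following is a minimal sketch (not part of the patch series) of how the bumped `infer` crate is typically used to sniff a file type from magic bytes. It assumes the zstd handling called out in the 0.14.0 notes surfaces through the crate's usual `infer::get` entry point, and the sample bytes are simply the standard zstd frame magic chosen for illustration.

```rust
// Illustrative only: exercises the `infer` crate's top-level API (infer = "0.14").
fn main() {
    // Standard zstd frame magic number (0xFD2FB528, stored little-endian on disk),
    // padded with two arbitrary bytes so the buffer is longer than the magic itself.
    let zstd_frame: [u8; 6] = [0x28, 0xB5, 0x2F, 0xFD, 0x00, 0x00];

    match infer::get(&zstd_frame) {
        // For a zstd stream this is expected to report "application/zstd" / "zst".
        Some(kind) => println!("detected {} (.{})", kind.mime_type(), kind.extension()),
        None => println!("file type not recognized"),
    }
}
```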
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b40e12810da99..2a39ea6ebd573 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4121,9 +4121,9 @@ checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" [[package]] name = "infer" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f551f8c3a39f68f986517db0d1759de85881894fdc7db798bd2a9df9cb04b7fc" +checksum = "bbb78f4c4a058ef30a9ff77322e758f7e60f871274b602d7fdc1b0956b0cb88e" [[package]] name = "inotify" @@ -9172,7 +9172,7 @@ dependencies = [ "hyper-proxy", "indexmap", "indoc", - "infer 0.13.0", + "infer 0.14.0", "inventory", "itertools", "k8s-openapi 0.18.0", diff --git a/Cargo.toml b/Cargo.toml index d99eca59ae9e1..dc838a1ade3c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -265,7 +265,7 @@ hyper = { version = "0.14.26", default-features = false, features = ["client", " hyper-openssl = { version = "0.9.2", default-features = false } hyper-proxy = { version = "0.9.1", default-features = false, features = ["openssl-tls"] } indexmap = { version = "~1.9.3", default-features = false, features = ["serde"] } -infer = { version = "0.13.0", default-features = false, optional = true} +infer = { version = "0.14.0", default-features = false, optional = true} indoc = { version = "2.0.1", default-features = false } inventory = { version = "0.3.6", default-features = false } k8s-openapi = { version = "0.18.0", default-features = false, features = ["api", "v1_26"], optional = true } From cc52c0ea99e03f451c24c165b24430c045ff365d Mon Sep 17 00:00:00 2001 From: Dominic Burkart Date: Fri, 23 Jun 2023 14:00:39 +0200 Subject: [PATCH 168/236] feat(error code when shutdown fails): set exit flag to non-zero when shutdown times out (#17676) Issue: [Exit non-zero when Vector fails to gracefully shutdown](https://github.com/vectordotdev/vector/issues/13731) --- src/app.rs | 43 +++++++++++++++++++++++++++++++++++++------ src/main.rs | 13 +++++++++---- src/vector_windows.rs | 17 +++++++++++++---- 3 files changed, 59 insertions(+), 14 deletions(-) diff --git a/src/app.rs b/src/app.rs index efd87316b19e2..c1ab0ae12e0d1 100644 --- a/src/app.rs +++ b/src/app.rs @@ -1,5 +1,7 @@ #![allow(missing_docs)] -use std::{collections::HashMap, num::NonZeroUsize, path::PathBuf, time::Duration}; +use std::{ + collections::HashMap, num::NonZeroUsize, path::PathBuf, process::ExitStatus, time::Duration, +}; use exitcode::ExitCode; use futures::StreamExt; @@ -32,6 +34,11 @@ use crate::{ trace, }; +#[cfg(unix)] +use std::os::unix::process::ExitStatusExt; +#[cfg(windows)] +use std::os::windows::process::ExitStatusExt; + pub static WORKER_THREADS: OnceNonZeroUsize = OnceNonZeroUsize::new(); use crate::internal_events::{VectorQuit, VectorStarted, VectorStopped}; @@ -145,10 +152,10 @@ impl ApplicationConfig { } impl Application { - pub fn run() { + pub fn run() -> ExitStatus { let (runtime, app) = Self::prepare_start().unwrap_or_else(|code| std::process::exit(code)); - runtime.block_on(app.run()); + runtime.block_on(app.run()) } pub fn prepare_start() -> Result<(Runtime, StartedApplication), ExitCode> { @@ -242,7 +249,7 @@ pub struct StartedApplication { } impl StartedApplication { - pub async fn run(self) { + pub async fn run(self) -> ExitStatus { self.main().await.shutdown().await } @@ -317,7 +324,7 @@ pub struct 
FinishedApplication { } impl FinishedApplication { - pub async fn shutdown(self) { + pub async fn shutdown(self) -> ExitStatus { let FinishedApplication { signal, mut signal_rx, @@ -335,18 +342,42 @@ impl FinishedApplication { SignalTo::Shutdown => { emit!(VectorStopped); tokio::select! { - _ = topology_controller.stop() => (), // Graceful shutdown finished + _ = topology_controller.stop() => ExitStatus::from_raw({ + #[cfg(windows)] + { + exitcode::OK as u32 + } + #[cfg(unix)] + exitcode::OK + }), // Graceful shutdown finished _ = signal_rx.recv() => { // It is highly unlikely that this event will exit from topology. emit!(VectorQuit); // Dropping the shutdown future will immediately shut the server down + ExitStatus::from_raw({ + #[cfg(windows)] + { + exitcode::UNAVAILABLE as u32 + } + #[cfg(unix)] + exitcode::OK + }) } + } } SignalTo::Quit => { // It is highly unlikely that this event will exit from topology. emit!(VectorQuit); drop(topology_controller); + ExitStatus::from_raw({ + #[cfg(windows)] + { + exitcode::UNAVAILABLE as u32 + } + #[cfg(unix)] + exitcode::OK + }) } _ => unreachable!(), } diff --git a/src/main.rs b/src/main.rs index 1859eff381c06..66818155ab9ea 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,8 +3,10 @@ extern crate vector; use vector::app::Application; +use std::process::ExitCode; + #[cfg(unix)] -fn main() { +fn main() -> ExitCode { #[cfg(feature = "allocation-tracing")] { use crate::vector::internal_telemetry::allocations::{ @@ -35,14 +37,17 @@ fn main() { } } - Application::run(); + let exit_code = Application::run().code().unwrap_or(exitcode::UNAVAILABLE) as u8; + ExitCode::from(exit_code) } #[cfg(windows)] -pub fn main() { +pub fn main() -> ExitCode { // We need to be able to run vector in User Interactive mode. We first try // to run vector as a service. If we fail, we consider that we are in // interactive mode and then fallback to console mode. See // https://docs.microsoft.com/en-us/dotnet/api/system.environment.userinteractive?redirectedfrom=MSDN&view=netcore-3.1#System_Environment_UserInteractive - vector::vector_windows::run().unwrap_or_else(|_| Application::run()); + let exit_code = vector::vector_windows::run() + .unwrap_or_else(|_| Application::run().code().unwrap_or(exitcode::UNAVAILABLE)); + ExitCode::from(exit_code as u8) } diff --git a/src/vector_windows.rs b/src/vector_windows.rs index 0473dfc71c364..5ba3548ce5a25 100644 --- a/src/vector_windows.rs +++ b/src/vector_windows.rs @@ -17,6 +17,7 @@ const SERVICE_NAME: &str = "vector"; const SERVICE_TYPE: ServiceType = ServiceType::OWN_PROCESS; const NO_ERROR: u32 = 0; +const ERROR: u32 = 121; pub mod service_control { use std::{ffi::OsString, fmt, fmt::Formatter, time::Duration}; @@ -361,8 +362,9 @@ fn win_main(arguments: Vec) { if let Err(_e) = run_service(arguments) {} } -pub fn run() -> Result<()> { - service_dispatcher::start(SERVICE_NAME, ffi_service_main) +pub fn run() -> Result { + service_dispatcher::start(SERVICE_NAME, ffi_service_main).map(|()| 0_i32) + // Always returns 0 exit code as errors are handled by the service dispatcher. } fn run_service(_arguments: Vec) -> Result<()> { @@ -398,14 +400,21 @@ fn run_service(_arguments: Vec) -> Result<()> { process_id: None, })?; - runtime.block_on(app.run()); + let program_completion_status = runtime.block_on(app.run()); // Tell the system that service has stopped. 
status_handle.set_service_status(ServiceStatus { service_type: SERVICE_TYPE, current_state: ServiceState::Stopped, controls_accepted: ServiceControlAccept::empty(), - exit_code: ServiceExitCode::Win32(NO_ERROR), + exit_code: { + if program_completion_status.success() { + ServiceExitCode::Win32(NO_ERROR) + } else { + // we didn't gracefully shutdown within grace period. + ServiceExitCode::Win32(ERROR) + } + }, checkpoint: 0, wait_hint: Duration::default(), process_id: None, From ff6a1b4f06b1e32f3192f2bc391e8ab59f466993 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 23 Jun 2023 08:13:14 -0700 Subject: [PATCH 169/236] chore(ci): Remove upload of config schema (#17740) Since the flakey test seems to have been resolved by https://github.com/vectordotdev/vector/pull/17722 . We can bring it back if that turns out not to be the case. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- .github/workflows/test.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 76d8dc2e27f47..ca5184b5041c2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -122,12 +122,6 @@ jobs: if: needs.changes.outputs.source == 'true' || needs.changes.outputs.cue == 'true' run: cargo vdev test-vrl - - uses: actions/upload-artifact@v3 - with: - name: "config-schema.json" - path: "/tmp/vector-config-schema.json" - if: success() || failure() - # This is a required status check, so it always needs to run if prior jobs failed, in order to mark the status correctly. all-checks: name: Test Suite From 44be37843c0599abb64073fe737ce146e30b3aa5 Mon Sep 17 00:00:00 2001 From: Nathan Fox Date: Fri, 23 Jun 2023 12:57:59 -0400 Subject: [PATCH 170/236] feat: add metadata support to `set_semantic_meaning` (#17730) The `set_semantic_meaning` function didn't support setting meanings that point to metadata. This will be needed when log namespacing is enabled. --- .../functions/src/set_semantic_meaning.rs | 37 +++++++++++-------- src/transforms/remap.rs | 4 +- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/lib/vector-vrl/functions/src/set_semantic_meaning.rs b/lib/vector-vrl/functions/src/set_semantic_meaning.rs index 1f9d09f122526..26e61c1671f59 100644 --- a/lib/vector-vrl/functions/src/set_semantic_meaning.rs +++ b/lib/vector-vrl/functions/src/set_semantic_meaning.rs @@ -1,14 +1,14 @@ use std::collections::BTreeMap; use std::ops::{Deref, DerefMut}; use vrl::diagnostic::Label; -use vrl::path::OwnedValuePath; +use vrl::path::{OwnedTargetPath, PathPrefix}; use vrl::prelude::*; #[derive(Debug, Default, Clone)] -pub struct MeaningList(pub BTreeMap); +pub struct MeaningList(pub BTreeMap); impl Deref for MeaningList { - type Target = BTreeMap; + type Target = BTreeMap; fn deref(&self) -> &Self::Target { &self.0 @@ -68,36 +68,41 @@ impl Function for SetSemanticMeaning { .expect("meaning not bytes") .into_owned(); - // Semantic meaning can only be assigned to external fields. - if !query.is_external() { + let path = if let Some(path) = query.external_path() { + path + } else { + // Semantic meaning can only be assigned to external fields. 
let mut labels = vec![Label::primary( - "the target of this semantic meaning is non-external", + "this path must point to an event or metadata", span, )]; if let Some(variable) = query.as_variable() { labels.push(Label::context( - format!("maybe you meant \".{}\"?", variable.ident()), + format!( + "maybe you meant \".{}\" or \"%{}\"?", + variable.ident(), + variable.ident() + ), span, )); } let error = ExpressionError::Error { - message: "semantic meaning defined for non-external target".to_owned(), + message: "semantic meaning is not valid for local variables".to_owned(), labels, notes: vec![], }; return Err(Box::new(error) as Box); - } - - let path = query.path().clone(); + }; - let exists = state - .external - .target_kind() - .at_path(&path) - .contains_any_defined(); + let exists = match path.prefix { + PathPrefix::Event => state.external.target_kind(), + PathPrefix::Metadata => state.external.metadata_kind(), + } + .at_path(&path.path) + .contains_any_defined(); // Reject assigning meaning to non-existing field. if !exists { diff --git a/src/transforms/remap.rs b/src/transforms/remap.rs index 49c84faad97e2..a6b01dbc8844d 100644 --- a/src/transforms/remap.rs +++ b/src/transforms/remap.rs @@ -9,7 +9,7 @@ use std::{ use codecs::MetricTagValues; use lookup::lookup_v2::{parse_value_path, ValuePath}; -use lookup::{metadata_path, owned_value_path, path, OwnedTargetPath, PathPrefix}; +use lookup::{metadata_path, owned_value_path, path, PathPrefix}; use snafu::{ResultExt, Snafu}; use vector_common::TimeZone; use vector_config::configurable_component; @@ -274,7 +274,7 @@ impl TransformConfig for RemapConfig { // Apply any semantic meanings set in the VRL program for (id, path) in meaning { // currently only event paths are supported - new_type_def = new_type_def.with_meaning(OwnedTargetPath::event(path), &id); + new_type_def = new_type_def.with_meaning(path, &id); } new_type_def }) From 7a0dec13537211b4a7e460cdf57b079709649b5f Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 23 Jun 2023 12:55:43 -0700 Subject: [PATCH 171/236] chore(docs): Move CONTRIBUTING.md to top-level (#17744) And remove the symlink. Contributors continue to be confused by the top-level file being a symlink since in the Github UI it appears to be a mostly empty file. Recent example of confusion: https://github.com/vectordotdev/vector/pull/17743 Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- CONTRIBUTING.md | 257 ++++++++++++++++++++++++++++++++++++++++++- docs/CONTRIBUTING.md | 256 ------------------------------------------ 2 files changed, 256 insertions(+), 257 deletions(-) mode change 120000 => 100644 CONTRIBUTING.md delete mode 100644 docs/CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 120000 index 49d1b98f97e06..0000000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1 +0,0 @@ -docs/CONTRIBUTING.md \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000000..7f4a52ed229f9 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,256 @@ +# Contributing + +First, thank you for contributing to Vector! The goal of this document is to +provide everything you need to start contributing to Vector. The +following TOC is sorted progressively, starting with the basics and +expanding into more specifics. Everyone from a first time contributor to a +Vector team member will find this document useful. 
+ +- [Introduction](#introduction) +- [Your First Contribution](#your-first-contribution) + - [New sources, sinks, and transforms](#new-sources-sinks-and-transforms) +- [Workflow](#workflow) + - [Git Branches](#git-branches) + - [Git Commits](#git-commits) + - [Style](#style) + - [GitHub Pull Requests](#github-pull-requests) + - [Title](#title) + - [Reviews & Approvals](#reviews--approvals) + - [Merge Style](#merge-style) + - [CI](#ci) + - [Releasing](#releasing) + - [Testing](#testing) + - [Skipping tests](#skipping-tests) + - [Daily tests](#daily-tests) + - [Flakey tests](#flakey-tests) + - [Test harness](#test-harness) + - [Deprecations](#deprecations) + - [Dependencies](#dependencies) +- [Next steps](#next-steps) +- [Legal](#legal) + - [Contributor License Agreement](#contributor-license-agreement) + - [Granted rights and copyright assignment](#granted-rights-and-copyright-assignment) + +## Introduction + +1. **You're familiar with [GitHub](https://github.com) and the pull request + workflow.** +2. **You've read Vector's [docs](https://vector.dev/docs/).** +3. **You know about the [Vector community](https://vector.dev/community/). + Please use this for help.** + +## Your First Contribution + +1. Ensure your change has an issue! Find an + [existing issue][urls.existing_issues] or [open a new issue][urls.new_issue]. + - This is where you can get a feel if the change will be accepted or not. + Changes that are questionable will have a `needs: approval` label. +2. Once approved, [fork the Vector repository][urls.fork_repo] in your own + GitHub account (only applicable to outside contributors). +3. [Create a new Git branch][urls.create_branch]. +4. Make your changes. +5. [Submit the branch as a pull request][urls.submit_pr] to the main Vector + repo. A Vector team member should comment and/or review your pull request + within a few days. Although, depending on the circumstances, it may take + longer. + +### New sources, sinks, and transforms + +If you're thinking of contributing a new source, sink, or transform to Vector, thank you that's way cool! The answers to +the below questions are required for each newly proposed component and depending on the answers, we may elect to not +include the proposed component. If you're having trouble with any of the questions, we're available to help you. + +**Prior to beginning work on a new source or sink if a GitHub Issue does not already exist, please open one to discuss +the introduction of the new integration.** Maintainers will review the proposal with the following checklist in mind, +try and consider them when sharing your proposal to reduce the amount of time it takes to review your proposal. This +list is not exhaustive, and may be updated over time. + +- [ ] Can the proposed component’s functionality be replicated by an existing component, with a specific configuration? +(ex: Azure Event Hub as a `kafka` sink configuration) + - [ ] Alternatively implemented as a wrapper around an existing component. (ex. `axiom` wrapping `elasticsearch`) +- [ ] Can an existing component replicate the proposed component’s functionality, with non-breaking changes? +- [ ] Can an existing component be rewritten in a more generic fashion to cover both the existing and proposed functions? +- [ ] Is the proposed component generically usable or is it specific to a particular service? + - [ ] How established is the target of the integration, what is the relative market share of the integrated service? +- [ ] Is there sufficient demand for the component? 
+ - [ ] If the integration can be served with a workaround or more generic component, how painful is this for users? +- [ ] Is the contribution from an individual or the organization owning the integrated service? (examples of +organization backed integrations: `databend` sink, `axiom` sink) + - [ ] Is the contributor committed to maintaining the integration if it is accepted? +- [ ] What is the overall complexity of the proposed design of this integration from a technical and functional +standpoint, and what is the expected ongoing maintenance burden? +- [ ] How will this integration be tested and QA’d for any changes and fixes? + - [ ] Will we have access to an account with the service if the integration is not open source? + +To merge a new source, sink, or transform, the pull request is required to: + +- [ ] Add tests, especially integration tests if your contribution connects to an external service. +- [ ] Add instrumentation so folks using your integration can get insight into how it's working and performing. You can +see some [example of instrumentation in existing integrations](https://github.com/vectordotdev/vector/tree/master/src/internal_events). +- [ ] Add documentation. You can see [examples in the `docs` directory](https://github.com/vectordotdev/vector/blob/master/docs). + +When adding new integration tests, the following changes are needed in the GitHub Workflows: + +- in `.github/workflows/integration.yml`, add another entry in the matrix definition for the new integration. +- in `.github/workflows/integration-comment.yml`, add another entry in the matrix definition for the new integration. +- in `.github/workflows/changes.yml`, add a new filter definition for files changed, and update the `changes` job +outputs to reference the filter, and finally update the outputs of `workflow_call` to include the new filter. + +## Workflow + +### Git Branches + +_All_ changes must be made in a branch and submitted as [pull requests](#github-pull-requests). +Vector does not adopt any type of branch naming style, but please use something +descriptive of your changes. + +### Git Commits + +#### Style + +Please ensure your commits are small and focused; they should tell a story of +your change. This helps reviewers to follow your changes, especially for more +complex changes. + +### GitHub Pull Requests + +Once your changes are ready you must submit your branch as a [pull request](https://github.com/vectordotdev/vector/pulls). + +#### Title + +The pull request title must follow the format outlined in the [conventional commits spec](https://www.conventionalcommits.org). +[Conventional commits](https://www.conventionalcommits.org) is a standardized +format for commit messages. Vector only requires this format for commits on +the `master` branch. And because Vector squashes commits before merging +branches, this means that only the pull request title must conform to this +format. Vector performs a pull request check to verify the pull request title +in case you forget. + +A list of allowed sub-categories is defined +[here](https://github.com/vectordotdev/vector/tree/master/.github). 
+ +The following are all good examples of pull request titles: + +```text +feat(new sink): new `xyz` sink +feat(tcp source): add foo bar baz feature +fix(tcp source): fix foo bar baz bug +chore: improve build process +docs: fix typos +``` + +#### Reviews & Approvals + +All pull requests should be reviewed by: + +- No review required for cosmetic changes like whitespace, typos, and spelling + by a maintainer +- One Vector team member for minor changes or trivial changes from contributors +- Two Vector team members for major changes +- Three Vector team members for RFCs + +If CODEOWNERS are assigned, a review from an individual from each of the sets of owners is required. + +#### Merge Style + +All pull requests are squashed and merged. We generally discourage large pull +requests that are over 300-500 lines of diff. If you would like to propose a +change that is larger we suggest coming onto our [Discord server](https://chat.vector.dev/) and discuss it +with one of our engineers. This way we can talk through the solution and +discuss if a change that large is even needed! This will produce a quicker +response to the change and likely produce code that aligns better with our +process. + +### CI + +Currently, Vector uses GitHub Actions to run tests. The workflows are defined in +`.github/workflows`. + +#### Releasing + +GitHub Actions is responsible for releasing updated versions of Vector through +various channels. + +#### Testing + +##### Skipping tests + +Tests are run for all changes except those that have the label: + +```text +ci-condition: skip +``` + +##### Daily tests + +Some long-running tests are only run daily, rather than on every pull request. +If needed, an administrator can kick off these tests manually via the button on +the [nightly build action +page](https://github.com/vectordotdev/vector/actions?query=workflow%3Anightly) + +#### Flakey tests + +Historically, we've had some trouble with tests being flakey. If your PR does +not have passing tests: + +- Ensure that the test failures are unrelated to your change + - Is it failing on master? + - Does it fail if you rerun CI? + - Can you reproduce locally? +- Find or open an issue for the test failure + ([example](https://github.com/vectordotdev/vector/issues/3781)) +- Link the PR in the issue for the failing test so that there are more examples + +##### Test harness + +You can invoke the [test harness][urls.vector_test_harness] by commenting on +any pull request with: + +```bash +/test -t +``` + +### Deprecations + +When deprecating functionality in Vector, see [DEPRECATION.md](DEPRECATION.md). + +### Dependencies + +When adding, modifying, or removing a dependency in Vector you may find that you need to update the +inventory of third-party licenses maintained in `LICENSE-3rdparty.csv`. This file is generated using +[rust-license-tool](https://github.com/DataDog/rust-license-tool.git) and can be updated using +`cargo vdev build licenses`. + +## Next steps + +As discussed in the [`README`](README.md), you should continue to the following +documents: + +1. **[DEVELOPING.md](DEVELOPING.md)** - Everything necessary to develop +2. **[DOCUMENTING.md](DOCUMENTING.md)** - Preparing your change for Vector users +3. **[DEPRECATION.md](DEPRECATION.md)** - Deprecating functionality in Vector + +## Legal + +To protect all users of Vector, the following legal requirements are made. +If you have additional questions, please [contact us]. 
+ +### Contributor License Agreement + +Vector requires all contributors to sign the Contributor License Agreement +(CLA). This gives Vector the right to use your contribution as well as ensuring +that you own your contributions and can use them for other purposes. + +The full text of the CLA can be found at [https://cla.datadoghq.com/vectordotdev/vector](https://cla.datadoghq.com/vectordotdev/vector). + +### Granted rights and copyright assignment + +This is covered by the CLA. + +[contact us]: https://vector.dev/community +[urls.create_branch]: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-and-deleting-branches-within-your-repository +[urls.existing_issues]: https://github.com/vectordotdev/vector/issues +[urls.fork_repo]: https://help.github.com/en/github/getting-started-with-github/fork-a-repo +[urls.new_issue]: https://github.com/vectordotdev/vector/issues/new +[urls.submit_pr]: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork +[urls.vector_test_harness]: https://github.com/vectordotdev/vector-test-harness/ diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md deleted file mode 100644 index 7f4a52ed229f9..0000000000000 --- a/docs/CONTRIBUTING.md +++ /dev/null @@ -1,256 +0,0 @@ -# Contributing - -First, thank you for contributing to Vector! The goal of this document is to -provide everything you need to start contributing to Vector. The -following TOC is sorted progressively, starting with the basics and -expanding into more specifics. Everyone from a first time contributor to a -Vector team member will find this document useful. - -- [Introduction](#introduction) -- [Your First Contribution](#your-first-contribution) - - [New sources, sinks, and transforms](#new-sources-sinks-and-transforms) -- [Workflow](#workflow) - - [Git Branches](#git-branches) - - [Git Commits](#git-commits) - - [Style](#style) - - [GitHub Pull Requests](#github-pull-requests) - - [Title](#title) - - [Reviews & Approvals](#reviews--approvals) - - [Merge Style](#merge-style) - - [CI](#ci) - - [Releasing](#releasing) - - [Testing](#testing) - - [Skipping tests](#skipping-tests) - - [Daily tests](#daily-tests) - - [Flakey tests](#flakey-tests) - - [Test harness](#test-harness) - - [Deprecations](#deprecations) - - [Dependencies](#dependencies) -- [Next steps](#next-steps) -- [Legal](#legal) - - [Contributor License Agreement](#contributor-license-agreement) - - [Granted rights and copyright assignment](#granted-rights-and-copyright-assignment) - -## Introduction - -1. **You're familiar with [GitHub](https://github.com) and the pull request - workflow.** -2. **You've read Vector's [docs](https://vector.dev/docs/).** -3. **You know about the [Vector community](https://vector.dev/community/). - Please use this for help.** - -## Your First Contribution - -1. Ensure your change has an issue! Find an - [existing issue][urls.existing_issues] or [open a new issue][urls.new_issue]. - - This is where you can get a feel if the change will be accepted or not. - Changes that are questionable will have a `needs: approval` label. -2. Once approved, [fork the Vector repository][urls.fork_repo] in your own - GitHub account (only applicable to outside contributors). -3. [Create a new Git branch][urls.create_branch]. -4. Make your changes. -5. [Submit the branch as a pull request][urls.submit_pr] to the main Vector - repo. A Vector team member should comment and/or review your pull request - within a few days. 
Although, depending on the circumstances, it may take - longer. - -### New sources, sinks, and transforms - -If you're thinking of contributing a new source, sink, or transform to Vector, thank you that's way cool! The answers to -the below questions are required for each newly proposed component and depending on the answers, we may elect to not -include the proposed component. If you're having trouble with any of the questions, we're available to help you. - -**Prior to beginning work on a new source or sink if a GitHub Issue does not already exist, please open one to discuss -the introduction of the new integration.** Maintainers will review the proposal with the following checklist in mind, -try and consider them when sharing your proposal to reduce the amount of time it takes to review your proposal. This -list is not exhaustive, and may be updated over time. - -- [ ] Can the proposed component’s functionality be replicated by an existing component, with a specific configuration? -(ex: Azure Event Hub as a `kafka` sink configuration) - - [ ] Alternatively implemented as a wrapper around an existing component. (ex. `axiom` wrapping `elasticsearch`) -- [ ] Can an existing component replicate the proposed component’s functionality, with non-breaking changes? -- [ ] Can an existing component be rewritten in a more generic fashion to cover both the existing and proposed functions? -- [ ] Is the proposed component generically usable or is it specific to a particular service? - - [ ] How established is the target of the integration, what is the relative market share of the integrated service? -- [ ] Is there sufficient demand for the component? - - [ ] If the integration can be served with a workaround or more generic component, how painful is this for users? -- [ ] Is the contribution from an individual or the organization owning the integrated service? (examples of -organization backed integrations: `databend` sink, `axiom` sink) - - [ ] Is the contributor committed to maintaining the integration if it is accepted? -- [ ] What is the overall complexity of the proposed design of this integration from a technical and functional -standpoint, and what is the expected ongoing maintenance burden? -- [ ] How will this integration be tested and QA’d for any changes and fixes? - - [ ] Will we have access to an account with the service if the integration is not open source? - -To merge a new source, sink, or transform, the pull request is required to: - -- [ ] Add tests, especially integration tests if your contribution connects to an external service. -- [ ] Add instrumentation so folks using your integration can get insight into how it's working and performing. You can -see some [example of instrumentation in existing integrations](https://github.com/vectordotdev/vector/tree/master/src/internal_events). -- [ ] Add documentation. You can see [examples in the `docs` directory](https://github.com/vectordotdev/vector/blob/master/docs). - -When adding new integration tests, the following changes are needed in the GitHub Workflows: - -- in `.github/workflows/integration.yml`, add another entry in the matrix definition for the new integration. -- in `.github/workflows/integration-comment.yml`, add another entry in the matrix definition for the new integration. -- in `.github/workflows/changes.yml`, add a new filter definition for files changed, and update the `changes` job -outputs to reference the filter, and finally update the outputs of `workflow_call` to include the new filter. 
- -## Workflow - -### Git Branches - -_All_ changes must be made in a branch and submitted as [pull requests](#github-pull-requests). -Vector does not adopt any type of branch naming style, but please use something -descriptive of your changes. - -### Git Commits - -#### Style - -Please ensure your commits are small and focused; they should tell a story of -your change. This helps reviewers to follow your changes, especially for more -complex changes. - -### GitHub Pull Requests - -Once your changes are ready you must submit your branch as a [pull request](https://github.com/vectordotdev/vector/pulls). - -#### Title - -The pull request title must follow the format outlined in the [conventional commits spec](https://www.conventionalcommits.org). -[Conventional commits](https://www.conventionalcommits.org) is a standardized -format for commit messages. Vector only requires this format for commits on -the `master` branch. And because Vector squashes commits before merging -branches, this means that only the pull request title must conform to this -format. Vector performs a pull request check to verify the pull request title -in case you forget. - -A list of allowed sub-categories is defined -[here](https://github.com/vectordotdev/vector/tree/master/.github). - -The following are all good examples of pull request titles: - -```text -feat(new sink): new `xyz` sink -feat(tcp source): add foo bar baz feature -fix(tcp source): fix foo bar baz bug -chore: improve build process -docs: fix typos -``` - -#### Reviews & Approvals - -All pull requests should be reviewed by: - -- No review required for cosmetic changes like whitespace, typos, and spelling - by a maintainer -- One Vector team member for minor changes or trivial changes from contributors -- Two Vector team members for major changes -- Three Vector team members for RFCs - -If CODEOWNERS are assigned, a review from an individual from each of the sets of owners is required. - -#### Merge Style - -All pull requests are squashed and merged. We generally discourage large pull -requests that are over 300-500 lines of diff. If you would like to propose a -change that is larger we suggest coming onto our [Discord server](https://chat.vector.dev/) and discuss it -with one of our engineers. This way we can talk through the solution and -discuss if a change that large is even needed! This will produce a quicker -response to the change and likely produce code that aligns better with our -process. - -### CI - -Currently, Vector uses GitHub Actions to run tests. The workflows are defined in -`.github/workflows`. - -#### Releasing - -GitHub Actions is responsible for releasing updated versions of Vector through -various channels. - -#### Testing - -##### Skipping tests - -Tests are run for all changes except those that have the label: - -```text -ci-condition: skip -``` - -##### Daily tests - -Some long-running tests are only run daily, rather than on every pull request. -If needed, an administrator can kick off these tests manually via the button on -the [nightly build action -page](https://github.com/vectordotdev/vector/actions?query=workflow%3Anightly) - -#### Flakey tests - -Historically, we've had some trouble with tests being flakey. If your PR does -not have passing tests: - -- Ensure that the test failures are unrelated to your change - - Is it failing on master? - - Does it fail if you rerun CI? - - Can you reproduce locally? 
-- Find or open an issue for the test failure - ([example](https://github.com/vectordotdev/vector/issues/3781)) -- Link the PR in the issue for the failing test so that there are more examples - -##### Test harness - -You can invoke the [test harness][urls.vector_test_harness] by commenting on -any pull request with: - -```bash -/test -t -``` - -### Deprecations - -When deprecating functionality in Vector, see [DEPRECATION.md](DEPRECATION.md). - -### Dependencies - -When adding, modifying, or removing a dependency in Vector you may find that you need to update the -inventory of third-party licenses maintained in `LICENSE-3rdparty.csv`. This file is generated using -[rust-license-tool](https://github.com/DataDog/rust-license-tool.git) and can be updated using -`cargo vdev build licenses`. - -## Next steps - -As discussed in the [`README`](README.md), you should continue to the following -documents: - -1. **[DEVELOPING.md](DEVELOPING.md)** - Everything necessary to develop -2. **[DOCUMENTING.md](DOCUMENTING.md)** - Preparing your change for Vector users -3. **[DEPRECATION.md](DEPRECATION.md)** - Deprecating functionality in Vector - -## Legal - -To protect all users of Vector, the following legal requirements are made. -If you have additional questions, please [contact us]. - -### Contributor License Agreement - -Vector requires all contributors to sign the Contributor License Agreement -(CLA). This gives Vector the right to use your contribution as well as ensuring -that you own your contributions and can use them for other purposes. - -The full text of the CLA can be found at [https://cla.datadoghq.com/vectordotdev/vector](https://cla.datadoghq.com/vectordotdev/vector). - -### Granted rights and copyright assignment - -This is covered by the CLA. - -[contact us]: https://vector.dev/community -[urls.create_branch]: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-and-deleting-branches-within-your-repository -[urls.existing_issues]: https://github.com/vectordotdev/vector/issues -[urls.fork_repo]: https://help.github.com/en/github/getting-started-with-github/fork-a-repo -[urls.new_issue]: https://github.com/vectordotdev/vector/issues/new -[urls.submit_pr]: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork -[urls.vector_test_harness]: https://github.com/vectordotdev/vector-test-harness/ From 7d10fc97f32c053f9336d1d69d530f39ef258268 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 23 Jun 2023 14:54:20 -0700 Subject: [PATCH 172/236] chore(docs): Clarify `bytes` framing for streams (#17745) I'm not aware of any sources that separate "steam segments" so I updated the language a bit to account for how the `tcp` mode of the `socket` source handles `bytes` framing. Reference: https://github.com/vectordotdev/vector/issues/17136 Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- website/cue/reference/components/sources.cue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/cue/reference/components/sources.cue b/website/cue/reference/components/sources.cue index 9e3682067ee26..87053f76a4d3d 100644 --- a/website/cue/reference/components/sources.cue +++ b/website/cue/reference/components/sources.cue @@ -94,7 +94,7 @@ components: sources: [Name=string]: { type: string: { default: features.codecs.default_framing enum: { - bytes: "Byte frames are passed through as-is according to the underlying I/O boundaries (e.g. split between messages or stream segments)." 
+ bytes: "Byte frames are passed through as-is according to the underlying I/O boundaries (e.g. split between messages, payloads, or streams)." character_delimited: "Byte frames which are delimited by a chosen character." length_delimited: "Byte frames which are prefixed by an unsigned big-endian 32-bit integer indicating the length." newline_delimited: "Byte frames which are delimited by a newline character." From 92a36e0119e0e1f50b8bfcdcaf1c536018b69d5f Mon Sep 17 00:00:00 2001 From: neuronull Date: Fri, 23 Jun 2023 15:59:45 -0600 Subject: [PATCH 173/236] enhancement(ci): refactor logic for int test file path changes detection (#17725) - The intent is to better be able to manage the file paths per integration, as they are contained in a specific place for that integration rather in one massive list. --- .github/workflows/changes.yml | 353 +++++------------- .github/workflows/integration.yml | 2 + scripts/integration/amqp/test.yaml | 10 + scripts/integration/appsignal/test.yaml | 6 + scripts/integration/aws/test.yaml | 12 + scripts/integration/axiom/test.yaml | 7 + scripts/integration/azure/test.yaml | 7 + scripts/integration/clickhouse/test.yaml | 7 + scripts/integration/databend/test.yaml | 7 + scripts/integration/datadog-agent/test.yaml | 8 + scripts/integration/datadog-logs/test.yaml | 9 + scripts/integration/datadog-metrics/test.yaml | 9 + scripts/integration/datadog-traces/test.yaml | 9 + scripts/integration/dnstap/test.yaml | 8 + scripts/integration/docker-logs/test.yaml | 9 + scripts/integration/elasticsearch/test.yaml | 7 + scripts/integration/eventstoredb/test.yaml | 8 + scripts/integration/fluent/test.yaml | 8 + scripts/integration/gcp/test.yaml | 11 + scripts/integration/http-client/test.yaml | 7 + scripts/integration/humio/test.yaml | 7 + scripts/integration/influxdb/test.yaml | 8 + scripts/integration/kafka/test.yaml | 11 + scripts/integration/logstash/test.yaml | 7 + scripts/integration/loki/test.yaml | 8 + scripts/integration/mongodb/test.yaml | 8 + scripts/integration/nats/test.yaml | 11 + scripts/integration/nginx/test.yaml | 8 + scripts/integration/opentelemetry/test.yaml | 7 + scripts/integration/postgres/test.yaml | 8 + scripts/integration/prometheus/test.yaml | 10 + scripts/integration/pulsar/test.yaml | 8 + scripts/integration/redis/test.yaml | 10 + scripts/integration/splunk/test.yaml | 10 + scripts/integration/webhdfs/test.yaml | 7 + vdev/src/commands/integration/ci_paths.rs | 30 ++ vdev/src/commands/integration/mod.rs | 1 + vdev/src/testing/config.rs | 2 + 38 files changed, 413 insertions(+), 252 deletions(-) create mode 100644 vdev/src/commands/integration/ci_paths.rs diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index bc230f3fadf59..db6c4225073e0 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -15,92 +15,100 @@ on: head_ref: required: true type: string + int_tests: + required: false + type: boolean + default: false + source: + required: false + type: boolean + default: true outputs: source: - value: ${{ jobs.changes.outputs.source }} + value: ${{ jobs.source.outputs.source }} dependencies: - value: ${{ jobs.changes.outputs.dependencies }} + value: ${{ jobs.source.outputs.dependencies }} internal_events: - value: ${{ jobs.changes.outputs.internal_events }} + value: ${{ jobs.source.outputs.internal_events }} cue: - value: ${{ jobs.changes.outputs.cue }} + value: ${{ jobs.source.outputs.cue }} component_docs: - value: ${{ jobs.changes.outputs.component_docs }} + value: ${{ 
jobs.source.outputs.component_docs }} markdown: - value: ${{ jobs.changes.outputs.markdown }} + value: ${{ jobs.source.outputs.markdown }} install: - value: ${{ jobs.changes.outputs.install }} + value: ${{ jobs.source.outputs.install }} k8s: - value: ${{ jobs.changes.outputs.k8s }} + value: ${{ jobs.source.outputs.k8s }} all-int: - value: ${{ jobs.changes.outputs.all-int }} + value: ${{ jobs.int_tests.outputs.all-int }} amqp: - value: ${{ jobs.changes.outputs.amqp }} + value: ${{ jobs.int_tests.outputs.amqp }} appsignal: - value: ${{ jobs.changes.outputs.appsignal }} + value: ${{ jobs.int_tests.outputs.appsignal }} aws: - value: ${{ jobs.changes.outputs.aws }} + value: ${{ jobs.int_tests.outputs.aws }} axiom: - value: ${{ jobs.changes.outputs.axiom }} + value: ${{ jobs.int_tests.outputs.axiom }} azure: - value: ${{ jobs.changes.outputs.azure }} + value: ${{ jobs.int_tests.outputs.azure }} clickhouse: - value: ${{ jobs.changes.outputs.clickhouse }} + value: ${{ jobs.int_tests.outputs.clickhouse }} databend: - value: ${{ jobs.changes.outputs.databend }} + value: ${{ jobs.int_tests.outputs.databend }} datadog: - value: ${{ jobs.changes.outputs.datadog }} + value: ${{ jobs.int_tests.outputs.datadog }} dnstap: - value: ${{ jobs.changes.outputs.dnstap }} + value: ${{ jobs.int_tests.outputs.dnstap }} docker-logs: - value: ${{ jobs.changes.outputs.docker-logs }} + value: ${{ jobs.int_tests.outputs.docker-logs }} elasticsearch: - value: ${{ jobs.changes.outputs.elasticsearch }} + value: ${{ jobs.int_tests.outputs.elasticsearch }} eventstoredb: - value: ${{ jobs.changes.outputs.eventstoredb }} + value: ${{ jobs.int_tests.outputs.eventstoredb }} fluent: - value: ${{ jobs.changes.outputs.fluent }} + value: ${{ jobs.int_tests.outputs.fluent }} gcp: - value: ${{ jobs.changes.outputs.gcp }} + value: ${{ jobs.int_tests.outputs.gcp }} humio: - value: ${{ jobs.changes.outputs.humio }} + value: ${{ jobs.int_tests.outputs.humio }} http-client: - value: ${{ jobs.changes.outputs.http-client }} + value: ${{ jobs.int_tests.outputs.http-client }} influxdb: - value: ${{ jobs.changes.outputs.influxdb }} + value: ${{ jobs.int_tests.outputs.influxdb }} kafka: - value: ${{ jobs.changes.outputs.kafka }} + value: ${{ jobs.int_tests.outputs.kafka }} logstash: - value: ${{ jobs.changes.outputs.logstash }} + value: ${{ jobs.int_tests.outputs.logstash }} loki: - value: ${{ jobs.changes.outputs.loki }} + value: ${{ jobs.int_tests.outputs.loki }} mongodb: - value: ${{ jobs.changes.outputs.mongodb }} + value: ${{ jobs.int_tests.outputs.mongodb }} nats: - value: ${{ jobs.changes.outputs.nats }} + value: ${{ jobs.int_tests.outputs.nats }} nginx: - value: ${{ jobs.changes.outputs.nginx }} + value: ${{ jobs.int_tests.outputs.nginx }} opentelemetry: - value: ${{ jobs.changes.outputs.opentelemetry }} + value: ${{ jobs.int_tests.outputs.opentelemetry }} postgres: - value: ${{ jobs.changes.outputs.postgres }} + value: ${{ jobs.int_tests.outputs.postgres }} prometheus: - value: ${{ jobs.changes.outputs.prometheus }} + value: ${{ jobs.int_tests.outputs.prometheus }} pulsar: - value: ${{ jobs.changes.outputs.pulsar }} + value: ${{ jobs.int_tests.outputs.pulsar }} redis: - value: ${{ jobs.changes.outputs.redis }} + value: ${{ jobs.int_tests.outputs.redis }} splunk: - value: ${{ jobs.changes.outputs.splunk }} + value: ${{ jobs.int_tests.outputs.splunk }} webhdfs: - value: ${{ jobs.changes.outputs.webhdfs }} + value: ${{ jobs.int_tests.outputs.webhdfs }} jobs: - changes: + # Detects changes that are not specific to integration tests + 
source: runs-on: ubuntu-20.04 - # Set job outputs to values from filter step + if: ${{ inputs.source }} outputs: - # General source code source: ${{ steps.filter.outputs.source }} dependencies: ${{ steps.filter.outputs.dependencies }} internal_events: ${{ steps.filter.outputs.internal_events }} @@ -108,40 +116,7 @@ jobs: component_docs: ${{ steps.filter.outputs.component_docs }} markdown: ${{ steps.filter.outputs.markdown }} install: ${{ steps.filter.outputs.install }} - # K8s k8s: ${{ steps.filter.outputs.k8s }} - # Integrations - all-int: ${{ steps.filter.outputs.all-int }} - amqp: ${{ steps.filter.outputs.amqp }} - appsignal: ${{ steps.filter.outputs.appsignal}} - aws: ${{ steps.filter.outputs.aws }} - axiom: ${{ steps.filter.outputs.axiom }} - azure: ${{ steps.filter.outputs.azure }} - clickhouse: ${{ steps.filter.outputs.clickhouse }} - databend: ${{ steps.filter.outputs.databend }} - datadog: ${{ steps.filter.outputs.datadog }} - dnstap: ${{ steps.filter.outputs.dnstap }} - docker-logs: ${{ steps.filter.outputs.docker-logs }} - elasticsearch: ${{ steps.filter.outputs.elasticsearch }} - eventstoredb: ${{ steps.filter.outputs.eventstoredb }} - fluent: ${{ steps.filter.outputs.fluent }} - gcp: ${{ steps.filter.outputs.gcp }} - humio: ${{ steps.filter.outputs.humio }} - http-client: ${{ steps.filter.outputs.http-client }} - influxdb: ${{ steps.filter.outputs.influxdb }} - kafka: ${{ steps.filter.outputs.kafka }} - logstash: ${{ steps.filter.outputs.logstash }} - loki: ${{ steps.filter.outputs.loki }} - mongodb: ${{ steps.filter.outputs.mongodb }} - nats: ${{ steps.filter.outputs.nats }} - nginx: ${{ steps.filter.outputs.nginx }} - opentelemetry: ${{ steps.filter.outputs.opentelemetry }} - postgres: ${{ steps.filter.outputs.postgres }} - prometheus: ${{ steps.filter.outputs.prometheus }} - pulsar: ${{ steps.filter.outputs.pulsar }} - redis: ${{ steps.filter.outputs.redis }} - splunk: ${{ steps.filter.outputs.splunk }} - webhdfs: ${{ steps.filter.outputs.webhdfs }} steps: - uses: actions/checkout@v3 @@ -198,181 +173,55 @@ jobs: - "distribution/install.sh" k8s: - "src/sources/kubernetes_logs/**" - all-int: - - "lib/vector-core/**" - amqp: - - "src/amqp.rs" - - "src/internal_events/amqp.rs" - - "src/sinks/amqp/**" - - "src/sources/amqp.rs" - - "src/sources/util/**" - - "src/sinks/util/**" - - "scripts/integration/amqp/**" - appsignal: - - "src/sinks/appsignal/**" - - "src/sinks/util/**" - - "scripts/integration/appsignal/**" - aws: - - "src/aws/**" - - "src/internal_events/aws*" - - "src/sources/aws_ecs_metrics/**" - - "src/sources/aws_kinesis_firehose/**" - - "src/sources/aws_s3/**" - - "src/sources/aws_sqs/**" - - "src/sources/util/**" - - "src/sinks/aws_cloudwatch_logs/**" - - "src/sinks/aws_cloudwatch_metrics/**" - - "src/sinks/aws_kinesis/**" - - "src/sinks/aws_s3/**" - - "src/sinks/aws_sqs/**" - - "src/sinks/util/**" - - "src/transforms/aws*" - - "scripts/integration/aws/**" - axiom: - - "src/sinks/axiom.rs" - - "src/sinks/util/**" - - "scripts/integration/axiom/**" - azure: - - "src/sinks/azure_**" - - "src/sinks/util/**" - - "scripts/integration/azure/**" - clickhouse: - - "src/sinks/clickhouse/**" - - "src/sinks/util/**" - - "scripts/integration/clickhouse/**" - databend: - - "src/sinks/databend/**" - - "src/sinks/util/**" - - "scripts/integration/databend/**" - datadog: - - "src/common/datadog.rs" - - "src/internal_events/datadog_*" - - "src/sources/datadog_agent/**" - - "src/sinks/datadog/**" - - "src/sinks/datadog_archives.rs" - - "src/sinks/util/**" - - 
"scripts/integration/datadog-agent/**" - - "scripts/integration/datadog-logs/**" - - "scripts/integration/datadog-metrics/**" - - "scripts/integration/datadog-traces/**" - dnstap: - - "src/internal_events/dnstap.rs" - - "src/sources/dnstap/**" - - "scripts/integration/dnstap/**" - docker-logs: - - "src/docker.rs" - - "src/internal_events/docker_logs.rs" - - "src/sources/docker_logs/**" - - "src/sources/util/**" - - "scripts/integration/docker-logs/**" - elasticsearch: - - "src/sinks/elasticsearch/**" - - "src/sinks/util/**" - - "scripts/integration/elasticsearch/**" - eventstoredb: - - "src/internal_events/eventstoredb_metrics.rs" - - "src/sources/eventstoredb_metrics/**" - - "src/sources/util/**" - - "scripts/integration/eventstoredb/**" - fluent: - - "src/internal_events/fluent.rs" - - "src/sources/fluent/**" - - "src/sources/util/**" - - "scripts/integration/fluent/**" - gcp: - - "src/internal_events/gcp_pubsub.rs" - - "src/sources/gcp_pubsub.rs" - - "src/sources/util/**" - - "src/sinks/gcp/**" - - "src/sinks/util/**" - - "src/gcp.rs" - - "scripts/integration/gcp/**" - - "scripts/integration/chronicle/**" - humio: - - "src/sinks/humio/**" - - "src/sinks/util/**" - - "scripts/integration/humio/**" - http-client: - - "src/sinks/http-client/**" - - "src/sinks/util/**" - - "scripts/integration/http-client/**" - influxdb: - - "src/internal_events/influxdb.rs" - - "src/sinks/influxdb/**" - - "src/sinks/util/**" - - "scripts/integration/influxdb/**" - kafka: - - "src/internal_events/kafka.rs" - - "src/sinks/kafka/**" - - "src/sinks/util/**" - - "src/sources/kafka.rs" - - "src/sources/util/**" - - "src/kafka.rs" - - "scripts/integration/kafka/**" - logstash: - - "src/sources/logstash.rs" - - "src/sources/util/**" - - "scripts/integration/logstash/**" - loki: - - "src/internal_events/loki.rs" - - "src/sinks/loki/**" - - "src/sinks/util/**" - - "scripts/integration/loki/**" - mongodb: - - "src/internal_events/mongodb_metrics.rs" - - "src/sources/mongodb_metrics/**" - - "src/sources/util/**" - - "scripts/integration/mongodb/**" - nats: - - "src/internal_events/nats.rs" - - "src/sources/nats.rs" - - "src/sources/util/**" - - "src/sinks/nats.rs" - - "src/sinks/util/**" - - "src/nats.rs" - - "scripts/integration/nats/**" - nginx: - - "src/internal_events/nginx_metrics.rs" - - "src/sources/nginx_metrics/**" - - "src/sources/util/**" - - "scripts/integration/nginx/**" - opentelemetry: - - "src/sources/opentelemetry/**" - - "src/sources/util/**" - - "scripts/integration/opentelemetry/**" - postgres: - - "src/internal_events/postgresql_metrics.rs" - - "src/sources/postgresql_metrics.rs" - - "src/sources/util/**" - - "scripts/integration/postgres/**" - prometheus: - - "src/internal_events/prometheus.rs" - - "src/sources/prometheus/**" - - "src/sources/util/**" - - "src/sinks/prometheus/**" - - "src/sinks/util/**" - - "scripts/integration/prometheus/**" - pulsar: - - "src/internal_events/pulsar.rs" - - "src/sinks/pulsar/**" - - "src/sinks/util/**" - - "scripts/integration/pulsar/**" - redis: - - "src/internal_events/redis.rs" - - "src/sources/redis/**" - - "src/sources/util/**" - - "src/sinks/redis.rs" - - "src/sinks/util/**" - - "scripts/integration/redis/**" - splunk: - - "src/internal_events/splunk_hec.rs" - - "src/sources/splunk_hec/**" - - "src/sources/util/**" - - "src/sinks/splunk_hec/**" - - "src/sinks/util/**" - - "scripts/integration/splunk/**" - webhdfs: - - "src/sinks/webhdfs/**" - - "src/sinks/util/**" - - "scripts/integration/webhdfs/**" + # Detects changes that are specific to integration 
tests + int_tests: + runs-on: ubuntu-latest + if: ${{ inputs.int_tests }} + outputs: + all-int: ${{ steps.filter.outputs.all-int}} + amqp: ${{ steps.filter.outputs.amqp }} + appsignal: ${{ steps.filter.outputs.appsignal}} + aws: ${{ steps.filter.outputs.aws }} + axiom: ${{ steps.filter.outputs.axiom }} + azure: ${{ steps.filter.outputs.azure }} + clickhouse: ${{ steps.filter.outputs.clickhouse }} + databend: ${{ steps.filter.outputs.databend }} + datadog: ${{ steps.filter.outputs.datadog }} + dnstap: ${{ steps.filter.outputs.dnstap }} + docker-logs: ${{ steps.filter.outputs.docker-logs }} + elasticsearch: ${{ steps.filter.outputs.elasticsearch }} + eventstoredb: ${{ steps.filter.outputs.eventstoredb }} + fluent: ${{ steps.filter.outputs.fluent }} + gcp: ${{ steps.filter.outputs.gcp }} + humio: ${{ steps.filter.outputs.humio }} + http-client: ${{ steps.filter.outputs.http-client }} + influxdb: ${{ steps.filter.outputs.influxdb }} + kafka: ${{ steps.filter.outputs.kafka }} + logstash: ${{ steps.filter.outputs.logstash }} + loki: ${{ steps.filter.outputs.loki }} + mongodb: ${{ steps.filter.outputs.mongodb }} + nats: ${{ steps.filter.outputs.nats }} + nginx: ${{ steps.filter.outputs.nginx }} + opentelemetry: ${{ steps.filter.outputs.opentelemetry }} + postgres: ${{ steps.filter.outputs.postgres }} + prometheus: ${{ steps.filter.outputs.prometheus }} + pulsar: ${{ steps.filter.outputs.pulsar }} + redis: ${{ steps.filter.outputs.redis }} + splunk: ${{ steps.filter.outputs.splunk }} + webhdfs: ${{ steps.filter.outputs.webhdfs }} + steps: + - uses: actions/checkout@v3 + + # creates a yaml file that contains the filters for each integration, + # extracted from the output of the `vdev int ci-paths` command, which + # sources the paths from the scripts/integration/.../test.yaml files + - name: Create filter rules for integrations + run: cargo vdev int ci-paths > int_test_filters.yaml + + - uses: dorny/paths-filter@v2 + id: filter + with: + base: ${{ inputs.base_ref }} + ref: ${{ inputs.head_ref }} + filters: int_test_filters.yaml diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 898d4c6f4a649..7f85870e7dff4 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -38,6 +38,8 @@ jobs: with: base_ref: ${{ github.event.pull_request.base.ref }} head_ref: ${{ github.event.pull_request.head.ref }} + source: false + int_tests: true secrets: inherit # Calls the Integration Test workflow for each integration that was detected to have files changed that impact it. 
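To make the new mechanism concrete: the `cargo vdev int ci-paths > int_test_filters.yaml` step above is expected to emit a `dorny/paths-filter`-style mapping with one key per integration, built from each integration's `paths` list in `scripts/integration/<name>/test.yaml`. The excerpt below is a hand-assembled illustration using two of the path lists added later in this patch; the shape is an assumption based on the paths-filter `filters` input format, not actual `vdev` output.

```yaml
# Hypothetical excerpt of the generated int_test_filters.yaml (entries copied from
# the per-integration test.yaml `paths` lists introduced by this patch).
amqp:
- "src/amqp.rs"
- "src/internal_events/amqp.rs"
- "src/sinks/amqp/**"
- "src/sources/amqp.rs"
- "src/sources/util/**"
- "src/sinks/util/**"
appsignal:
- "src/sinks/appsignal/**"
- "src/sinks/util/**"
```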
diff --git a/scripts/integration/amqp/test.yaml b/scripts/integration/amqp/test.yaml index 5f13537f896c0..94c080a18559d 100644 --- a/scripts/integration/amqp/test.yaml +++ b/scripts/integration/amqp/test.yaml @@ -5,3 +5,13 @@ test_filter: '::amqp::' matrix: version: ['3.8'] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/amqp.rs" +- "src/internal_events/amqp.rs" +- "src/sinks/amqp/**" +- "src/sources/amqp.rs" +- "src/sources/util/**" +- "src/sinks/util/**" diff --git a/scripts/integration/appsignal/test.yaml b/scripts/integration/appsignal/test.yaml index 0f0970f75e4ac..d110306916df0 100644 --- a/scripts/integration/appsignal/test.yaml +++ b/scripts/integration/appsignal/test.yaml @@ -9,3 +9,9 @@ runner: matrix: version: [latest] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sinks/appsignal/**" +- "src/sinks/util/**" diff --git a/scripts/integration/aws/test.yaml b/scripts/integration/aws/test.yaml index 424e6d90c85e9..6e554fca22c1a 100644 --- a/scripts/integration/aws/test.yaml +++ b/scripts/integration/aws/test.yaml @@ -17,3 +17,15 @@ env: matrix: version: [latest] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/aws_**" +- "src/internal_events/aws_**" +- "src/sources/aws_**" +- "src/sources/util/**" +- "src/sinks/aws_**" +- "src/sinks/util/**" +- "src/transforms/aws_**" +- "scripts/integration/aws/**" diff --git a/scripts/integration/axiom/test.yaml b/scripts/integration/axiom/test.yaml index 59bd599e4a92e..1e8c3e1d8eec5 100644 --- a/scripts/integration/axiom/test.yaml +++ b/scripts/integration/axiom/test.yaml @@ -12,3 +12,10 @@ runner: matrix: postgres: [13-alpine] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sinks/axiom.rs" +- "src/sinks/util/**" +- "scripts/integration/axiom/**" diff --git a/scripts/integration/azure/test.yaml b/scripts/integration/azure/test.yaml index f871b052747ae..fe9226e2beeed 100644 --- a/scripts/integration/azure/test.yaml +++ b/scripts/integration/azure/test.yaml @@ -10,3 +10,10 @@ env: matrix: version: [3.14.0] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sinks/azure_**" +- "src/sinks/util/**" +- "scripts/integration/azure/**" diff --git a/scripts/integration/clickhouse/test.yaml b/scripts/integration/clickhouse/test.yaml index 7f59a79e07196..7da786d257251 100644 --- a/scripts/integration/clickhouse/test.yaml +++ b/scripts/integration/clickhouse/test.yaml @@ -8,3 +8,10 @@ env: matrix: version: ['19'] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sinks/clickhouse/**" +- "src/sinks/util/**" +- "scripts/integration/clickhouse/**" diff --git a/scripts/integration/databend/test.yaml b/scripts/integration/databend/test.yaml index 9ce69f72ba690..f84a979cb4b11 100644 --- a/scripts/integration/databend/test.yaml +++ b/scripts/integration/databend/test.yaml @@ -11,3 +11,10 @@ runner: matrix: version: ['latest'] + +# changes to these files/paths will invoke the integration test 
in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sinks/databend/**" +- "src/sinks/util/**" +- "scripts/integration/databend/**" diff --git a/scripts/integration/datadog-agent/test.yaml b/scripts/integration/datadog-agent/test.yaml index cbf55cd6ee8ca..6c4b399fdfb3e 100644 --- a/scripts/integration/datadog-agent/test.yaml +++ b/scripts/integration/datadog-agent/test.yaml @@ -12,3 +12,11 @@ env: matrix: version: ['7'] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/common/datadog.rs" +- "src/internal_events/datadog_*" +- "src/sources/datadog_agent/**" +- "scripts/integration/datadog-agent/**" diff --git a/scripts/integration/datadog-logs/test.yaml b/scripts/integration/datadog-logs/test.yaml index 87dbbecae66cf..30a99f8a87ae7 100644 --- a/scripts/integration/datadog-logs/test.yaml +++ b/scripts/integration/datadog-logs/test.yaml @@ -9,3 +9,12 @@ runner: matrix: version: [latest] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/common/datadog.rs" +- "src/internal_events/datadog_*" +- "src/sinks/datadog/logs/**" +- "src/sinks/util/**" +- "scripts/integration/datadog-logs/**" diff --git a/scripts/integration/datadog-metrics/test.yaml b/scripts/integration/datadog-metrics/test.yaml index cfdf63fab0783..237008a0d2551 100644 --- a/scripts/integration/datadog-metrics/test.yaml +++ b/scripts/integration/datadog-metrics/test.yaml @@ -9,3 +9,12 @@ runner: matrix: version: [latest] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/common/datadog.rs" +- "src/internal_events/datadog_*" +- "src/sinks/datadog/metrics/**" +- "src/sinks/util/**" +- "scripts/integration/datadog-metrics/**" diff --git a/scripts/integration/datadog-traces/test.yaml b/scripts/integration/datadog-traces/test.yaml index 9a62b19cbf832..31c4c0f97ef11 100644 --- a/scripts/integration/datadog-traces/test.yaml +++ b/scripts/integration/datadog-traces/test.yaml @@ -13,3 +13,12 @@ env: matrix: version: [latest] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/common/datadog.rs" +- "src/internal_events/datadog_*" +- "src/sinks/datadog/**" +- "src/sinks/util/**" +- "scripts/integration/datadog-traces/**" diff --git a/scripts/integration/dnstap/test.yaml b/scripts/integration/dnstap/test.yaml index 6475a242cf476..dab2f93e5e2ff 100644 --- a/scripts/integration/dnstap/test.yaml +++ b/scripts/integration/dnstap/test.yaml @@ -12,3 +12,11 @@ runner: matrix: version: ['latest'] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/dnstap.rs" +- "src/sources/dnstap/**" +- "src/sources/util/**" +- "scripts/integration/dnstap/**" diff --git a/scripts/integration/docker-logs/test.yaml b/scripts/integration/docker-logs/test.yaml index 5ad677b4b1259..15a432f955c70 100644 --- a/scripts/integration/docker-logs/test.yaml +++ b/scripts/integration/docker-logs/test.yaml @@ -8,3 +8,12 @@ runner: matrix: default: ["default"] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using 
https://github.com/micromatch/picomatch +paths: +- "src/docker.rs" +- "src/internal_events/docker_logs.rs" +- "src/sources/docker_logs/**" +- "src/sources/util/**" +- "scripts/integration/docker-logs/**" diff --git a/scripts/integration/elasticsearch/test.yaml b/scripts/integration/elasticsearch/test.yaml index 98a8ee59e46fa..6b27bca77e0dd 100644 --- a/scripts/integration/elasticsearch/test.yaml +++ b/scripts/integration/elasticsearch/test.yaml @@ -12,3 +12,10 @@ env: matrix: version: [7.13.1] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sinks/elasticsearch/**" +- "src/sinks/util/**" +- "scripts/integration/elasticsearch/**" diff --git a/scripts/integration/eventstoredb/test.yaml b/scripts/integration/eventstoredb/test.yaml index f0dbb90b377e0..43370281b158f 100644 --- a/scripts/integration/eventstoredb/test.yaml +++ b/scripts/integration/eventstoredb/test.yaml @@ -5,3 +5,11 @@ test_filter: '::eventstoredb_metrics::' matrix: version: [latest] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/eventstoredb_metrics.rs" +- "src/sources/eventstoredb_metrics/**" +- "src/sources/util/**" +- "scripts/integration/eventstoredb/**" diff --git a/scripts/integration/fluent/test.yaml b/scripts/integration/fluent/test.yaml index 8457454baf7d4..6593f8379b216 100644 --- a/scripts/integration/fluent/test.yaml +++ b/scripts/integration/fluent/test.yaml @@ -10,3 +10,11 @@ runner: matrix: default: ["default"] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/fluent.rs" +- "src/sources/fluent/**" +- "src/sources/util/**" +- "scripts/integration/fluent/**" diff --git a/scripts/integration/gcp/test.yaml b/scripts/integration/gcp/test.yaml index c63a4de29b613..7e516b716fafb 100644 --- a/scripts/integration/gcp/test.yaml +++ b/scripts/integration/gcp/test.yaml @@ -8,3 +8,14 @@ env: matrix: version: [latest] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/gcp_pubsub.rs" +- "src/sources/gcp_pubsub.rs" +- "src/sources/util/**" +- "src/sinks/gcp/**" +- "src/sinks/util/**" +- "src/gcp.rs" +- "scripts/integration/gcp/**" diff --git a/scripts/integration/http-client/test.yaml b/scripts/integration/http-client/test.yaml index edc88113b7b6b..0ae2b49bf2c76 100644 --- a/scripts/integration/http-client/test.yaml +++ b/scripts/integration/http-client/test.yaml @@ -10,3 +10,10 @@ env: matrix: version: ["v0.34.1"] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sources/http_client/**" +- "src/sources/util/**" +- "scripts/integration/http-client/**" diff --git a/scripts/integration/humio/test.yaml b/scripts/integration/humio/test.yaml index 95597eea7d8c3..344ab6a7fc5fc 100644 --- a/scripts/integration/humio/test.yaml +++ b/scripts/integration/humio/test.yaml @@ -9,3 +9,10 @@ runner: matrix: version: [1.13.1] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sinks/humio/**" +- "src/sinks/util/**" +- 
"scripts/integration/humio/**" diff --git a/scripts/integration/influxdb/test.yaml b/scripts/integration/influxdb/test.yaml index ef7bc7bcae349..3dad78af2e2e8 100644 --- a/scripts/integration/influxdb/test.yaml +++ b/scripts/integration/influxdb/test.yaml @@ -10,3 +10,11 @@ env: matrix: version: ['1.8'] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/influxdb.rs" +- "src/sinks/influxdb/**" +- "src/sinks/util/**" +- "scripts/integration/influxdb/**" diff --git a/scripts/integration/kafka/test.yaml b/scripts/integration/kafka/test.yaml index 39a2e2be58af1..a52131e6e5a1b 100644 --- a/scripts/integration/kafka/test.yaml +++ b/scripts/integration/kafka/test.yaml @@ -8,3 +8,14 @@ env: matrix: version: [latest] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/kafka.rs" +- "src/sinks/kafka/**" +- "src/sinks/util/**" +- "src/sources/kafka.rs" +- "src/sources/util/**" +- "src/kafka.rs" +- "scripts/integration/kafka/**" diff --git a/scripts/integration/logstash/test.yaml b/scripts/integration/logstash/test.yaml index 550daed1f7ce1..a73a040b6a996 100644 --- a/scripts/integration/logstash/test.yaml +++ b/scripts/integration/logstash/test.yaml @@ -9,3 +9,10 @@ env: matrix: version: [7.12.1] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sources/logstash.rs" +- "src/sources/util/**" +- "scripts/integration/logstash/**" diff --git a/scripts/integration/loki/test.yaml b/scripts/integration/loki/test.yaml index 156392182a509..60f762dd569b7 100644 --- a/scripts/integration/loki/test.yaml +++ b/scripts/integration/loki/test.yaml @@ -8,3 +8,11 @@ env: matrix: version: [2.4.1] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/loki.rs" +- "src/sinks/loki/**" +- "src/sinks/util/**" +- "scripts/integration/loki/**" diff --git a/scripts/integration/mongodb/test.yaml b/scripts/integration/mongodb/test.yaml index 3c2a40b264ecc..76ddefab16f46 100644 --- a/scripts/integration/mongodb/test.yaml +++ b/scripts/integration/mongodb/test.yaml @@ -9,3 +9,11 @@ env: matrix: version: [4.2.10] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/mongodb_metrics.rs" +- "src/sources/mongodb_metrics/**" +- "src/sources/util/**" +- "scripts/integration/mongodb/**" diff --git a/scripts/integration/nats/test.yaml b/scripts/integration/nats/test.yaml index 9a11fabbf14de..1615b5f244ff6 100644 --- a/scripts/integration/nats/test.yaml +++ b/scripts/integration/nats/test.yaml @@ -14,3 +14,14 @@ env: matrix: version: [latest] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/nats.rs" +- "src/sources/nats.rs" +- "src/sources/util/**" +- "src/sinks/nats.rs" +- "src/sinks/util/**" +- "src/nats.rs" +- "scripts/integration/nats/**" diff --git a/scripts/integration/nginx/test.yaml b/scripts/integration/nginx/test.yaml index 61519b9f4fff4..934873608d5ef 100644 --- 
a/scripts/integration/nginx/test.yaml +++ b/scripts/integration/nginx/test.yaml @@ -11,3 +11,11 @@ runner: matrix: version: [1.19.4] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/nginx_metrics.rs" +- "src/sources/nginx_metrics/**" +- "src/sources/util/**" +- "scripts/integration/nginx/**" diff --git a/scripts/integration/opentelemetry/test.yaml b/scripts/integration/opentelemetry/test.yaml index 140958c15a079..e586c444affac 100644 --- a/scripts/integration/opentelemetry/test.yaml +++ b/scripts/integration/opentelemetry/test.yaml @@ -10,3 +10,10 @@ runner: matrix: version: [0.56.0] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sources/opentelemetry/**" +- "src/sources/util/**" +- "scripts/integration/opentelemetry/**" diff --git a/scripts/integration/postgres/test.yaml b/scripts/integration/postgres/test.yaml index 33df718d2ad56..67aa2ddc10b50 100644 --- a/scripts/integration/postgres/test.yaml +++ b/scripts/integration/postgres/test.yaml @@ -13,3 +13,11 @@ runner: matrix: version: ['13.1'] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/postgresql_metrics.rs" +- "src/sources/postgresql_metrics.rs" +- "src/sources/util/**" +- "scripts/integration/postgres/**" diff --git a/scripts/integration/prometheus/test.yaml b/scripts/integration/prometheus/test.yaml index fb3a52d529a35..d2db2d9282b6b 100644 --- a/scripts/integration/prometheus/test.yaml +++ b/scripts/integration/prometheus/test.yaml @@ -9,3 +9,13 @@ env: matrix: prometheus: ['v2.33.4'] influxdb: ['1.8'] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/prometheus.rs" +- "src/sources/prometheus/**" +- "src/sources/util/**" +- "src/sinks/prometheus/**" +- "src/sinks/util/**" +- "scripts/integration/prometheus/**" diff --git a/scripts/integration/pulsar/test.yaml b/scripts/integration/pulsar/test.yaml index a63f7da772e4b..824f0e0f290d4 100644 --- a/scripts/integration/pulsar/test.yaml +++ b/scripts/integration/pulsar/test.yaml @@ -8,3 +8,11 @@ env: matrix: version: [latest] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/pulsar.rs" +- "src/sinks/pulsar/**" +- "src/sinks/util/**" +- "scripts/integration/pulsar/**" diff --git a/scripts/integration/redis/test.yaml b/scripts/integration/redis/test.yaml index 4456e4829082f..d2d0577e844ca 100644 --- a/scripts/integration/redis/test.yaml +++ b/scripts/integration/redis/test.yaml @@ -8,3 +8,13 @@ env: matrix: version: [6-alpine] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/redis.rs" +- "src/sources/redis/**" +- "src/sources/util/**" +- "src/sinks/redis.rs" +- "src/sinks/util/**" +- "scripts/integration/redis/**" diff --git a/scripts/integration/splunk/test.yaml b/scripts/integration/splunk/test.yaml index 4d45c8364ae00..85de787cbe1e6 100644 --- a/scripts/integration/splunk/test.yaml +++ b/scripts/integration/splunk/test.yaml @@ 
-9,3 +9,13 @@ env: matrix: version: ["8.2.4", "7.3"] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/internal_events/splunk_hec.rs" +- "src/sources/splunk_hec/**" +- "src/sources/util/**" +- "src/sinks/splunk_hec/**" +- "src/sinks/util/**" +- "scripts/integration/splunk/**" diff --git a/scripts/integration/webhdfs/test.yaml b/scripts/integration/webhdfs/test.yaml index fe0c52cd47af7..fc6a3193f9139 100644 --- a/scripts/integration/webhdfs/test.yaml +++ b/scripts/integration/webhdfs/test.yaml @@ -8,3 +8,10 @@ env: matrix: hadoop: ['2.0.0-hadoop3.2.1-java8'] + +# changes to these files/paths will invoke the integration test in CI +# expressions are evaluated using https://github.com/micromatch/picomatch +paths: +- "src/sinks/webhdfs/**" +- "src/sinks/util/**" +- "scripts/integration/webhdfs/**" diff --git a/vdev/src/commands/integration/ci_paths.rs b/vdev/src/commands/integration/ci_paths.rs new file mode 100644 index 0000000000000..f95573c189571 --- /dev/null +++ b/vdev/src/commands/integration/ci_paths.rs @@ -0,0 +1,30 @@ +use anyhow::Result; +use clap::Args; + +use crate::testing::config::IntegrationTestConfig; + +/// Output paths in the repository that are associated with an integration. +/// If any changes are made to these paths, that integration should be tested. +#[derive(Args, Debug)] +#[command()] +pub struct Cli {} + +impl Cli { + pub fn exec(&self) -> Result<()> { + // changes to vector-core should test all integrations + println!("all-int:"); + println!("- \"lib/vector-core/**\""); + + // paths for each integration are defined in their respective config files. + for (integration, config) in IntegrationTestConfig::collect_all()? { + if let Some(paths) = config.paths { + println!("{integration}:"); + for path in paths { + println!("- \"{path}\""); + } + } + } + + Ok(()) + } +} diff --git a/vdev/src/commands/integration/mod.rs b/vdev/src/commands/integration/mod.rs index acdb8a0a0380d..0591d7f3869a1 100644 --- a/vdev/src/commands/integration/mod.rs +++ b/vdev/src/commands/integration/mod.rs @@ -9,4 +9,5 @@ These test setups are organized into a set of integrations, located in subdirect mod start, mod stop, mod test, + mod ci_paths, } diff --git a/vdev/src/testing/config.rs b/vdev/src/testing/config.rs index 560c7faba7b18..8dab503a493f8 100644 --- a/vdev/src/testing/config.rs +++ b/vdev/src/testing/config.rs @@ -108,6 +108,8 @@ pub struct IntegrationTestConfig { pub test: Option, pub test_filter: Option, + + pub paths: Option>, } #[derive(Clone, Debug, Default, Deserialize)] From 4ebc3e1171cba4f00023f0ef860a6b66c98763a9 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 23 Jun 2023 15:05:16 -0700 Subject: [PATCH 174/236] fix(loki sink, observability): Drop non-fatal template render errors to warnings (#17746) If a render error doesn't result in a dropped event, it seems more like a warning than an error. 
For the places that currently emit template errors with `drop_event: false`: * `loki` sink: skips inserting label if key or value fails to render; falls back to `None` for partitioning using `tenant_id` * `throttle` transform: falls back to `None` for throttle key * `log_to_metric` transform: skips tag addition * `papertrail` sink: falls back to `vector` for the `process` field * `splunk_hec_logs` sink: falls back to `None` for partition keys (source, sourcetype, index) * `splunk_hec_metrics` sink: falls back to `None` for source, sourcetype, index Fixes: #17487 Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- src/internal_events/template.rs | 34 ++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/src/internal_events/template.rs b/src/internal_events/template.rs index 549120d6e38bb..b9ecd702d0dca 100644 --- a/src/internal_events/template.rs +++ b/src/internal_events/template.rs @@ -19,25 +19,33 @@ impl<'a> InternalEvent for TemplateRenderingError<'a> { } msg.push('.'); - error!( - message = %msg, - error = %self.error, - error_type = error_type::TEMPLATE_FAILED, - stage = error_stage::PROCESSING, - internal_log_rate_limit = true, - ); + if self.drop_event { + error!( + message = %msg, + error = %self.error, + error_type = error_type::TEMPLATE_FAILED, + stage = error_stage::PROCESSING, + internal_log_rate_limit = true, + ); - counter!( - "component_errors_total", 1, - "error_type" => error_type::TEMPLATE_FAILED, - "stage" => error_stage::PROCESSING, - ); + counter!( + "component_errors_total", 1, + "error_type" => error_type::TEMPLATE_FAILED, + "stage" => error_stage::PROCESSING, + ); - if self.drop_event { emit!(ComponentEventsDropped:: { count: 1, reason: "Failed to render template.", }); + } else { + warn!( + message = %msg, + error = %self.error, + error_type = error_type::TEMPLATE_FAILED, + stage = error_stage::PROCESSING, + internal_log_rate_limit = true, + ); } } } From c35ebd167b029eb0fb6c180301e8ff911f938f9f Mon Sep 17 00:00:00 2001 From: neuronull Date: Fri, 23 Jun 2023 16:27:25 -0600 Subject: [PATCH 175/236] chore(administration): add domain label for vdev (#17748) --- .github/labeler.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/labeler.yml b/.github/labeler.yml index 4ef2df868cff8..4f8b8a336c965 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -23,6 +23,9 @@ "domain: ci": - scripts/**/* +"domain: vdev": +- vdev/**/* + "domain: releasing": - distribution/**/* - scripts/package-* From 6e1878b1c151a19d7a99fd6c8c8a847cc69db3c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jun 2023 10:31:06 +0100 Subject: [PATCH 176/236] chore(deps): Bump itertools from 0.10.5 to 0.11.0 (#17736) Bumps [itertools](https://github.com/rust-itertools/itertools) from 0.10.5 to 0.11.0.
Changelog (sourced from itertools's changelog):

0.11.0

Breaking

  • Make Itertools::merge_join_by also accept functions returning bool (#704)
  • Implement PeekingNext transitively over mutable references (#643)
  • Change with_position to yield (Position, Item) instead of Position<Item> (#699) (see the sketch below)
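
(Not part of the upstream changelog: a minimal sketch of the `with_position` breaking change, assuming the 0.11 API summarized in the item above; the input values and match arms are illustrative only.)

```rust
use itertools::{Itertools, Position};

fn main() {
    // itertools 0.10: with_position() wrapped each item, yielding e.g.
    // Position::First(1), so the value was matched inside the variant.
    // itertools 0.11: it yields (Position, Item) tuples, so the position
    // marker and the item are destructured separately.
    for (pos, item) in [1, 2, 3].iter().with_position() {
        match pos {
            Position::First => println!("first: {item}"),
            Position::Last => println!("last: {item}"),
            _ => println!("middle or only: {item}"),
        }
    }
}
```

Call sites that previously matched variants like `Position::First(x)` now destructure the tuple instead, as above.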

Added

  • Add Itertools::take_while_inclusive (#616) (a sketch follows at the end of this list)
  • Implement PeekingNext for PeekingTakeWhile (#644)
  • Add EitherOrBoth::{just_left, just_right, into_left, into_right, as_deref, as_deref_mut, left_or_insert, right_or_insert, left_or_insert_with, right_or_insert_with, insert_left, insert_right, insert_both} (#629)
  • Implement Clone for CircularTupleWindows (#686)
  • Implement Clone for Chunks (#683)
  • Add Itertools::process_results (#680)
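
(Also not from the upstream changelog: a minimal sketch of the new `take_while_inclusive` adapter, assuming the behavior described in #616; the values are illustrative only.)

```rust
use itertools::Itertools;

fn main() {
    // Like take_while, but also yields the first element that fails the
    // predicate before stopping.
    let taken: Vec<i32> = vec![1, 4, 6, 3, 2]
        .into_iter()
        .take_while_inclusive(|&x| x < 5)
        .collect();
    assert_eq!(taken, vec![1, 4, 6]); // 6 is yielded even though it fails x < 5
    println!("{taken:?}");
}
```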

Changed

  • Use Cell instead of RefCell in Format and FormatWith (#608)
  • CI tweaks (#674, #675)
  • Document and test the difference between stable and unstable sorts (#653)
  • Fix documentation error on Itertools::max_set_by_key (#692)
  • Move MSRV metadata to Cargo.toml (#672)
  • Implement equal with Iterator::eq (#591)
--------- Signed-off-by: dependabot[bot] Signed-off-by: Stephen Wakely Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Stephen Wakely --- Cargo.lock | 31 ++++++++++++++++++++----------- Cargo.toml | 4 ++-- vdev/Cargo.toml | 2 +- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2a39ea6ebd573..100fa2b1c55a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2271,7 +2271,7 @@ dependencies = [ "criterion-plot", "futures 0.3.28", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits", "once_cell", "oorandom", @@ -2293,7 +2293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -4253,6 +4253,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.6" @@ -4525,7 +4534,7 @@ dependencies = [ "diff", "ena", "is-terminal", - "itertools", + "itertools 0.10.5", "lalrpop-util", "petgraph", "regex", @@ -5150,7 +5159,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af5a8477ac96877b5bd1fd67e0c28736c12943aba24eda92b127e036b0c8f400" dependencies = [ "indexmap", - "itertools", + "itertools 0.10.5", "ndarray", "noisy_float", "num-integer", @@ -5567,7 +5576,7 @@ dependencies = [ "base64 0.13.1", "chrono", "http", - "itertools", + "itertools 0.10.5", "log", "num-bigint", "oauth2", @@ -6097,7 +6106,7 @@ checksum = "1ba7d6ead3e3966038f68caa9fc1f860185d95a793180bbcfe0d0da47b3961ed" dependencies = [ "anstyle 0.3.1", "difflib", - "itertools", + "itertools 0.10.5", "predicates-core", ] @@ -6279,7 +6288,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes 1.4.0", "heck 0.4.0", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -6300,7 +6309,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2 1.0.60", "quote 1.0.28", "syn 1.0.109", @@ -9070,7 +9079,7 @@ dependencies = [ "hex", "indexmap", "indicatif", - "itertools", + "itertools 0.11.0", "log", "once_cell", "os_info", @@ -9174,7 +9183,7 @@ dependencies = [ "indoc", "infer 0.14.0", "inventory", - "itertools", + "itertools 0.11.0", "k8s-openapi 0.18.0", "kube", "lapin", @@ -9626,7 +9635,7 @@ dependencies = [ "hostname", "indexmap", "indoc", - "itertools", + "itertools 0.10.5", "lalrpop", "lalrpop-util", "md-5", diff --git a/Cargo.toml b/Cargo.toml index dc838a1ade3c4..3f01ce38a9d5c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -212,7 +212,7 @@ lapin = { version = "2.2.1", default-features = false, features = ["native-tls"] # API async-graphql = { version = "5.0.10", default-features = false, optional = true, features = ["chrono"] } async-graphql-warp = { version = "5.0.10", default-features = false, optional = true } -itertools = { version = "0.10.5", default-features = false, optional = true } +itertools = { version = "0.11.0", default-features = false, optional = true } # API client crossterm = { version = "0.26.1", default-features = false, features = ["event-stream"], 
optional = true } @@ -345,7 +345,7 @@ azure_storage_blobs = { git = "https://github.com/Azure/azure-sdk-for-rust.git", azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["azurite_workaround"] } base64 = "0.21.2" criterion = { version = "0.5.1", features = ["html_reports", "async_tokio"] } -itertools = { version = "0.10.5", default-features = false } +itertools = { version = "0.11.0", default-features = false, features = ["use_alloc"] } libc = "0.2.146" similar-asserts = "1.4.2" proptest = "1.2" diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index c7f306b0e3783..b9d39b2c405ad 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -23,7 +23,7 @@ glob = { version = "0.3.1", default-features = false } hex = "0.4.3" indexmap = { version = "1.9", default-features = false, features = ["serde"] } indicatif = { version = "0.17.5", features = ["improved_unicode"] } -itertools = "0.10.5" +itertools = "0.11.0" log = "0.4.19" once_cell = "1.18" os_info = { version = "3.7.0", default-features = false } From 6a6b42bedbd27dec0c91e274698785cc73f805df Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Mon, 26 Jun 2023 08:38:47 -0400 Subject: [PATCH 177/236] chore: Upgrade aws-smithy and aws-sdk crates (#17731) This PR changes the source of our AWS dependencies to track a fork to circumvent a currently unresolved bug upstream. Smithy crates are upgraded to `0.54.x` and sdk crates are upgraded to `0.24.x` Closes https://github.com/vectordotdev/vector/issues/17728 --------- Signed-off-by: Spencer Gilbert --- Cargo.lock | 491 +++++++++--------- Cargo.toml | 33 +- LICENSE-3rdparty.csv | 12 +- src/aws/auth.rs | 53 +- src/aws/mod.rs | 208 ++++---- src/aws/region.rs | 9 +- src/common/s3.rs | 6 +- src/sinks/aws_cloudwatch_logs/config.rs | 2 +- .../aws_cloudwatch_logs/integration_tests.rs | 9 +- src/sinks/aws_cloudwatch_logs/request.rs | 42 +- src/sinks/aws_cloudwatch_logs/retry.rs | 21 +- src/sinks/aws_cloudwatch_metrics/mod.rs | 2 +- src/sinks/aws_kinesis/firehose/config.rs | 6 +- .../aws_kinesis/firehose/integration_tests.rs | 4 +- src/sinks/aws_kinesis/streams/config.rs | 7 +- .../aws_kinesis/streams/integration_tests.rs | 2 +- src/sinks/aws_s3/integration_tests.rs | 4 +- src/sinks/aws_sqs/config.rs | 2 +- src/sinks/aws_sqs/integration_tests.rs | 6 +- src/sinks/elasticsearch/common.rs | 2 +- src/sinks/elasticsearch/service.rs | 2 +- src/sinks/prometheus/remote_write.rs | 2 +- src/sinks/s3_common/config.rs | 8 +- src/sources/aws_s3/mod.rs | 9 +- src/sources/aws_sqs/config.rs | 2 +- src/sources/aws_sqs/integration_tests.rs | 9 +- src/sources/aws_sqs/source.rs | 2 +- 27 files changed, 487 insertions(+), 468 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 100fa2b1c55a9..eceb55ace3aac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -282,6 +282,17 @@ dependencies = [ "term", ] +[[package]] +name = "assert-json-diff" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4259cbe96513d2f1073027a259fc2ca917feb3026a5a8d984e3628e490255cc0" +dependencies = [ + "extend", + "serde", + "serde_json", +] + [[package]] name = "assert-json-diff" version = "2.0.2" @@ -641,19 +652,19 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "aws-config" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56a636c44c77fa18bdba56126a34d30cfe5538fe88f7d34988fa731fee143ddd" +version = "0.54.1" 
+source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-http", "aws-sdk-sso", "aws-sdk-sts", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", "aws-smithy-json", - "aws-smithy-types 0.51.0", + "aws-smithy-types", "aws-types", "bytes 1.4.0", "hex", @@ -667,14 +678,25 @@ dependencies = [ "zeroize", ] +[[package]] +name = "aws-credential-types" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" +dependencies = [ + "aws-smithy-async", + "aws-smithy-types", + "tokio", + "tracing 0.1.37", + "zeroize", +] + [[package]] name = "aws-endpoint" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ca8f374874f6459aaa88dc861d7f5d834ca1ff97668eae190e97266b5f6c3fb" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ - "aws-smithy-http 0.51.0", - "aws-smithy-types 0.51.0", + "aws-smithy-http", + "aws-smithy-types", "aws-types", "http", "regex", @@ -683,12 +705,12 @@ dependencies = [ [[package]] name = "aws-http" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78d41e19e779b73463f5f0c21b3aacc995f4ba783ab13a7ae9f5dfb159a551b4" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ - "aws-smithy-http 0.51.0", - "aws-smithy-types 0.51.0", + "aws-credential-types", + "aws-smithy-http", + "aws-smithy-types", "aws-types", "bytes 1.4.0", "http", @@ -701,218 +723,236 @@ dependencies = [ [[package]] name = "aws-sdk-cloudwatch" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520b1ac14f0850d0d6a69136d15ba7702d41ee7f4014a5d2d1bf4a86e74f7a6b" +version = "0.24.0" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", "aws-smithy-query", - "aws-smithy-types 0.51.0", + "aws-smithy-types", "aws-smithy-xml", "aws-types", "bytes 1.4.0", "http", + "regex", "tokio-stream", "tower", ] [[package]] name = "aws-sdk-cloudwatchlogs" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89415e55b57044a09a7eb0a885c2d0af1aa7f95b373e0e898f71a28d7e7d10f9" +version = "0.24.0" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", "aws-smithy-json", - "aws-smithy-types 0.51.0", + "aws-smithy-types", "aws-types", "bytes 1.4.0", "http", + "regex", "tokio-stream", "tower", ] 
[[package]] name = "aws-sdk-elasticsearch" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9f4cc10278701dbc0d386ddd8cddfda2695eae7103a54eae11b981f28779ff2" +version = "0.24.0" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", "aws-smithy-json", - "aws-smithy-types 0.51.0", + "aws-smithy-types", "aws-types", "bytes 1.4.0", "http", + "regex", "tokio-stream", "tower", ] [[package]] name = "aws-sdk-firehose" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68310f9d7860b4fe73c58e5cec4d7a310a658d1a983fdf176eb35149939896a" +version = "0.24.0" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", "aws-smithy-json", - "aws-smithy-types 0.51.0", + "aws-smithy-types", "aws-types", "bytes 1.4.0", "http", + "regex", "tower", ] [[package]] name = "aws-sdk-kinesis" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37766fdf50feab317b4f939b1c9ee58a2a1c51785974328ce84cff1eea7a1bb8" +version = "0.24.0" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", "aws-smithy-json", - "aws-smithy-types 0.51.0", + "aws-smithy-types", "aws-types", "bytes 1.4.0", "http", + "regex", "tokio-stream", "tower", ] [[package]] name = "aws-sdk-s3" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9f08665c8e03aca8cb092ef01e617436ebfa977fddc1240e1b062488ab5d48a" +version = "0.24.0" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", - "aws-sigv4 0.51.0", + "aws-sigv4", "aws-smithy-async", "aws-smithy-checksums", "aws-smithy-client", "aws-smithy-eventstream", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", - "aws-smithy-types 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", + "aws-smithy-types", "aws-smithy-xml", "aws-types", "bytes 1.4.0", "bytes-utils", + "fastrand", "http", "http-body", + "once_cell", + "percent-encoding", + "regex", "tokio-stream", "tower", "tracing 0.1.37", + "url", ] [[package]] name = "aws-sdk-sqs" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b26bb3d12238492cb12bde0de8486679b007daada21fdb110913b32a2a38275" +version = "0.24.0" +source = 
"git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", "aws-smithy-query", - "aws-smithy-types 0.51.0", + "aws-smithy-types", "aws-smithy-xml", "aws-types", "bytes 1.4.0", "http", + "regex", "tokio-stream", "tower", ] [[package]] name = "aws-sdk-sso" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86dcb1cb71aa8763b327542ead410424515cff0cde5b753eedd2917e09c63734" +version = "0.24.0" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", "aws-smithy-json", - "aws-smithy-types 0.51.0", + "aws-smithy-types", "aws-types", "bytes 1.4.0", "http", + "regex", "tokio-stream", "tower", ] [[package]] name = "aws-sdk-sts" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdfcf584297c666f6b472d5368a78de3bc714b6e0a53d7fbf76c3e347c292ab1" +version = "0.24.0" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-endpoint", "aws-http", "aws-sig-auth", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-json", "aws-smithy-query", - "aws-smithy-types 0.51.0", + "aws-smithy-types", "aws-smithy-xml", "aws-types", "bytes 1.4.0", "http", + "regex", "tower", + "tracing 0.1.37", ] [[package]] name = "aws-sig-auth" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cbe7b2be9e185c1fbce27fc9c41c66b195b32d89aa099f98768d9544221308" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ - "aws-sigv4 0.51.0", + "aws-credential-types", + "aws-sigv4", "aws-smithy-eventstream", - "aws-smithy-http 0.51.0", + "aws-smithy-http", "aws-types", "http", "tracing 0.1.37", @@ -920,33 +960,14 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03ff4cff8c4a101962d593ba94e72cd83891aecd423f0c6e3146bff6fb92c9e3" +version = "0.54.2" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ "aws-smithy-eventstream", - "aws-smithy-http 0.51.0", + "aws-smithy-http", "bytes 1.4.0", "form_urlencoded", "hex", - "http", - "once_cell", - "percent-encoding", - "regex", - "ring", - "time", - "tracing 0.1.37", -] - -[[package]] -name = "aws-sigv4" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d2ce6f507be68e968a33485ced670111d1cbad161ddbbab1e313c03d37d8f4c" -dependencies = [ - "aws-smithy-http 0.55.3", - 
"form_urlencoded", - "hex", "hmac", "http", "once_cell", @@ -959,9 +980,8 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b3442b4c5d3fc39891a2e5e625735fba6b24694887d49c6518460fde98247a9" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ "futures-util", "pin-project-lite", @@ -971,12 +991,11 @@ dependencies = [ [[package]] name = "aws-smithy-checksums" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc227e36e346f45298288359f37123e1a92628d1cec6b11b5eb335553278bd9e" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ - "aws-smithy-http 0.51.0", - "aws-smithy-types 0.51.0", + "aws-smithy-http", + "aws-smithy-types", "bytes 1.4.0", "crc32c", "crc32fast", @@ -992,21 +1011,24 @@ dependencies = [ [[package]] name = "aws-smithy-client" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff28d553714f8f54cd921227934fc13a536a1c03f106e56b362fd57e16d450ad" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ "aws-smithy-async", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.51.0", - "aws-smithy-types 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-protocol-test", + "aws-smithy-types", "bytes 1.4.0", "fastrand", "http", "http-body", "hyper", + "hyper-rustls 0.23.1", "hyper-tls", + "lazy_static", "pin-project-lite", + "serde", "tokio", "tower", "tracing 0.1.37", @@ -1014,63 +1036,21 @@ dependencies = [ [[package]] name = "aws-smithy-eventstream" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7ea0df7161ce65b5c8ca6eb709a1a907376fa18226976e41c748ce02ccccf24" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ - "aws-smithy-types 0.51.0", + "aws-smithy-types", "bytes 1.4.0", "crc32fast", ] [[package]] name = "aws-smithy-http" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf58ed4fefa61dbf038e5421a521cbc2c448ef69deff0ab1d915d8a10eda5664" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ "aws-smithy-eventstream", - "aws-smithy-types 0.51.0", - "bytes 1.4.0", - "bytes-utils", - "futures-core", - "http", - "http-body", - "hyper", - "once_cell", - "percent-encoding", - "pin-project-lite", - "pin-utils", - "tracing 0.1.37", -] - -[[package]] -name = "aws-smithy-http" -version = "0.54.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "873f316f1833add0d3aa54ed1b0cd252ddd88c792a0cf839886400099971e844" -dependencies = [ - "aws-smithy-types 0.54.4", - "bytes 1.4.0", - "bytes-utils", - "futures-core", - "http", - "http-body", - "hyper", - "once_cell", - "percent-encoding", - "pin-project-lite", - "pin-utils", - "tracing 0.1.37", -] - -[[package]] -name = "aws-smithy-http" -version = "0.55.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b3b693869133551f135e1f2c77cb0b8277d9e3e17feaf2213f735857c4f0d28" -dependencies = [ - "aws-smithy-types 0.55.3", + "aws-smithy-types", "bytes 1.4.0", "bytes-utils", "futures-core", @@ -1086,11 +1066,11 @@ dependencies = [ [[package]] name = "aws-smithy-http-tower" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20c96d7bd35e7cf96aca1134b2f81b1b59ffe493f7c6539c051791cbbf7a42d3" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ - "aws-smithy-http 0.51.0", + "aws-smithy-http", + "aws-smithy-types", "bytes 1.4.0", "http", "http-body", @@ -1100,70 +1080,40 @@ dependencies = [ ] [[package]] -name = "aws-smithy-http-tower" -version = "0.54.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38231d3f5dac9ac7976f44e12803add1385119ffca9e5f050d8e980733d164" +name = "aws-smithy-json" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ - "aws-smithy-http 0.54.4", - "aws-smithy-types 0.54.4", - "bytes 1.4.0", - "http", - "http-body", - "pin-project-lite", - "tower", - "tracing 0.1.37", + "aws-smithy-types", ] [[package]] -name = "aws-smithy-json" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8324ba98c8a94187723cc16c37aefa09504646ee65c3d2c3af495bab5ea701b" +name = "aws-smithy-protocol-test" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ - "aws-smithy-types 0.51.0", + "assert-json-diff 1.1.0", + "http", + "pretty_assertions", + "regex", + "roxmltree 0.14.1", + "serde_json", + "thiserror", ] [[package]] name = "aws-smithy-query" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83834ed2ff69ea6f6657baf205267dc2c0abe940703503a3e5d60ce23be3d306" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ - "aws-smithy-types 0.51.0", + "aws-smithy-types", "urlencoding", ] [[package]] name = "aws-smithy-types" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b02e06ea63498c43bc0217ea4d16605d4e58d85c12fc23f6572ff6d0a840c61" -dependencies = [ - "itoa", - "num-integer", - "ryu", - "time", -] - -[[package]] -name = "aws-smithy-types" -version = "0.54.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8161232eda10290f5136610a1eb9de56aceaccd70c963a26a260af20ac24794f" -dependencies = [ - "base64-simd", - "itoa", - "num-integer", - "ryu", - "time", -] - -[[package]] -name = "aws-smithy-types" -version = "0.55.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16a3d0bf4f324f4ef9793b86a1701d9700fbcdbd12a846da45eed104c634c6e8" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ "base64-simd", "itoa", @@ -1174,27 +1124,25 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.51.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "246e9f83dd1fdf5d347fa30ae4ad30a9d1d42ce4cd74a93d94afa874646f94cd" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ "xmlparser", ] [[package]] name = "aws-types" -version = "0.51.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05701d32da168b44f7ee63147781aed8723e792cc131cb9b18363b5393f17f70" +version = "0.54.1" +source = "git+https://github.com/vectordotdev/aws-sdk-rust?rev=3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670#3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670" dependencies = [ + "aws-credential-types", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-types 0.51.0", + "aws-smithy-http", + "aws-smithy-types", "http", "rustc_version 0.4.0", "tracing 0.1.37", - "zeroize", ] [[package]] @@ -1384,12 +1332,11 @@ checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" [[package]] name = "base64-simd" -version = "0.8.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "339abbe78e73178762e23bea9dfd08e697eb3f3301cd4be981c0f78ba5859195" +checksum = "781dd20c3aff0bd194fe7d2a977dd92f21c173891f3a03b677359e5fa457e5d5" dependencies = [ - "outref", - "vsimd", + "simd-abstraction", ] [[package]] @@ -2428,6 +2375,16 @@ dependencies = [ "memchr", ] +[[package]] +name = "ctor" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" +dependencies = [ + "quote 1.0.28", + "syn 1.0.109", +] + [[package]] name = "ctr" version = "0.9.2" @@ -3079,6 +3036,18 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de853764b47027c2e862a995c34978ffa63c1501f2e15f987ba11bd4f9bba193" +[[package]] +name = "extend" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f47da3a72ec598d9c8937a7ebca8962a5c7a1f28444e38c2b33c771ba3f55f05" +dependencies = [ + "proc-macro-error", + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 1.0.109", +] + [[package]] name = "fakedata" version = "0.1.0" @@ -5706,11 +5675,20 @@ dependencies = [ "winapi", ] +[[package]] +name = "output_vt100" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" +dependencies = [ + "winapi", +] + [[package]] name = "outref" -version = "0.5.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a" +checksum = "7f222829ae9293e33a9f5e9f440c6760a3d450a64affe1846486b140db81c1f4" [[package]] name = "overload" @@ -6126,6 +6104,18 @@ dependencies = [ "termtree", ] +[[package]] +name = "pretty_assertions" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" +dependencies = [ + "ctor", + "diff", + "output_vt100", + "yansi", +] + [[package]] name = "prettydiff" version = "0.6.4" @@ -6909,6 +6899,15 @@ dependencies = [ "retain_mut", ] +[[package]] +name = "roxmltree" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "921904a62e410e37e215c40381b7117f830d9d89ba60ab5236170541dd25646b" 
+dependencies = [ + "xmlparser", +] + [[package]] name = "roxmltree" version = "0.18.0" @@ -7625,6 +7624,15 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" +[[package]] +name = "simd-abstraction" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cadb29c57caadc51ff8346233b5cec1d240b68ce55cf1afc764818791876987" +dependencies = [ + "outref", +] + [[package]] name = "similar" version = "2.2.1" @@ -9117,6 +9125,7 @@ dependencies = [ "async-trait", "atty", "aws-config", + "aws-credential-types", "aws-sdk-cloudwatch", "aws-sdk-cloudwatchlogs", "aws-sdk-elasticsearch", @@ -9124,12 +9133,12 @@ dependencies = [ "aws-sdk-kinesis", "aws-sdk-s3", "aws-sdk-sqs", - "aws-sigv4 0.55.3", + "aws-sigv4", "aws-smithy-async", "aws-smithy-client", - "aws-smithy-http 0.51.0", - "aws-smithy-http-tower 0.54.4", - "aws-smithy-types 0.51.0", + "aws-smithy-http", + "aws-smithy-http-tower", + "aws-smithy-types", "aws-types", "axum", "azure_core", @@ -9396,7 +9405,7 @@ dependencies = [ name = "vector-config" version = "0.1.0" dependencies = [ - "assert-json-diff", + "assert-json-diff 2.0.2", "chrono", "chrono-tz", "encoding_rs", @@ -9656,7 +9665,7 @@ dependencies = [ "quoted_printable", "rand 0.8.5", "regex", - "roxmltree", + "roxmltree 0.18.0", "rust_decimal", "rustyline", "seahash", @@ -9680,12 +9689,6 @@ dependencies = [ "zstd 0.12.3+zstd.1.5.2", ] -[[package]] -name = "vsimd" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" - [[package]] name = "vte" version = "0.10.1" @@ -10171,7 +10174,7 @@ version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6f71803d3a1c80377a06221e0530be02035d5b3e854af56c6ece7ac20ac441d" dependencies = [ - "assert-json-diff", + "assert-json-diff 2.0.2", "async-trait", "base64 0.21.2", "deadpool", @@ -10227,6 +10230,12 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "zerocopy" version = "0.6.1" diff --git a/Cargo.toml b/Cargo.toml index 3f01ce38a9d5c..9743c88b77259 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -158,21 +158,23 @@ metrics = "0.21.0" metrics-tracing-context = { version = "0.14.0", default-features = false } # AWS - Official SDK -aws-sdk-s3 = { version = "0.21.0", default-features = false, features = ["native-tls"], optional = true } -aws-sdk-sqs = { version = "0.21.0", default-features = false, features = ["native-tls"], optional = true } -aws-sdk-cloudwatch = { version = "0.21.0", default-features = false, features = ["native-tls"], optional = true } -aws-sdk-cloudwatchlogs = { version = "0.21.0", default-features = false, features = ["native-tls"], optional = true } -aws-sdk-elasticsearch = {version = "0.21.0", default-features = false, features = ["native-tls"], optional = true } -aws-sdk-firehose = { version = "0.21.0", default-features = false, features = ["native-tls"], optional = true } -aws-sdk-kinesis = { version = "0.21.0", default-features = false, features = ["native-tls"], optional = true } -aws-types = { version = "0.51.0", default-features = false, features = ["hardcoded-credentials"], optional = true } -aws-sigv4 = { version = "0.55.3", 
default-features = false, features = ["sign-http"], optional = true } -aws-config = { version = "0.51.0", default-features = false, features = ["native-tls"], optional = true } -aws-smithy-async = { version = "0.51.0", default-features = false, optional = true } -aws-smithy-client = { version = "0.51.0", default-features = false, features = ["client-hyper"], optional = true} -aws-smithy-http = { version = "0.51.0", default-features = false, features = ["event-stream"], optional = true } -aws-smithy-http-tower = { version = "0.54.4", default-features = false, optional = true } -aws-smithy-types = { version = "0.51.0", default-features = false, optional = true } +# depending on a fork to circumvent https://github.com/awslabs/aws-sdk-rust/issues/749 +aws-sdk-s3 = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["native-tls"], optional = true } +aws-sdk-sqs = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["native-tls"], optional = true } +aws-sdk-cloudwatch = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["native-tls"], optional = true } +aws-sdk-cloudwatchlogs = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["native-tls"], optional = true } +aws-sdk-elasticsearch = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["native-tls"], optional = true } +aws-sdk-firehose = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["native-tls"], optional = true } +aws-sdk-kinesis = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["native-tls"], optional = true } +aws-types = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, optional = true } +aws-sigv4 = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["sign-http"], optional = true } +aws-config = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["native-tls"], optional = true } +aws-credential-types = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["hardcoded-credentials"], optional = true } +aws-smithy-async = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, optional = true } +aws-smithy-client = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["client-hyper"], optional = true} +aws-smithy-http = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, features = ["event-stream"], optional = true } +aws-smithy-http-tower = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = 
"3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, optional = true } +aws-smithy-types = { git = "https://github.com/vectordotdev/aws-sdk-rust", rev = "3d6aefb7fcfced5fc2a7e761a87e4ddbda1ee670", default-features = false, optional = true } # Azure azure_core = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, features = ["enable_reqwest"], optional = true } @@ -434,6 +436,7 @@ api-client = [ aws-core = [ "aws-config", + "dep:aws-credential-types", "dep:aws-sigv4", "dep:aws-types", "dep:aws-smithy-async", diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 2af031a4db1ea..31740f86ce27b 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -18,6 +18,7 @@ arc-swap,https://github.com/vorner/arc-swap,MIT OR Apache-2.0,Michal 'vorner' Va arr_macro,https://github.com/JoshMcguigan/arr_macro,MIT OR Apache-2.0,Josh Mcguigan arrayvec,https://github.com/bluss/arrayvec,MIT OR Apache-2.0,bluss ascii,https://github.com/tomprogrammer/rust-ascii,Apache-2.0 OR MIT,"Thomas Bahn , Torbjørn Birch Moltu , Simon Sapin " +assert-json-diff,https://github.com/davidpdrsn/assert-json-diff,MIT,David Pedersen async-channel,https://github.com/smol-rs/async-channel,Apache-2.0 OR MIT,Stjepan Glavina async-compat,https://github.com/smol-rs/async-compat,Apache-2.0 OR MIT,Stjepan Glavina async-compression,https://github.com/Nemo157/async-compression,MIT OR Apache-2.0,"Wim Looman , Allen Bui " @@ -38,6 +39,7 @@ async-trait,https://github.com/dtolnay/async-trait,MIT OR Apache-2.0,David Tolna atomic-waker,https://github.com/stjepang/atomic-waker,Apache-2.0 OR MIT,Stjepan Glavina atty,https://github.com/softprops/atty,MIT,softprops aws-config,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust SDK Team , Russell Cohen " +aws-credential-types,https://github.com/awslabs/smithy-rs,Apache-2.0,AWS Rust SDK Team aws-endpoint,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust SDK Team , Russell Cohen " aws-http,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust SDK Team , Russell Cohen " aws-sdk-cloudwatch,https://github.com/awslabs/aws-sdk-rust,Apache-2.0,"AWS Rust SDK Team , Russell Cohen " @@ -57,6 +59,7 @@ aws-smithy-eventstream,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust aws-smithy-http,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust SDK Team , Russell Cohen " aws-smithy-http-tower,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust SDK Team , Russell Cohen " aws-smithy-json,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust SDK Team , John DiSanti " +aws-smithy-protocol-test,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust SDK Team , Russell Cohen " aws-smithy-query,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust SDK Team , John DiSanti " aws-smithy-types,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust SDK Team , Russell Cohen " aws-smithy-xml,https://github.com/awslabs/smithy-rs,Apache-2.0,"AWS Rust SDK Team , Russell Cohen " @@ -138,6 +141,7 @@ crossterm,https://github.com/crossterm-rs/crossterm,MIT,T. Post crossterm_winapi,https://github.com/crossterm-rs/crossterm-winapi,MIT,T. 
Post crypto-common,https://github.com/RustCrypto/traits,MIT OR Apache-2.0,RustCrypto Developers csv,https://github.com/BurntSushi/rust-csv,Unlicense OR MIT,Andrew Gallant +ctor,https://github.com/mmastrac/rust-ctor,Apache-2.0 OR MIT,Matt Mastracci ctr,https://github.com/RustCrypto/block-modes,MIT OR Apache-2.0,RustCrypto Developers cty,https://github.com/japaric/cty,MIT OR Apache-2.0,Jorge Aparicio curve25519-dalek,https://github.com/dalek-cryptography/curve25519-dalek,BSD-3-Clause,"Isis Lovecruft , Henry de Valence " @@ -151,6 +155,7 @@ der,https://github.com/RustCrypto/formats/tree/master/der,Apache-2.0 OR MIT,Rust derivative,https://github.com/mcarton/rust-derivative,MIT OR Apache-2.0,mcarton derive_arbitrary,https://github.com/rust-fuzz/arbitrary,MIT OR Apache-2.0,"The Rust-Fuzz Project Developers, Nick Fitzgerald , Manish Goregaokar , Andre Bogus , Corey Farwell " derive_more,https://github.com/JelteF/derive_more,MIT,Jelte Fennema +diff,https://github.com/utkarshkukreti/diff.rs,MIT OR Apache-2.0,Utkarsh Kukreti digest,https://github.com/RustCrypto/traits,MIT OR Apache-2.0,RustCrypto Developers dirs,https://github.com/soc/dirs-rs,MIT OR Apache-2.0,Simon Ochsenreither dirs-next,https://github.com/xdg-rs/dirs,MIT OR Apache-2.0,The @xdg-rs members @@ -177,6 +182,7 @@ error-code,https://github.com/DoumanAsh/error-code,BSL-1.0,Douman executor-trait,https://github.com/amqp-rs/executor-trait,Apache-2.0 OR MIT,Marc-Antoine Perennou exitcode,https://github.com/benwilber/exitcode,Apache-2.0,Ben Wilber +extend,https://github.com/davidpdrsn/ext,MIT,David Pedersen fakedata_generator,https://github.com/kevingimbel/fakedata_generator,MIT,Kevin Gimbel fallible-iterator,https://github.com/sfackler/rust-fallible-iterator,MIT OR Apache-2.0,Steven Fackler fastrand,https://github.com/smol-rs/fastrand,Apache-2.0 OR MIT,Stjepan Glavina @@ -351,6 +357,7 @@ openssl-macros,https://github.com/sfackler/rust-openssl,MIT OR Apache-2.0,The op openssl-probe,https://github.com/alexcrichton/openssl-probe,MIT OR Apache-2.0,Alex Crichton openssl-sys,https://github.com/sfackler/rust-openssl,MIT,"Alex Crichton , Steven Fackler " ordered-float,https://github.com/reem/rust-ordered-float,MIT,"Jonathan Reem , Matt Brubeck " +output_vt100,https://github.com/Phundrak/output-vt100-rs,MIT,Phuntsok Drak-pa outref,https://github.com/Nugine/outref,MIT,The outref Authors overload,https://github.com/danaugrs/overload,MIT,Daniel Salvadori pad,https://github.com/ogham/rust-pad,MIT,Ben S @@ -376,6 +383,7 @@ postgres-openssl,https://github.com/sfackler/rust-postgres,MIT OR Apache-2.0,Ste postgres-protocol,https://github.com/sfackler/rust-postgres,MIT OR Apache-2.0,Steven Fackler postgres-types,https://github.com/sfackler/rust-postgres,MIT OR Apache-2.0,Steven Fackler ppv-lite86,https://github.com/cryptocorrosion/cryptocorrosion,MIT OR Apache-2.0,The CryptoCorrosion Contributors +pretty_assertions,https://github.com/rust-pretty-assertions/rust-pretty-assertions,MIT OR Apache-2.0,"Colin Kiegel , Florent Fayolle , Tom Milligan " prettydiff,https://github.com/romankoblov/prettydiff,MIT,Roman Koblov prettytable-rs,https://github.com/phsym/prettytable-rs,BSD-3-Clause,Pierre-Henri Symoneaux proc-macro-crate,https://github.com/bkchr/proc-macro-crate,Apache-2.0 OR MIT,Bastian Köcher @@ -420,6 +428,7 @@ rmp,https://github.com/3Hren/msgpack-rust,MIT,Evgeny Safronov rmpv,https://github.com/3Hren/msgpack-rust,MIT,Evgeny Safronov roaring,https://github.com/RoaringBitmap/roaring-rs,MIT OR Apache-2.0,"Wim Looman , Kerollmops " 
+roxmltree,https://github.com/RazrFalcon/roxmltree,MIT OR Apache-2.0,Evgeniy Reizner roxmltree,https://github.com/RazrFalcon/roxmltree,MIT OR Apache-2.0,Yevhenii Reizner rust_decimal,https://github.com/paupino/rust-decimal,MIT,Paul Mason rustc-hash,https://github.com/rust-lang-nursery/rustc-hash,Apache-2.0 OR MIT,The Rust Project Developers @@ -473,6 +482,7 @@ signal-hook,https://github.com/vorner/signal-hook,Apache-2.0 OR MIT,"Michal 'vor signal-hook-registry,https://github.com/vorner/signal-hook,Apache-2.0 OR MIT,"Michal 'vorner' Vaner , Masaki Hara " signatory,https://github.com/iqlusioninc/crates/tree/main/signatory,Apache-2.0 OR MIT,Tony Arcieri signature,https://github.com/RustCrypto/traits/tree/master/signature,Apache-2.0 OR MIT,RustCrypto Developers +simd-abstraction,https://github.com/Nugine/simd,MIT,The simd-abstraction Authors simpl,https://github.com/durch/simplerr,MIT,Drazen Urch siphasher,https://github.com/jedisct1/rust-siphash,MIT OR Apache-2.0,Frank Denis sketches-ddsketch,https://github.com/mheffner/rust-sketches-ddsketch,Apache-2.0,Mike Heffner @@ -571,7 +581,6 @@ valuable,https://github.com/tokio-rs/valuable,MIT,The valuable Authors vec_map,https://github.com/contain-rs/vec-map,MIT OR Apache-2.0,"Alex Crichton , Jorge Aparicio , Alexis Beingessner , Brian Anderson <>, tbu- <>, Manish Goregaokar <>, Aaron Turon , Adolfo Ochagavía <>, Niko Matsakis <>, Steven Fackler <>, Chase Southwood , Eduard Burtescu <>, Florian Wilkens <>, Félix Raimundo <>, Tibor Benke <>, Markus Siemens , Josh Branchaud , Huon Wilson , Corey Farwell , Aaron Liblong <>, Nick Cameron , Patrick Walton , Felix S Klock II <>, Andrew Paseltiner , Sean McArthur , Vadim Petrochenkov <>" void,https://github.com/reem/rust-void,MIT,Jonathan Reem vrl,https://github.com/vectordotdev/vrl,MPL-2.0,Vector Contributors -vsimd,https://github.com/Nugine/simd,MIT,The vsimd Authors vte,https://github.com/alacritty/vte,Apache-2.0 OR MIT,"Joe Wilm , Christian Duerr " vte_generate_state_changes,https://github.com/jwilm/vte,Apache-2.0 OR MIT,Christian Duerr wait-timeout,https://github.com/alexcrichton/wait-timeout,MIT OR Apache-2.0,Alex Crichton @@ -613,6 +622,7 @@ wyz,https://github.com/myrrlyn/wyz,MIT,myrrlyn xml-rs,https://github.com/kornelski/xml-rs,MIT,Vladimir Matveev xmlparser,https://github.com/RazrFalcon/xmlparser,MIT OR Apache-2.0,Evgeniy Reizner yaml-rust,https://github.com/chyh1990/yaml-rust,MIT OR Apache-2.0,Yuheng Chen +yansi,https://github.com/SergioBenitez/yansi,MIT OR Apache-2.0,Sergio Benitez zerocopy,https://fuchsia.googlesource.com/fuchsia/+/HEAD/src/lib/zerocopy,BSD-2-Clause,Joshua Liebow-Feeser zerocopy-derive,https://github.com/google/zerocopy,BSD-2-Clause,Joshua Liebow-Feeser zeroize,https://github.com/RustCrypto/utils/tree/master/zeroize,Apache-2.0 OR MIT,The RustCrypto Project Developers diff --git a/src/aws/auth.rs b/src/aws/auth.rs index 899ca39087bd9..f95256fcf69a1 100644 --- a/src/aws/auth.rs +++ b/src/aws/auth.rs @@ -9,7 +9,10 @@ use aws_config::{ }, sts::AssumeRoleProviderBuilder, }; -use aws_types::{credentials::SharedCredentialsProvider, region::Region, Credentials}; +use aws_credential_types::{ + cache::CredentialsCache, provider::SharedCredentialsProvider, Credentials, +}; +use aws_types::region::Region; use serde_with::serde_as; use vector_common::sensitive_string::SensitiveString; use vector_config::configurable_component; @@ -165,6 +168,28 @@ fn default_profile() -> String { } impl AwsAuthentication { + pub async fn credentials_cache(&self) -> crate::Result { + match self { + 
AwsAuthentication::Role { + load_timeout_secs, .. + } + | AwsAuthentication::Default { + load_timeout_secs, .. + } => { + let credentials_cache = CredentialsCache::lazy_builder() + .load_timeout( + load_timeout_secs + .map(Duration::from_secs) + .unwrap_or(DEFAULT_LOAD_TIMEOUT), + ) + .into_credentials_cache(); + + Ok(credentials_cache) + } + _ => Ok(CredentialsCache::lazy()), + } + } + pub async fn credentials_provider( &self, service_region: Region, @@ -207,28 +232,20 @@ impl AwsAuthentication { } AwsAuthentication::Role { assume_role, - load_timeout_secs, imds, region, + .. } => { let auth_region = region.clone().map(Region::new).unwrap_or(service_region); let provider = AssumeRoleProviderBuilder::new(assume_role) .region(auth_region.clone()) - .build( - default_credentials_provider(auth_region, *load_timeout_secs, *imds) - .await?, - ); + .build(default_credentials_provider(auth_region, *imds).await?); Ok(SharedCredentialsProvider::new(provider)) } - AwsAuthentication::Default { - load_timeout_secs, - imds, - region, - } => Ok(SharedCredentialsProvider::new( + AwsAuthentication::Default { imds, region, .. } => Ok(SharedCredentialsProvider::new( default_credentials_provider( region.clone().map(Region::new).unwrap_or(service_region), - *load_timeout_secs, *imds, ) .await?, @@ -249,7 +266,6 @@ impl AwsAuthentication { async fn default_credentials_provider( region: Region, - load_timeout_secs: Option, imds: ImdsAuthentication, ) -> crate::Result { let client = imds::Client::builder() @@ -259,16 +275,13 @@ async fn default_credentials_provider( .build() .await?; - let chain = DefaultCredentialsChain::builder() + let credentials_provider = DefaultCredentialsChain::builder() .region(region) .imds_client(client) - .load_timeout( - load_timeout_secs - .map(Duration::from_secs) - .unwrap_or(DEFAULT_LOAD_TIMEOUT), - ); + .build() + .await; - Ok(SharedCredentialsProvider::new(chain.build().await)) + Ok(SharedCredentialsProvider::new(credentials_provider)) } #[cfg(test)] diff --git a/src/aws/mod.rs b/src/aws/mod.rs index fcd2c1c05f117..ec0351f82fcc1 100644 --- a/src/aws/mod.rs +++ b/src/aws/mod.rs @@ -2,6 +2,7 @@ pub mod auth; pub mod region; +use std::error::Error; use std::future::Future; use std::pin::Pin; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -11,22 +12,23 @@ use std::time::SystemTime; pub use auth::{AwsAuthentication, ImdsAuthentication}; use aws_config::meta::region::ProvideRegion; +use aws_credential_types::provider::{ProvideCredentials, SharedCredentialsProvider}; use aws_sigv4::http_request::{SignableRequest, SigningSettings}; use aws_sigv4::SigningParams; use aws_smithy_async::rt::sleep::TokioSleep; use aws_smithy_client::bounds::SmithyMiddleware; use aws_smithy_client::erase::{DynConnector, DynMiddleware}; use aws_smithy_client::{Builder, SdkError}; -use aws_smithy_http::callback::BodyCallback; -use aws_smithy_http::endpoint::Endpoint; -use aws_smithy_http::event_stream::BoxError; +use aws_smithy_http::body::{BoxBody, SdkBody}; use aws_smithy_http::operation::{Request, Response}; use aws_smithy_types::retry::RetryConfig; -use aws_types::credentials::{ProvideCredentials, SharedCredentialsProvider}; use aws_types::region::Region; use aws_types::SdkConfig; use bytes::Bytes; +use http::HeaderMap; +use http_body::Body; use once_cell::sync::OnceCell; +use pin_project::pin_project; use regex::RegexSet; pub use region::RegionOrEndpoint; use tower::{Layer, Service, ServiceBuilder}; @@ -42,41 +44,48 @@ pub fn is_retriable_error(error: &SdkError) -> bool { match error { 
SdkError::TimeoutError(_) | SdkError::DispatchFailure(_) => true, SdkError::ConstructionFailure(_) => false, - SdkError::ResponseError { err: _, raw } | SdkError::ServiceError { err: _, raw } => { - // This header is a direct indication that we should retry the request. Eventually it'd - // be nice to actually schedule the retry after the given delay, but for now we just - // check that it contains a positive value. - let retry_header = raw.http().headers().get("x-amz-retry-after").is_some(); - - // Certain 400-level responses will contain an error code indicating that the request - // should be retried. Since we don't retry 400-level responses by default, we'll look - // for these specifically before falling back to more general heuristics. Because AWS - // services use a mix of XML and JSON response bodies and the AWS SDK doesn't give us - // a parsed representation, we resort to a simple string match. - // - // S3: RequestTimeout - // SQS: RequestExpired, ThrottlingException - // ECS: RequestExpired, ThrottlingException - // Kinesis: RequestExpired, ThrottlingException - // Cloudwatch: RequestExpired, ThrottlingException - // - // Now just look for those when it's a client_error - let re = RETRIABLE_CODES.get_or_init(|| { - RegexSet::new(["RequestTimeout", "RequestExpired", "ThrottlingException"]) - .expect("invalid regex") - }); - - let status = raw.http().status(); - let response_body = String::from_utf8_lossy(raw.http().body().bytes().unwrap_or(&[])); - - retry_header - || status.is_server_error() - || status == http::StatusCode::TOO_MANY_REQUESTS - || (status.is_client_error() && re.is_match(response_body.as_ref())) + SdkError::ResponseError(err) => check_response(err.raw()), + SdkError::ServiceError(err) => check_response(err.raw()), + _ => { + warn!("AWS returned unknown error, retrying request."); + true } } } +fn check_response(res: &Response) -> bool { + // This header is a direct indication that we should retry the request. Eventually it'd + // be nice to actually schedule the retry after the given delay, but for now we just + // check that it contains a positive value. + let retry_header = res.http().headers().get("x-amz-retry-after").is_some(); + + // Certain 400-level responses will contain an error code indicating that the request + // should be retried. Since we don't retry 400-level responses by default, we'll look + // for these specifically before falling back to more general heuristics. Because AWS + // services use a mix of XML and JSON response bodies and the AWS SDK doesn't give us + // a parsed representation, we resort to a simple string match. 
+ // + // S3: RequestTimeout + // SQS: RequestExpired, ThrottlingException + // ECS: RequestExpired, ThrottlingException + // Kinesis: RequestExpired, ThrottlingException + // Cloudwatch: RequestExpired, ThrottlingException + // + // Now just look for those when it's a client_error + let re = RETRIABLE_CODES.get_or_init(|| { + RegexSet::new(["RequestTimeout", "RequestExpired", "ThrottlingException"]) + .expect("invalid regex") + }); + + let status = res.http().status(); + let response_body = String::from_utf8_lossy(res.http().body().bytes().unwrap_or(&[])); + + retry_header + || status.is_server_error() + || status == http::StatusCode::TOO_MANY_REQUESTS + || (status.is_client_error() && re.is_match(response_body.as_ref())) +} + pub trait ClientBuilder { type Config; type Client; @@ -84,7 +93,7 @@ pub trait ClientBuilder { fn default_middleware() -> Self::DefaultMiddleware; - fn build(client: aws_smithy_client::Client, config: &aws_types::SdkConfig) -> Self::Client; + fn build(client: aws_smithy_client::Client, config: &SdkConfig) -> Self::Client; } pub async fn create_smithy_client( @@ -99,11 +108,11 @@ pub async fn create_smithy_client( let connector = if proxy.enabled { let proxy = build_proxy_connector(tls_settings, proxy)?; let hyper_client = aws_smithy_client::hyper_ext::Adapter::builder().build(proxy); - aws_smithy_client::erase::DynConnector::new(hyper_client) + DynConnector::new(hyper_client) } else { let tls_connector = build_tls_connector(tls_settings)?; let hyper_client = aws_smithy_client::hyper_ext::Adapter::builder().build(tls_connector); - aws_smithy_client::erase::DynConnector::new(hyper_client) + DynConnector::new(hyper_client) }; let middleware_builder = ServiceBuilder::new() @@ -135,7 +144,7 @@ pub async fn resolve_region(region: Option) -> crate::Result { pub async fn create_client( auth: &AwsAuthentication, region: Option, - endpoint: Option, + endpoint: Option, proxy: &ProxyConfig, tls_options: &Option, is_sink: bool, @@ -148,12 +157,13 @@ pub async fn create_client( // Build the configuration first. let mut config_builder = SdkConfig::builder() + .credentials_cache(auth.credentials_cache().await?) .credentials_provider(auth.credentials_provider(region.clone()).await?) .region(region.clone()) .retry_config(retry_config.clone()); if let Some(endpoint_override) = endpoint { - config_builder = config_builder.endpoint_resolver(endpoint_override); + config_builder = config_builder.endpoint_url(endpoint_override); } let config = config_builder.build(); @@ -230,23 +240,36 @@ where { type Response = S::Response; type Error = S::Error; - type Future = - Pin> + Send>>; + type Future = Pin> + Send>>; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.inner.poll_ready(cx) } - fn call(&mut self, mut req: Request) -> Self::Future { + fn call(&mut self, req: Request) -> Self::Future { // Attach a body callback that will capture the bytes sent by interrogating the body chunks that get read as it // sends the request out over the wire. We'll read the shared atomic counter, which will contain the number of // bytes "read", aka the bytes it actually sent, if and only if we get back a successful response. 
- let maybe_bytes_sent = self.enabled.then(|| { - let (callback, shared_bytes_sent) = BodyCaptureCallback::new(); - req.http_mut().body_mut().with_callback(Box::new(callback)); + let (req, maybe_bytes_sent) = if self.enabled { + let shared_bytes_sent = Arc::new(AtomicUsize::new(0)); + let (request, properties) = req.into_parts(); + let (parts, body) = request.into_parts(); + + let body = { + let shared_bytes_sent = Arc::clone(&shared_bytes_sent); + + body.map_immutable(move |body| { + let body = MeasuredBody::new(body, Arc::clone(&shared_bytes_sent)); + SdkBody::from_dyn(BoxBody::new(body)) + }) + }; - shared_bytes_sent - }); + let req = Request::from_parts(http::Request::from_parts(parts, body), properties); + + (req, Some(shared_bytes_sent)) + } else { + (req, None) + }; let region = self.region.clone(); let fut = self.inner.call(req); @@ -275,69 +298,48 @@ where } } -struct BodyCaptureCallback { - bytes_sent: usize, +#[pin_project] +struct MeasuredBody { + #[pin] + inner: SdkBody, shared_bytes_sent: Arc, } -impl BodyCaptureCallback { - fn new() -> (Self, Arc) { - let shared_bytes_sent = Arc::new(AtomicUsize::new(0)); - - ( - Self { - bytes_sent: 0, - shared_bytes_sent: Arc::clone(&shared_bytes_sent), - }, +impl MeasuredBody { + fn new(body: SdkBody, shared_bytes_sent: Arc) -> Self { + Self { + inner: body, shared_bytes_sent, - ) + } } } -impl BodyCallback for BodyCaptureCallback { - fn update(&mut self, bytes: &[u8]) -> Result<(), BoxError> { - // This gets called every time a chunk is read from the request body, which includes both static chunks and - // streaming bodies. Just add the chunk's length to our running tally. - self.bytes_sent += bytes.len(); - Ok(()) - } - - fn trailers(&self) -> Result>, BoxError> { - Ok(None) - } - - fn make_new(&self) -> Box { - // We technically don't use retries within the AWS side of the API clients, but we have to satisfy this trait - // method, because `aws_smithy_http` uses the retry layer from `tower`, which clones the request regardless - // before it even executes the first attempt... so there's no reason not to make it technically correct. - Box::new(Self { - bytes_sent: 0, - shared_bytes_sent: Arc::clone(&self.shared_bytes_sent), - }) +impl Body for MeasuredBody { + type Data = Bytes; + type Error = Box; + + fn poll_data( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll>> { + let this = self.project(); + + match this.inner.poll_data(cx) { + Poll::Ready(Some(Ok(data))) => { + this.shared_bytes_sent + .fetch_add(data.len(), Ordering::Release); + Poll::Ready(Some(Ok(data))) + } + Poll::Ready(None) => Poll::Ready(None), + Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))), + Poll::Pending => Poll::Pending, + } } -} -impl Drop for BodyCaptureCallback { - fn drop(&mut self) { - // This is where we actually emit. We specifically emit here, and not in `trailers`, because despite the - // documentation that `trailers` is called after all chunks of the body are successfully read, `hyper` won't - // continue polling a body if it knows it's gotten all the available bytes i.e. it doesn't necessarily drive it - // until `poll_data` returns `None`. This means the only consistent place to know that the body is "done" is - // when it's dropped. - // - // We update our shared atomic counter with the total bytes sent that we accumulated, and it will read the - // atomic if the response indicates that the request was successful. 
Since we know the body will go out-of-scope - // before a response can possibly be generated, we know the atomic will in turn be updated before it is read. - // - // This design also copes with the fact that, technically, `aws_smithy_client` supports retries and could clone - // this callback for each copy of the request... which it already does at least once per request since the retry - // middleware has to clone the request before trying it. As requests are retried sequentially, only after the - // previous attempt failed, we know that we'll end up in a "last write wins" scenario, so this is still sound. - // - // In the future, we may track every single byte sent in order to generate "raw bytes over the wire, regardless - // of status" metrics, but right now, this is purely "how many bytes have we sent as part of _successful_ - // sends?" - self.shared_bytes_sent - .store(self.bytes_sent, Ordering::Release); + fn poll_trailers( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll, Self::Error>> { + Poll::Ready(Ok(None)) } } diff --git a/src/aws/region.rs b/src/aws/region.rs index fa8a45c6f38a0..b9db93a56bf5f 100644 --- a/src/aws/region.rs +++ b/src/aws/region.rs @@ -1,8 +1,4 @@ -use std::str::FromStr; - -use aws_smithy_http::endpoint::Endpoint; use aws_types::region::Region; -use http::Uri; use vector_config::configurable_component; /// Configuration of the region/endpoint to use when interacting with an AWS service. @@ -37,9 +33,8 @@ impl RegionOrEndpoint { } } - pub fn endpoint(&self) -> crate::Result> { - let uri = self.endpoint.as_deref().map(Uri::from_str).transpose()?; - Ok(uri.map(Endpoint::immutable)) + pub fn endpoint(&self) -> Option { + self.endpoint.clone() } pub fn region(&self) -> Option { diff --git a/src/common/s3.rs b/src/common/s3.rs index 4376908f9c6ea..c8eb80572a412 100644 --- a/src/common/s3.rs +++ b/src/common/s3.rs @@ -12,6 +12,10 @@ impl ClientBuilder for S3ClientBuilder { } fn build(client: aws_smithy_client::Client, config: &aws_types::SdkConfig) -> Self::Client { - aws_sdk_s3::client::Client::with_config(client, config.into()) + let config = aws_sdk_s3::config::Builder::from(config) + .force_path_style(true) + .build(); + + aws_sdk_s3::client::Client::with_config(client, config) } } diff --git a/src/sinks/aws_cloudwatch_logs/config.rs b/src/sinks/aws_cloudwatch_logs/config.rs index a03811e4612ba..0be371f8b67cd 100644 --- a/src/sinks/aws_cloudwatch_logs/config.rs +++ b/src/sinks/aws_cloudwatch_logs/config.rs @@ -139,7 +139,7 @@ impl CloudwatchLogsSinkConfig { create_client::( &self.auth, self.region.region(), - self.region.endpoint()?, + self.region.endpoint(), proxy, &self.tls, true, diff --git a/src/sinks/aws_cloudwatch_logs/integration_tests.rs b/src/sinks/aws_cloudwatch_logs/integration_tests.rs index a8b85e8428504..ece20f2fa1311 100644 --- a/src/sinks/aws_cloudwatch_logs/integration_tests.rs +++ b/src/sinks/aws_cloudwatch_logs/integration_tests.rs @@ -1,12 +1,10 @@ use std::convert::TryFrom; -use std::str::FromStr; use aws_sdk_cloudwatchlogs::Client as CloudwatchLogsClient; -use aws_sdk_cloudwatchlogs::{Endpoint, Region}; +use aws_sdk_cloudwatchlogs::Region; use chrono::Duration; use codecs::TextSerializerConfig; use futures::{stream, StreamExt}; -use http::Uri; use similar_asserts::assert_eq; use super::*; @@ -461,10 +459,7 @@ async fn cloudwatch_healthcheck() { async fn create_client_test() -> CloudwatchLogsClient { let auth = AwsAuthentication::test_auth(); let region = Some(Region::new("localstack")); - let watchlogs_address = 
watchlogs_address(); - let endpoint = Some(Endpoint::immutable( - Uri::from_str(&watchlogs_address).unwrap(), - )); + let endpoint = Some(watchlogs_address()); let proxy = ProxyConfig::default(); create_client::(&auth, region, endpoint, &proxy, &None, true) diff --git a/src/sinks/aws_cloudwatch_logs/request.rs b/src/sinks/aws_cloudwatch_logs/request.rs index 6e7ff7397a620..bd0942e89789a 100644 --- a/src/sinks/aws_cloudwatch_logs/request.rs +++ b/src/sinks/aws_cloudwatch_logs/request.rs @@ -14,8 +14,9 @@ use aws_sdk_cloudwatchlogs::model::InputLogEvent; use aws_sdk_cloudwatchlogs::output::{DescribeLogStreamsOutput, PutLogEventsOutput}; use aws_sdk_cloudwatchlogs::types::SdkError; use aws_sdk_cloudwatchlogs::Client as CloudwatchLogsClient; -use aws_smithy_http::operation::{Operation, Request}; use futures::{future::BoxFuture, FutureExt}; +use http::header::HeaderName; +use http::HeaderValue; use indexmap::IndexMap; use tokio::sync::oneshot; @@ -101,9 +102,9 @@ impl Future for CloudwatchFuture { let response = match ready!(fut.poll_unpin(cx)) { Ok(response) => response, Err(err) => { - if let SdkError::ServiceError { err, raw: _ } = &err { + if let SdkError::ServiceError(inner) = &err { if let DescribeLogStreamsErrorKind::ResourceNotFoundException(_) = - err.kind + inner.err().kind { if self.create_missing_group { info!("Log group provided does not exist; creating a new one."); @@ -148,8 +149,8 @@ impl Future for CloudwatchFuture { Ok(_) => {} Err(err) => { let resource_already_exists = match &err { - SdkError::ServiceError { err, raw: _ } => matches!( - err.kind, + SdkError::ServiceError(inner) => matches!( + inner.err().kind, CreateLogGroupErrorKind::ResourceAlreadyExistsException(_) ), _ => false, @@ -173,8 +174,8 @@ impl Future for CloudwatchFuture { Ok(_) => {} Err(err) => { let resource_already_exists = match &err { - SdkError::ServiceError { err, raw: _ } => matches!( - err.kind, + SdkError::ServiceError(inner) => matches!( + inner.err().kind, CreateLogStreamErrorKind::ResourceAlreadyExistsException(_) ), _ => false, @@ -227,39 +228,34 @@ impl Client { let group_name = self.group_name.clone(); let stream_name = self.stream_name.clone(); let headers = self.headers.clone(); + Box::pin(async move { // #12760 this is a relatively convoluted way of changing the headers of a request // about to be sent. https://github.com/awslabs/aws-sdk-rust/issues/537 should // eventually make this better. - let op = PutLogEvents::builder() + let mut op = PutLogEvents::builder() .set_log_events(Some(log_events)) .set_sequence_token(sequence_token) .log_group_name(group_name) .log_stream_name(stream_name) .build() - .map_err(|err| SdkError::ConstructionFailure(err.into()))? + .map_err(SdkError::construction_failure)? 
.make_operation(cw_client.conf()) .await - .map_err(|err| SdkError::ConstructionFailure(err.into()))?; + .map_err(SdkError::construction_failure)?; - let (req, parts) = op.into_request_response(); - let (mut body, props) = req.into_parts(); for (header, value) in headers.iter() { let owned_header = header.clone(); let owned_value = value.clone(); - body.headers_mut().insert( - http::header::HeaderName::from_bytes(owned_header.as_bytes()) - .map_err(|err| SdkError::ConstructionFailure(err.into()))?, - http::HeaderValue::from_str(owned_value.as_str()) - .map_err(|err| SdkError::ConstructionFailure(err.into()))?, + op.request_mut().headers_mut().insert( + HeaderName::from_bytes(owned_header.as_bytes()) + .map_err(SdkError::construction_failure)?, + HeaderValue::from_str(owned_value.as_str()) + .map_err(SdkError::construction_failure)?, ); } - client - .call(Operation::from_parts( - Request::from_parts(body, props), - parts, - )) - .await + + client.call(op).await }) } diff --git a/src/sinks/aws_cloudwatch_logs/retry.rs b/src/sinks/aws_cloudwatch_logs/retry.rs index f3a03e48645e4..c089f532f5dd0 100644 --- a/src/sinks/aws_cloudwatch_logs/retry.rs +++ b/src/sinks/aws_cloudwatch_logs/retry.rs @@ -32,11 +32,13 @@ impl RetryLogic for CloudwatchRetryLogic { type Error = CloudwatchError; type Response = T; + // TODO this match may not be necessary given the logic in `is_retriable_error()` #[allow(clippy::cognitive_complexity)] // long, but just a hair over our limit fn is_retriable_error(&self, error: &Self::Error) -> bool { match error { CloudwatchError::Put(err) => { - if let SdkError::ServiceError { err, raw: _ } = err { + if let SdkError::ServiceError(inner) = err { + let err = inner.err(); if let PutLogEventsErrorKind::ServiceUnavailableException(_) = err.kind { return true; } @@ -44,7 +46,8 @@ impl RetryLogic for CloudwatchRetryLogic { is_retriable_error(err) } CloudwatchError::Describe(err) => { - if let SdkError::ServiceError { err, raw: _ } = err { + if let SdkError::ServiceError(inner) = err { + let err = inner.err(); if let DescribeLogStreamsErrorKind::ServiceUnavailableException(_) = err.kind { return true; } @@ -52,7 +55,8 @@ impl RetryLogic for CloudwatchRetryLogic { is_retriable_error(err) } CloudwatchError::CreateStream(err) => { - if let SdkError::ServiceError { err, raw: _ } = err { + if let SdkError::ServiceError(inner) = err { + let err = inner.err(); if let CreateLogStreamErrorKind::ServiceUnavailableException(_) = err.kind { return true; } @@ -66,7 +70,7 @@ impl RetryLogic for CloudwatchRetryLogic { #[cfg(test)] mod test { - use aws_sdk_cloudwatchlogs::error::{PutLogEventsError, PutLogEventsErrorKind}; + use aws_sdk_cloudwatchlogs::error::PutLogEventsError; use aws_sdk_cloudwatchlogs::types::SdkError; use aws_smithy_http::body::SdkBody; use aws_smithy_http::operation::Response; @@ -89,13 +93,10 @@ mod test { *http_response.status_mut() = http::StatusCode::BAD_REQUEST; let raw = Response::new(http_response); - let err = CloudwatchError::Put(SdkError::ServiceError { - err: PutLogEventsError::new( - PutLogEventsErrorKind::Unhandled(Box::new(meta_err.clone())), - meta_err, - ), + let err = CloudwatchError::Put(SdkError::service_error( + PutLogEventsError::unhandled(meta_err), raw, - }); + )); assert!(retry_logic.is_retriable_error(&err)); } } diff --git a/src/sinks/aws_cloudwatch_metrics/mod.rs b/src/sinks/aws_cloudwatch_metrics/mod.rs index a1f22ca212e53..9a507d91a3795 100644 --- a/src/sinks/aws_cloudwatch_metrics/mod.rs +++ b/src/sinks/aws_cloudwatch_metrics/mod.rs @@ -172,7 
+172,7 @@ impl CloudWatchMetricsSinkConfig { create_client::( &self.auth, region, - self.region.endpoint()?, + self.region.endpoint(), proxy, &self.tls, true, diff --git a/src/sinks/aws_kinesis/firehose/config.rs b/src/sinks/aws_kinesis/firehose/config.rs index d5bbd93f06c78..ed601bf488bd1 100644 --- a/src/sinks/aws_kinesis/firehose/config.rs +++ b/src/sinks/aws_kinesis/firehose/config.rs @@ -112,7 +112,7 @@ impl KinesisFirehoseSinkConfig { create_client::( &self.base.auth, self.base.region.region(), - self.base.region.endpoint()?, + self.base.region.endpoint(), proxy, &self.base.tls, true, @@ -183,8 +183,8 @@ impl RetryLogic for KinesisRetryLogic { type Response = KinesisResponse; fn is_retriable_error(&self, error: &Self::Error) -> bool { - if let SdkError::ServiceError { err, raw: _ } = error { - if let PutRecordBatchErrorKind::ServiceUnavailableException(_) = err.kind { + if let SdkError::ServiceError(inner) = error { + if let PutRecordBatchErrorKind::ServiceUnavailableException(_) = inner.err().kind { return true; } } diff --git a/src/sinks/aws_kinesis/firehose/integration_tests.rs b/src/sinks/aws_kinesis/firehose/integration_tests.rs index 9a4b903811ba7..42d72009cace6 100644 --- a/src/sinks/aws_kinesis/firehose/integration_tests.rs +++ b/src/sinks/aws_kinesis/firehose/integration_tests.rs @@ -139,7 +139,7 @@ async fn firehose_client() -> aws_sdk_firehose::Client { create_client::( &auth, region_endpoint.region(), - region_endpoint.endpoint().unwrap(), + region_endpoint.endpoint(), &proxy, &None, true, @@ -158,7 +158,7 @@ async fn ensure_elasticsearch_domain(domain_name: String) -> String { .await .unwrap(), ) - .endpoint_resolver(test_region_endpoint().endpoint().unwrap().unwrap()) + .endpoint_url(test_region_endpoint().endpoint().unwrap()) .region(test_region_endpoint().region()) .build(), ); diff --git a/src/sinks/aws_kinesis/streams/config.rs b/src/sinks/aws_kinesis/streams/config.rs index 8949f90d40352..4ebcf2a724e64 100644 --- a/src/sinks/aws_kinesis/streams/config.rs +++ b/src/sinks/aws_kinesis/streams/config.rs @@ -119,7 +119,7 @@ impl KinesisStreamsSinkConfig { create_client::( &self.base.auth, self.base.region.region(), - self.base.region.endpoint()?, + self.base.region.endpoint(), proxy, &self.base.tls, true, @@ -190,14 +190,15 @@ impl RetryLogic for KinesisRetryLogic { type Response = KinesisResponse; fn is_retriable_error(&self, error: &Self::Error) -> bool { - if let SdkError::ServiceError { err, raw: _ } = error { + if let SdkError::ServiceError(inner) = error { // Note that if the request partially fails (records sent to one // partition fail but the others do not, for example), Vector // does not retry. This line only covers a failure for the entire // request. 
// // https://github.com/vectordotdev/vector/issues/359 - if let PutRecordsErrorKind::ProvisionedThroughputExceededException(_) = err.kind { + if let PutRecordsErrorKind::ProvisionedThroughputExceededException(_) = inner.err().kind + { return true; } } diff --git a/src/sinks/aws_kinesis/streams/integration_tests.rs b/src/sinks/aws_kinesis/streams/integration_tests.rs index 8793aa520c024..6f25a733d7d08 100644 --- a/src/sinks/aws_kinesis/streams/integration_tests.rs +++ b/src/sinks/aws_kinesis/streams/integration_tests.rs @@ -175,7 +175,7 @@ async fn client() -> aws_sdk_kinesis::Client { create_client::( &auth, region.region(), - region.endpoint().unwrap(), + region.endpoint(), &proxy, &None, true, diff --git a/src/sinks/aws_s3/integration_tests.rs b/src/sinks/aws_s3/integration_tests.rs index 4b5c082b7aa91..e9ae49a4db181 100644 --- a/src/sinks/aws_s3/integration_tests.rs +++ b/src/sinks/aws_s3/integration_tests.rs @@ -488,7 +488,7 @@ async fn client() -> S3Client { create_client::( &auth, region.region(), - region.endpoint().unwrap(), + region.endpoint(), &proxy, &tls_options, true, @@ -545,7 +545,7 @@ async fn create_bucket(bucket: &str, object_lock_enabled: bool) { { Ok(_) => {} Err(err) => match err { - SdkError::ServiceError { err, raw: _ } => match err.kind { + SdkError::ServiceError(inner) => match &inner.err().kind { CreateBucketErrorKind::BucketAlreadyOwnedByYou(_) => {} err => panic!("Failed to create bucket: {:?}", err), }, diff --git a/src/sinks/aws_sqs/config.rs b/src/sinks/aws_sqs/config.rs index 45579f52925bc..984f40aa7bf40 100644 --- a/src/sinks/aws_sqs/config.rs +++ b/src/sinks/aws_sqs/config.rs @@ -153,7 +153,7 @@ impl SqsSinkConfig { create_client::( &self.auth, self.region.region(), - self.region.endpoint()?, + self.region.endpoint(), proxy, &self.tls, true, diff --git a/src/sinks/aws_sqs/integration_tests.rs b/src/sinks/aws_sqs/integration_tests.rs index 7726844f2e073..e8d37f1266c79 100644 --- a/src/sinks/aws_sqs/integration_tests.rs +++ b/src/sinks/aws_sqs/integration_tests.rs @@ -1,11 +1,9 @@ #![cfg(all(test, feature = "aws-sqs-integration-tests"))] use std::collections::HashMap; -use std::str::FromStr; -use aws_sdk_sqs::{model::QueueAttributeName, Client as SqsClient, Endpoint, Region}; +use aws_sdk_sqs::{model::QueueAttributeName, Client as SqsClient, Region}; use codecs::TextSerializerConfig; -use http::Uri; use tokio::time::{sleep, Duration}; use super::{config::SqsSinkConfig, sink::SqsSink}; @@ -32,7 +30,7 @@ async fn create_test_client() -> SqsClient { create_client::( &auth, Some(Region::new("localstack")), - Some(Endpoint::immutable(Uri::from_str(&endpoint).unwrap())), + Some(endpoint), &proxy, &None, true, diff --git a/src/sinks/elasticsearch/common.rs b/src/sinks/elasticsearch/common.rs index c4da722838543..f1a90ac4c9576 100644 --- a/src/sinks/elasticsearch/common.rs +++ b/src/sinks/elasticsearch/common.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use aws_types::credentials::SharedCredentialsProvider; +use aws_credential_types::provider::SharedCredentialsProvider; use aws_types::region::Region; use bytes::{Buf, Bytes}; use http::{Response, StatusCode, Uri}; diff --git a/src/sinks/elasticsearch/service.rs b/src/sinks/elasticsearch/service.rs index 1baea684c07b2..bdf0824915de6 100644 --- a/src/sinks/elasticsearch/service.rs +++ b/src/sinks/elasticsearch/service.rs @@ -4,7 +4,7 @@ use std::{ task::{Context, Poll}, }; -use aws_types::credentials::SharedCredentialsProvider; +use aws_credential_types::provider::SharedCredentialsProvider; use 
aws_types::region::Region; use bytes::Bytes; use futures::future::BoxFuture; diff --git a/src/sinks/prometheus/remote_write.rs b/src/sinks/prometheus/remote_write.rs index 28c043080bc0a..1418cc2627924 100644 --- a/src/sinks/prometheus/remote_write.rs +++ b/src/sinks/prometheus/remote_write.rs @@ -2,7 +2,7 @@ use std::io::Read; use std::sync::Arc; use std::task; -use aws_types::credentials::SharedCredentialsProvider; +use aws_credential_types::provider::SharedCredentialsProvider; use aws_types::region::Region; use bytes::{Bytes, BytesMut}; use futures::{future::BoxFuture, stream, FutureExt, SinkExt}; diff --git a/src/sinks/s3_common/config.rs b/src/sinks/s3_common/config.rs index 9781af37a87ee..82f739f0605fa 100644 --- a/src/sinks/s3_common/config.rs +++ b/src/sinks/s3_common/config.rs @@ -299,9 +299,7 @@ impl From for ObjectCannedAcl { S3CannedAcl::AuthenticatedRead => ObjectCannedAcl::AuthenticatedRead, S3CannedAcl::BucketOwnerRead => ObjectCannedAcl::BucketOwnerRead, S3CannedAcl::BucketOwnerFullControl => ObjectCannedAcl::BucketOwnerFullControl, - S3CannedAcl::LogDeliveryWrite => { - ObjectCannedAcl::Unknown("log-delivery-write".to_string()) - } + S3CannedAcl::LogDeliveryWrite => ObjectCannedAcl::from("log-delivery-write"), } } } @@ -340,7 +338,7 @@ pub fn build_healthcheck(bucket: String, client: S3Client) -> crate::Result Ok(()), Err(error) => Err(match error { - SdkError::ServiceError { err: _, raw } => match raw.http().status() { + SdkError::ServiceError(inner) => match inner.into_raw().http().status() { StatusCode::FORBIDDEN => HealthcheckError::InvalidCredentials.into(), StatusCode::NOT_FOUND => HealthcheckError::UnknownBucket { bucket }.into(), status => HealthcheckError::UnknownStatus { status }.into(), @@ -359,7 +357,7 @@ pub async fn create_service( proxy: &ProxyConfig, tls_options: &Option, ) -> crate::Result { - let endpoint = region.endpoint()?; + let endpoint = region.endpoint(); let region = region.region(); let client = create_client::(auth, region.clone(), endpoint, proxy, tls_options, true) diff --git a/src/sources/aws_s3/mod.rs b/src/sources/aws_s3/mod.rs index 02a6cbcf6a9b5..0890a3869467a 100644 --- a/src/sources/aws_s3/mod.rs +++ b/src/sources/aws_s3/mod.rs @@ -228,10 +228,7 @@ impl AwsS3Config { .region() .ok_or(CreateSqsIngestorError::RegionMissing)?; - let endpoint = self - .region - .endpoint() - .map_err(|_| CreateSqsIngestorError::InvalidEndpoint)?; + let endpoint = self.region.endpoint(); let s3_client = create_client::( &self.auth, @@ -925,7 +922,7 @@ mod integration_tests { create_client::( &auth, region_endpoint.region(), - region_endpoint.endpoint().unwrap(), + region_endpoint.endpoint(), &proxy_config, &None, false, @@ -944,7 +941,7 @@ mod integration_tests { create_client::( &auth, region_endpoint.region(), - region_endpoint.endpoint().unwrap(), + region_endpoint.endpoint(), &proxy_config, &None, false, diff --git a/src/sources/aws_sqs/config.rs b/src/sources/aws_sqs/config.rs index 4f156cd47a3db..a4f8bbb7629c0 100644 --- a/src/sources/aws_sqs/config.rs +++ b/src/sources/aws_sqs/config.rs @@ -162,7 +162,7 @@ impl AwsSqsConfig { create_client::( &self.auth, self.region.region(), - self.region.endpoint()?, + self.region.endpoint(), &cx.proxy, &self.tls, false, diff --git a/src/sources/aws_sqs/integration_tests.rs b/src/sources/aws_sqs/integration_tests.rs index ee5d793840091..62ba72cbe63b0 100644 --- a/src/sources/aws_sqs/integration_tests.rs +++ b/src/sources/aws_sqs/integration_tests.rs @@ -1,12 +1,11 @@ #![cfg(feature = "aws-sqs-integration-tests")] 
#![cfg(test)] -use std::{collections::HashSet, str::FromStr, time::Duration}; +use std::{collections::HashSet, time::Duration}; -use aws_sdk_sqs::{output::CreateQueueOutput, Endpoint}; +use aws_sdk_sqs::output::CreateQueueOutput; use aws_types::region::Region; use futures::StreamExt; -use http::Uri; use tokio::time::timeout; use crate::{ @@ -58,9 +57,7 @@ async fn get_sqs_client() -> aws_sdk_sqs::Client { .await .unwrap(), ) - .endpoint_resolver(Endpoint::immutable( - Uri::from_str(sqs_address().as_str()).unwrap(), - )) + .endpoint_url(sqs_address()) .region(Some(Region::new("us-east-1"))) .build(); diff --git a/src/sources/aws_sqs/source.rs b/src/sources/aws_sqs/source.rs index 2d01647516e54..f05784ab4e5cb 100644 --- a/src/sources/aws_sqs/source.rs +++ b/src/sources/aws_sqs/source.rs @@ -110,7 +110,7 @@ impl SqsSource { .visibility_timeout(self.visibility_timeout_secs as i32) // I think this should be a known attribute // https://github.com/awslabs/aws-sdk-rust/issues/411 - .attribute_names(QueueAttributeName::Unknown(String::from("SentTimestamp"))) + .attribute_names(QueueAttributeName::from("SentTimestamp")) .send() .await; From dcf7f9ae538c821eb7b3baf494d3e8938083832c Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Mon, 26 Jun 2023 14:03:39 +0100 Subject: [PATCH 178/236] chore(observability): emit `component_sent` events by `source` and `service` (#17549) Closes #17580 Closes #17581 This is still in draft until I can get the following done. - [ ] ~~There are way more clones than I am happy with here, especially since this is in a hot path. These need reducing.~~ The remaining clones that I would like to remove are in the `get_tags` functions. This didn't seem trivial, and given the fairly positive regression numbers, I think it should be ok to defer for now. - [x] Function documentation. - [ ] Currently source schemas aren't being attached to the event at runtime, so the service meaning can't be retrieved. That won't work until this has been done. This will be a separate PR - #17692 - [x] I've only tested this with the kafka sink so far. I think it should work with all Stream sinks without needing any further modification - but further testing is needed. - [x] Tests. A bunch of tests need writing. - [x] The Vector source tests are failing, I think because we now have `EventsSent` and `TaggedEventsSent` which both emit `component_sent_event` events and the test framework doesn't like this. This needs to be fixed. - [ ] We will need to review every sink to ensure they work with this. All the stream-based sinks should, but the others are highly likely to need some work.
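To make the intent of this change easier to follow, here is a minimal, self-contained sketch of the grouping pattern being introduced. The names below (`EventCountTags`, `GroupedCounts`, the example `source`/`service` values) are simplified stand-ins for the `EventCountTags` and `GroupedCountByteSize` types added to `vector-common`, and plain integers stand in for the registered `metrics` counter handles, so this approximates the shape of the code rather than reproducing the actual implementation:

```rust
use std::collections::BTreeMap;

// Simplified stand-in for the `EventCountTags` type added in `vector-common`.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct EventCountTags {
    source: Option<String>,
    service: Option<String>,
}

// Stand-in for `GroupedCountByteSize::Tagged`: an event count and estimated
// JSON byte size accumulated per (source, service) pair while a request is built.
#[derive(Default)]
struct GroupedCounts {
    sizes: BTreeMap<EventCountTags, (u64, u64)>,
}

impl GroupedCounts {
    // Called once per event as it is added to a request.
    fn add_event(&mut self, tags: EventCountTags, json_size: u64) {
        let entry = self.sizes.entry(tags).or_insert((0, 0));
        entry.0 += 1;
        entry.1 += json_size;
    }

    // Called when the request is acknowledged; in Vector this is where the
    // cached `component_sent_events_total` / `component_sent_event_bytes_total`
    // counters for each tag set would be incremented.
    fn emit(&self) {
        for (tags, (count, bytes)) in &self.sizes {
            println!(
                "component_sent_events_total source={:?} service={:?} count={} bytes={}",
                tags.source, tags.service, count, bytes
            );
        }
    }
}

fn main() {
    let mut grouped = GroupedCounts::default();
    let tags = EventCountTags {
        source: Some("kafka".to_string()),
        service: Some("checkout".to_string()),
    };
    grouped.add_event(tags.clone(), 128);
    grouped.add_event(tags, 256);
    grouped.emit();
}
```

The actual implementation additionally caches the registered counter handles per tag set (`RegisteredEventCache`), so each metric is registered once and its handle is held to keep it from expiring between batches.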
--------- Signed-off-by: Stephen Wakely --- docs/tutorials/sinks/2_http_sink.md | 4 +- lib/vector-common/Cargo.toml | 2 +- .../src/internal_event/cached_event.rs | 69 ++++ .../src/internal_event/events_sent.rs | 65 +++- lib/vector-common/src/internal_event/mod.rs | 40 ++- .../src/internal_event/optional_tag.rs | 14 + lib/vector-common/src/request_metadata.rs | 312 +++++++++++++++++- lib/vector-core/src/config/global_options.rs | 15 + lib/vector-core/src/config/mod.rs | 2 + lib/vector-core/src/config/telemetry.rs | 93 ++++++ lib/vector-core/src/event/array.rs | 5 +- lib/vector-core/src/event/log_event.rs | 24 +- lib/vector-core/src/event/metadata.rs | 16 +- lib/vector-core/src/event/metric/mod.rs | 30 +- lib/vector-core/src/event/mod.rs | 26 +- lib/vector-core/src/event/trace.rs | 12 +- lib/vector-core/src/stream/driver.rs | 55 +-- src/app.rs | 2 + src/config/mod.rs | 2 +- src/sinks/amqp/request_builder.rs | 5 +- src/sinks/amqp/service.rs | 21 +- src/sinks/amqp/sink.rs | 19 -- .../aws_cloudwatch_logs/request_builder.rs | 10 +- src/sinks/aws_cloudwatch_logs/service.rs | 42 +-- src/sinks/aws_cloudwatch_logs/sink.rs | 13 +- src/sinks/aws_kinesis/firehose/record.rs | 3 +- src/sinks/aws_kinesis/request_builder.rs | 10 +- src/sinks/aws_kinesis/service.rs | 15 +- src/sinks/aws_kinesis/sink.rs | 15 +- src/sinks/aws_kinesis/streams/record.rs | 3 +- src/sinks/aws_sqs/request_builder.rs | 10 +- src/sinks/aws_sqs/service.rs | 17 +- src/sinks/azure_common/config.rs | 19 +- src/sinks/azure_common/service.rs | 5 +- src/sinks/databend/service.rs | 20 +- src/sinks/datadog/events/request_builder.rs | 10 +- src/sinks/datadog/events/service.rs | 15 +- src/sinks/datadog/logs/service.rs | 30 +- src/sinks/datadog/logs/tests.rs | 58 +++- src/sinks/datadog/metrics/service.rs | 30 +- src/sinks/datadog/traces/service.rs | 30 +- src/sinks/elasticsearch/encoder.rs | 11 +- src/sinks/elasticsearch/retry.rs | 6 +- src/sinks/elasticsearch/service.rs | 23 +- src/sinks/gcp/chronicle_unstructured.rs | 10 +- src/sinks/gcs_common/service.rs | 19 +- src/sinks/kafka/request_builder.rs | 2 +- src/sinks/kafka/service.rs | 20 +- src/sinks/kafka/tests.rs | 105 +++++- src/sinks/loki/event.rs | 12 +- src/sinks/loki/service.rs | 18 +- src/sinks/loki/sink.rs | 3 + src/sinks/new_relic/service.rs | 20 +- src/sinks/opendal_common.rs | 23 +- src/sinks/prelude.rs | 4 +- src/sinks/pulsar/request_builder.rs | 2 +- src/sinks/pulsar/service.rs | 17 +- src/sinks/s3_common/service.rs | 35 +- src/sinks/splunk_hec/common/request.rs | 8 +- src/sinks/splunk_hec/common/response.rs | 9 +- src/sinks/splunk_hec/common/service.rs | 7 +- src/sinks/statsd/service.rs | 18 +- src/sinks/util/metadata.rs | 63 +++- src/sinks/util/processed_event.rs | 14 +- src/sinks/vector/service.rs | 35 +- src/sources/kubernetes_logs/mod.rs | 5 +- src/sources/vector/mod.rs | 26 +- src/test_util/components.rs | 39 +++ src/topology/builder.rs | 5 +- src/topology/test/compliance.rs | 12 +- src/topology/test/mod.rs | 32 +- src/transforms/aggregate.rs | 4 +- src/transforms/dedupe.rs | 30 +- src/transforms/filter.rs | 3 +- src/transforms/log_to_metric.rs | 23 +- src/transforms/metric_to_log.rs | 13 +- src/transforms/tag_cardinality_limit/tests.rs | 24 +- 77 files changed, 1387 insertions(+), 501 deletions(-) create mode 100644 lib/vector-common/src/internal_event/cached_event.rs create mode 100644 lib/vector-common/src/internal_event/optional_tag.rs create mode 100644 lib/vector-core/src/config/telemetry.rs diff --git a/docs/tutorials/sinks/2_http_sink.md 
b/docs/tutorials/sinks/2_http_sink.md index 7090ef41a88d1..66fcb2e4d6f97 100644 --- a/docs/tutorials/sinks/2_http_sink.md +++ b/docs/tutorials/sinks/2_http_sink.md @@ -366,9 +366,9 @@ impl DriverResponse for BasicResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { + fn events_sent(&self) -> RequestCountByteSize { // (events count, byte size) - CountByteSize(1, self.byte_size) + CountByteSize(1, self.byte_size).into() } } ``` diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 1fde826696bad..0f38b78fdea30 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -46,7 +46,7 @@ bytes = { version = "1.4.0", default-features = false, optional = true } chrono-tz = { version = "0.8.2", default-features = false, features = ["serde"] } chrono = { version = "0.4", default-features = false, optional = true, features = ["clock"] } crossbeam-utils = { version = "0.8.16", default-features = false } -derivative = "2.1.3" +derivative = { version = "2.2.0", default-features = false } futures = { version = "0.3.28", default-features = false, features = ["std"] } indexmap = { version = "~1.9.3", default-features = false } metrics = "0.21.0" diff --git a/lib/vector-common/src/internal_event/cached_event.rs b/lib/vector-common/src/internal_event/cached_event.rs new file mode 100644 index 0000000000000..e672848c93584 --- /dev/null +++ b/lib/vector-common/src/internal_event/cached_event.rs @@ -0,0 +1,69 @@ +use std::{ + collections::BTreeMap, + sync::{Arc, RwLock}, +}; + +use derivative::Derivative; + +use super::{InternalEventHandle, RegisterInternalEvent}; + +/// Metrics (eg. `component_sent_event_bytes_total`) may need to emit tags based on +/// values contained within the events. These tags can't be determined in advance. +/// +/// Metrics need to be registered and the handle needs to be held onto in order to +/// prevent them from expiring and being dropped (this would result in the counter +/// resetting to zero). +/// `CachedEvent` is used to maintain a store of these registered metrics. When a +/// new event is emitted for a previously unseen set of tags an event is registered +/// and stored in the cache. +#[derive(Derivative)] +#[derivative(Clone(bound = ""), Default(bound = ""))] +pub struct RegisteredEventCache { + cache: Arc< + RwLock< + BTreeMap< + ::Tags, + ::Handle, + >, + >, + >, +} + +/// This trait must be implemented by events that emit dynamic tags. `register` must +/// be implemented to register an event based on the set of tags passed. +pub trait RegisterTaggedInternalEvent: RegisterInternalEvent { + /// The type that will contain the data necessary to extract the tags + /// that will be used when registering the event. + type Tags; + + fn register(tags: Self::Tags) -> ::Handle; +} + +impl RegisteredEventCache +where + Data: Sized, + EventHandle: InternalEventHandle, + Tags: Ord + Clone, + Event: RegisterInternalEvent + RegisterTaggedInternalEvent, +{ + /// Emits the event with the given tags. + /// It will register the event and store in the cache if this has not already + /// been done. + /// + /// # Panics + /// + /// This will panic if the lock is poisoned. + pub fn emit(&self, tags: &Tags, value: Data) { + let read = self.cache.read().unwrap(); + if let Some(event) = read.get(tags) { + event.emit(value); + } else { + let event = ::register(tags.clone()); + event.emit(value); + + // Ensure the read lock is dropped so we can write. 
+ drop(read); + self.cache.write().unwrap().insert(tags.clone(), event); + } + } +} diff --git a/lib/vector-common/src/internal_event/events_sent.rs b/lib/vector-common/src/internal_event/events_sent.rs index d329562afe7fc..d12a22bf17e8a 100644 --- a/lib/vector-common/src/internal_event/events_sent.rs +++ b/lib/vector-common/src/internal_event/events_sent.rs @@ -1,7 +1,11 @@ +use std::sync::Arc; + use metrics::{register_counter, Counter}; use tracing::trace; -use super::{CountByteSize, Output, SharedString}; +use crate::{config::ComponentKey, request_metadata::EventCountTags}; + +use super::{CountByteSize, OptionalTag, Output, SharedString}; pub const DEFAULT_OUTPUT: &str = "_default"; @@ -44,3 +48,62 @@ impl From for EventsSent { Self { output: output.0 } } } + +/// Makes a list of the tags to use with the events sent event. +fn make_tags( + source: &OptionalTag>, + service: &OptionalTag, +) -> Vec<(&'static str, String)> { + let mut tags = Vec::new(); + if let OptionalTag::Specified(tag) = source { + tags.push(( + "source", + tag.as_ref() + .map_or_else(|| "-".to_string(), |tag| tag.id().to_string()), + )); + } + + if let OptionalTag::Specified(tag) = service { + tags.push(("service", tag.clone().unwrap_or("-".to_string()))); + } + + tags +} + +crate::registered_event!( + TaggedEventsSent { + source: OptionalTag>, + service: OptionalTag, + } => { + events: Counter = { + register_counter!("component_sent_events_total", &make_tags(&self.source, &self.service)) + }, + event_bytes: Counter = { + register_counter!("component_sent_event_bytes_total", &make_tags(&self.source, &self.service)) + }, + } + + fn emit(&self, data: CountByteSize) { + let CountByteSize(count, byte_size) = data; + trace!(message = "Events sent.", %count, %byte_size); + + self.events.increment(count as u64); + self.event_bytes.increment(byte_size.get() as u64); + } + + fn register(tags: EventCountTags) { + super::register(TaggedEventsSent::new( + tags, + )) + } +); + +impl TaggedEventsSent { + #[must_use] + pub fn new(tags: EventCountTags) -> Self { + Self { + source: tags.source, + service: tags.service, + } + } +} diff --git a/lib/vector-common/src/internal_event/mod.rs b/lib/vector-common/src/internal_event/mod.rs index 7af70cc1322ee..4ce2f5335ac49 100644 --- a/lib/vector-common/src/internal_event/mod.rs +++ b/lib/vector-common/src/internal_event/mod.rs @@ -1,18 +1,25 @@ mod bytes_received; mod bytes_sent; +mod cached_event; pub mod component_events_dropped; mod events_received; mod events_sent; +mod optional_tag; mod prelude; pub mod service; +use std::ops::{Add, AddAssign}; + pub use metrics::SharedString; pub use bytes_received::BytesReceived; pub use bytes_sent::BytesSent; +#[allow(clippy::module_name_repetitions)] +pub use cached_event::{RegisterTaggedInternalEvent, RegisteredEventCache}; pub use component_events_dropped::{ComponentEventsDropped, INTENTIONAL, UNINTENTIONAL}; pub use events_received::EventsReceived; -pub use events_sent::{EventsSent, DEFAULT_OUTPUT}; +pub use events_sent::{EventsSent, TaggedEventsSent, DEFAULT_OUTPUT}; +pub use optional_tag::OptionalTag; pub use prelude::{error_stage, error_type}; pub use service::{CallError, PollReadyError}; @@ -109,9 +116,24 @@ pub struct ByteSize(pub usize); pub struct Count(pub usize); /// Holds the tuple `(count_of_events, estimated_json_size_of_events)`. 
-#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct CountByteSize(pub usize, pub JsonSize); +impl AddAssign for CountByteSize { + fn add_assign(&mut self, rhs: Self) { + self.0 += rhs.0; + self.1 += rhs.1; + } +} + +impl Add for CountByteSize { + type Output = CountByteSize; + + fn add(self, rhs: CountByteSize) -> Self::Output { + CountByteSize(self.0 + rhs.0, self.1 + rhs.1) + } +} + // Wrapper types used to hold parameters for registering events pub struct Output(pub Option); @@ -196,6 +218,9 @@ macro_rules! registered_event { fn emit(&$slf:ident, $data_name:ident: $data:ident) $emit_body:block + + $(fn register($tags_name:ident: $tags:ty) + $register_body:block)? ) => { paste::paste!{ #[derive(Clone)] @@ -223,6 +248,17 @@ macro_rules! registered_event { fn emit(&$slf, $data_name: $data) $emit_body } + + $(impl $crate::internal_event::cached_event::RegisterTaggedInternalEvent for $event { + type Tags = $tags; + + fn register( + $tags_name: $tags, + ) -> ::Handle { + $register_body + } + })? + } }; } diff --git a/lib/vector-common/src/internal_event/optional_tag.rs b/lib/vector-common/src/internal_event/optional_tag.rs new file mode 100644 index 0000000000000..400bc554630d1 --- /dev/null +++ b/lib/vector-common/src/internal_event/optional_tag.rs @@ -0,0 +1,14 @@ +/// The user can configure whether a tag should be emitted. If they configure it to +/// be emitted, but the value doesn't exist - we should emit the tag but with a value +/// of `-`. +#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)] +pub enum OptionalTag { + Ignored, + Specified(Option), +} + +impl From> for OptionalTag { + fn from(value: Option) -> Self { + Self::Specified(value) + } +} diff --git a/lib/vector-common/src/request_metadata.rs b/lib/vector-common/src/request_metadata.rs index cce6124361b60..d28d7da681a58 100644 --- a/lib/vector-common/src/request_metadata.rs +++ b/lib/vector-common/src/request_metadata.rs @@ -1,16 +1,207 @@ use std::ops::Add; +use std::{collections::HashMap, sync::Arc}; -use crate::json_size::JsonSize; +use crate::{ + config::ComponentKey, + internal_event::{ + CountByteSize, InternalEventHandle, OptionalTag, RegisterTaggedInternalEvent, + RegisteredEventCache, + }, + json_size::JsonSize, +}; + +/// Tags that are used to group the events within a batch for emitting telemetry. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct EventCountTags { + pub source: OptionalTag>, + pub service: OptionalTag, +} + +impl EventCountTags { + #[must_use] + pub fn new_empty() -> Self { + Self { + source: OptionalTag::Specified(None), + service: OptionalTag::Specified(None), + } + } +} + +/// Must be implemented by events to get the tags that will be attached to +/// the `component_sent_event_*` emitted metrics. +pub trait GetEventCountTags { + fn get_tags(&self) -> EventCountTags; +} + +/// Keeps track of the estimated json size of a given batch of events by +/// source and service. +#[derive(Clone, Debug)] +pub enum GroupedCountByteSize { + /// When we need to keep track of the events by certain tags we use this + /// variant. 
+ Tagged { + sizes: HashMap, + }, + /// If we don't need to track the events by certain tags we can use + /// this variant to avoid allocating a `HashMap`, + Untagged { size: CountByteSize }, +} + +impl Default for GroupedCountByteSize { + fn default() -> Self { + Self::Untagged { + size: CountByteSize(0, JsonSize::zero()), + } + } +} + +impl GroupedCountByteSize { + /// Creates a new Tagged variant for when we need to track events by + /// certain tags. + #[must_use] + pub fn new_tagged() -> Self { + Self::Tagged { + sizes: HashMap::new(), + } + } + + /// Creates a new Tagged variant for when we do not need to track events by + /// tags. + #[must_use] + pub fn new_untagged() -> Self { + Self::Untagged { + size: CountByteSize(0, JsonSize::zero()), + } + } + + /// Returns a `HashMap` of tags => event counts for when we are tracking by tags. + /// Returns `None` if we are not tracking by tags. + #[must_use] + #[cfg(test)] + pub fn sizes(&self) -> Option<&HashMap> { + match self { + Self::Tagged { sizes } => Some(sizes), + Self::Untagged { .. } => None, + } + } + + /// Returns a single count for when we are not tracking by tags. + #[must_use] + #[cfg(test)] + fn size(&self) -> Option { + match self { + Self::Tagged { .. } => None, + Self::Untagged { size } => Some(*size), + } + } + + /// Adds the given estimated json size of the event to current count. + pub fn add_event(&mut self, event: &E, json_size: JsonSize) + where + E: GetEventCountTags, + { + match self { + Self::Tagged { sizes } => { + let size = CountByteSize(1, json_size); + let tags = event.get_tags(); + + match sizes.get_mut(&tags) { + Some(current) => { + *current += size; + } + None => { + sizes.insert(tags, size); + } + } + } + Self::Untagged { size } => { + *size += CountByteSize(1, json_size); + } + } + } + + /// Emits our counts to a `RegisteredEvent` cached event. + pub fn emit_event(&self, event_cache: &RegisteredEventCache) + where + T: RegisterTaggedInternalEvent, + H: InternalEventHandle, + { + match self { + GroupedCountByteSize::Tagged { sizes } => { + for (tags, size) in sizes { + event_cache.emit(tags, *size); + } + } + GroupedCountByteSize::Untagged { size } => { + event_cache.emit(&EventCountTags::new_empty(), *size); + } + } + } +} + +impl From for GroupedCountByteSize { + fn from(value: CountByteSize) -> Self { + Self::Untagged { size: value } + } +} + +impl<'a> Add<&'a GroupedCountByteSize> for GroupedCountByteSize { + type Output = GroupedCountByteSize; + + fn add(self, other: &'a Self::Output) -> Self::Output { + match (self, other) { + (Self::Tagged { sizes: mut us }, Self::Tagged { sizes: them }) => { + for (key, value) in them { + match us.get_mut(key) { + Some(size) => *size += *value, + None => { + us.insert(key.clone(), *value); + } + } + } + + Self::Tagged { sizes: us } + } + + (Self::Untagged { size: us }, Self::Untagged { size: them }) => { + Self::Untagged { size: us + *them } + } + + // The following two scenarios shouldn't really occur in practice, but are provided for completeness. 
+ (Self::Tagged { mut sizes }, Self::Untagged { size }) => { + match sizes.get_mut(&EventCountTags::new_empty()) { + Some(empty_size) => *empty_size += *size, + None => { + sizes.insert(EventCountTags::new_empty(), *size); + } + } + + Self::Tagged { sizes } + } + (Self::Untagged { size }, Self::Tagged { sizes }) => { + let mut sizes = sizes.clone(); + match sizes.get_mut(&EventCountTags::new_empty()) { + Some(empty_size) => *empty_size += size, + None => { + sizes.insert(EventCountTags::new_empty(), size); + } + } + + Self::Tagged { sizes } + } + } + } +} /// Metadata for batch requests. -#[derive(Clone, Copy, Debug, Default)] +#[derive(Clone, Debug, Default)] pub struct RequestMetadata { /// Number of events represented by this batch request. event_count: usize, /// Size, in bytes, of the in-memory representation of all events in this batch request. events_byte_size: usize, /// Size, in bytes, of the estimated JSON-encoded representation of all events in this batch request. - events_estimated_json_encoded_byte_size: JsonSize, + events_estimated_json_encoded_byte_size: GroupedCountByteSize, /// Uncompressed size, in bytes, of the encoded events in this batch request. request_encoded_size: usize, /// On-the-wire size, in bytes, of the batch request itself after compression, etc. @@ -19,7 +210,6 @@ pub struct RequestMetadata { request_wire_size: usize, } -// TODO: Make this struct the object which emits the actual internal telemetry i.e. events sent, bytes sent, etc. impl RequestMetadata { #[must_use] pub fn new( @@ -27,7 +217,7 @@ impl RequestMetadata { events_byte_size: usize, request_encoded_size: usize, request_wire_size: usize, - events_estimated_json_encoded_byte_size: JsonSize, + events_estimated_json_encoded_byte_size: GroupedCountByteSize, ) -> Self { Self { event_count, @@ -49,7 +239,14 @@ impl RequestMetadata { } #[must_use] - pub const fn events_estimated_json_encoded_byte_size(&self) -> JsonSize { + pub fn events_estimated_json_encoded_byte_size(&self) -> &GroupedCountByteSize { + &self.events_estimated_json_encoded_byte_size + } + + /// Consumes the object and returns the byte size of the request grouped by + /// the tags (source and service). + #[must_use] + pub fn into_events_estimated_json_encoded_byte_size(self) -> GroupedCountByteSize { self.events_estimated_json_encoded_byte_size } @@ -66,7 +263,7 @@ impl RequestMetadata { /// Constructs a `RequestMetadata` by summation of the "batch" of `RequestMetadata` provided. #[must_use] pub fn from_batch>(metadata_iter: T) -> Self { - let mut metadata_sum = RequestMetadata::new(0, 0, 0, 0, JsonSize::zero()); + let mut metadata_sum = RequestMetadata::new(0, 0, 0, 0, GroupedCountByteSize::default()); for metadata in metadata_iter { metadata_sum = metadata_sum + &metadata; @@ -84,7 +281,7 @@ impl<'a> Add<&'a RequestMetadata> for RequestMetadata { event_count: self.event_count + other.event_count, events_byte_size: self.events_byte_size + other.events_byte_size, events_estimated_json_encoded_byte_size: self.events_estimated_json_encoded_byte_size - + other.events_estimated_json_encoded_byte_size, + + &other.events_estimated_json_encoded_byte_size, request_encoded_size: self.request_encoded_size + other.request_encoded_size, request_wire_size: self.request_wire_size + other.request_wire_size, } @@ -94,5 +291,102 @@ impl<'a> Add<&'a RequestMetadata> for RequestMetadata { /// Objects implementing this trait have metadata that describes the request. pub trait MetaDescriptive { /// Returns the `RequestMetadata` associated with this object. 
- fn get_metadata(&self) -> RequestMetadata; + fn get_metadata(&self) -> &RequestMetadata; + + // Returns a mutable reference to the `RequestMetadata` associated with this object. + fn metadata_mut(&mut self) -> &mut RequestMetadata; +} + +#[cfg(test)] +mod tests { + use super::*; + + struct DummyEvent { + source: OptionalTag>, + service: OptionalTag, + } + + impl GetEventCountTags for DummyEvent { + fn get_tags(&self) -> EventCountTags { + EventCountTags { + source: self.source.clone(), + service: self.service.clone(), + } + } + } + + #[test] + fn add_request_count_bytesize_event_untagged() { + let mut bytesize = GroupedCountByteSize::new_untagged(); + let event = DummyEvent { + source: Some(Arc::new(ComponentKey::from("carrot"))).into(), + service: Some("cabbage".to_string()).into(), + }; + + bytesize.add_event(&event, JsonSize::new(42)); + + let event = DummyEvent { + source: Some(Arc::new(ComponentKey::from("pea"))).into(), + service: Some("potato".to_string()).into(), + }; + + bytesize.add_event(&event, JsonSize::new(36)); + + assert_eq!(Some(CountByteSize(2, JsonSize::new(78))), bytesize.size()); + assert_eq!(None, bytesize.sizes()); + } + + #[test] + fn add_request_count_bytesize_event_tagged() { + let mut bytesize = GroupedCountByteSize::new_tagged(); + let event = DummyEvent { + source: OptionalTag::Ignored, + service: Some("cabbage".to_string()).into(), + }; + + bytesize.add_event(&event, JsonSize::new(42)); + + let event = DummyEvent { + source: OptionalTag::Ignored, + service: Some("cabbage".to_string()).into(), + }; + + bytesize.add_event(&event, JsonSize::new(36)); + + let event = DummyEvent { + source: OptionalTag::Ignored, + service: Some("tomato".to_string()).into(), + }; + + bytesize.add_event(&event, JsonSize::new(23)); + + assert_eq!(None, bytesize.size()); + let mut sizes = bytesize + .sizes() + .unwrap() + .clone() + .into_iter() + .collect::>(); + sizes.sort(); + + assert_eq!( + vec![ + ( + EventCountTags { + source: OptionalTag::Ignored, + service: Some("cabbage".to_string()).into() + }, + CountByteSize(2, JsonSize::new(78)) + ), + ( + EventCountTags { + source: OptionalTag::Ignored, + service: Some("tomato".to_string()).into() + }, + CountByteSize(1, JsonSize::new(23)) + ), + ], + sizes + ); + } } diff --git a/lib/vector-core/src/config/global_options.rs b/lib/vector-core/src/config/global_options.rs index 3e63d863f5ab0..af86b177095d5 100644 --- a/lib/vector-core/src/config/global_options.rs +++ b/lib/vector-core/src/config/global_options.rs @@ -5,6 +5,7 @@ use vector_common::TimeZone; use vector_config::configurable_component; use super::super::default_data_dir; +use super::Telemetry; use super::{proxy::ProxyConfig, AcknowledgementsConfig, LogSchema}; use crate::serde::bool_or_struct; @@ -55,6 +56,16 @@ pub struct GlobalOptions { )] pub log_schema: LogSchema, + /// Telemetry options. + /// + /// Determines whether `source` and `service` tags should be emitted with the + /// `component_sent_*` and `component_received_*` events. + #[serde( + default, + skip_serializing_if = "crate::serde::skip_serializing_if_default" + )] + pub telemetry: Telemetry, + /// The name of the time zone to apply to timestamp conversions that do not contain an explicit time zone. 
/// /// The time zone name may be any name in the [TZ database][tzdb] or `local` to indicate system @@ -218,10 +229,14 @@ impl GlobalOptions { errors.extend(merge_errors); } + let mut telemetry = self.telemetry.clone(); + telemetry.merge(&with.telemetry); + if errors.is_empty() { Ok(Self { data_dir, log_schema, + telemetry, acknowledgements: self.acknowledgements.merge_default(&with.acknowledgements), timezone: self.timezone.or(with.timezone), proxy: self.proxy.merge(&with.proxy), diff --git a/lib/vector-core/src/config/mod.rs b/lib/vector-core/src/config/mod.rs index 97cbc091d8f5e..3ff5152a293a7 100644 --- a/lib/vector-core/src/config/mod.rs +++ b/lib/vector-core/src/config/mod.rs @@ -8,6 +8,7 @@ mod global_options; mod log_schema; pub mod output_id; pub mod proxy; +mod telemetry; use crate::event::LogEvent; pub use global_options::GlobalOptions; @@ -15,6 +16,7 @@ pub use log_schema::{init_log_schema, log_schema, LogSchema}; use lookup::{lookup_v2::ValuePath, path, PathPrefix}; pub use output_id::OutputId; use serde::{Deserialize, Serialize}; +pub use telemetry::{init_telemetry, telemetry, Tags, Telemetry}; pub use vector_common::config::ComponentKey; use vector_config::configurable_component; use vrl::value::Value; diff --git a/lib/vector-core/src/config/telemetry.rs b/lib/vector-core/src/config/telemetry.rs new file mode 100644 index 0000000000000..71348c509ef94 --- /dev/null +++ b/lib/vector-core/src/config/telemetry.rs @@ -0,0 +1,93 @@ +use once_cell::sync::{Lazy, OnceCell}; +use vector_common::request_metadata::GroupedCountByteSize; +use vector_config::configurable_component; + +static TELEMETRY: OnceCell = OnceCell::new(); +static TELEMETRY_DEFAULT: Lazy = Lazy::new(Telemetry::default); + +/// Loads the telemetry options from configurations and sets the global options. +/// Once this is done, configurations can be correctly loaded using configured +/// log schema defaults. +/// +/// # Errors +/// +/// This function will fail if the `builder` fails. +/// +/// # Panics +/// +/// If deny is set, will panic if telemetry has already been set. +pub fn init_telemetry(telemetry: Telemetry, deny_if_set: bool) { + assert!( + !(TELEMETRY.set(telemetry).is_err() && deny_if_set), + "Couldn't set telemetry" + ); +} + +/// Returns the telemetry configuration options. +pub fn telemetry() -> &'static Telemetry { + TELEMETRY.get().unwrap_or(&TELEMETRY_DEFAULT) +} + +/// Sets options for the telemetry that Vector emits. +#[configurable_component] +#[derive(Clone, Debug, Eq, PartialEq, Default)] +#[serde(default)] +pub struct Telemetry { + #[configurable(derived)] + pub tags: Tags, +} + +impl Telemetry { + /// Merge two `Telemetry` instances together. + pub fn merge(&mut self, other: &Telemetry) { + self.tags.emit_service = self.tags.emit_service || other.tags.emit_service; + self.tags.emit_source = self.tags.emit_source || other.tags.emit_source; + } + + /// Returns true if any of the tag options are true. 
+ pub fn has_tags(&self) -> bool { + self.tags.emit_service || self.tags.emit_source + } + + pub fn tags(&self) -> &Tags { + &self.tags + } + + /// The variant of `GroupedCountByteSize` + pub fn create_request_count_byte_size(&self) -> GroupedCountByteSize { + if self.has_tags() { + GroupedCountByteSize::new_tagged() + } else { + GroupedCountByteSize::new_untagged() + } + } +} + +/// Configures whether to emit certain tags +#[configurable_component] +#[derive(Clone, Debug, Eq, PartialEq, Default)] +#[serde(default)] +pub struct Tags { + /// True if the `service` tag should be emitted + /// in the `component_received_*` and `component_sent_*` + /// telemetry. + pub emit_service: bool, + + /// True if the `source` tag should be emitted + /// in the `component_received_*` and `component_sent_*` + /// telemetry. + pub emit_source: bool, +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn partial_telemetry() { + let toml = r#" + emit_source = true + "#; + toml::from_str::(toml).unwrap(); + } +} diff --git a/lib/vector-core/src/event/array.rs b/lib/vector-core/src/event/array.rs index da87a6f6a8074..9cdafbcf569d4 100644 --- a/lib/vector-core/src/event/array.rs +++ b/lib/vector-core/src/event/array.rs @@ -9,6 +9,7 @@ use futures::{stream, Stream}; use quickcheck::{Arbitrary, Gen}; use vector_buffers::EventCount; use vector_common::{ + config::ComponentKey, finalization::{AddBatchNotifier, BatchNotifier, EventFinalizers, Finalizable}, json_size::JsonSize, }; @@ -17,7 +18,7 @@ use super::{ EstimatedJsonEncodedSizeOf, Event, EventDataEq, EventFinalizer, EventMutRef, EventRef, LogEvent, Metric, TraceEvent, }; -use crate::{config::OutputId, ByteSizeOf}; +use crate::ByteSizeOf; /// The type alias for an array of `LogEvent` elements. pub type LogArray = Vec; @@ -142,7 +143,7 @@ pub enum EventArray { impl EventArray { /// Sets the `OutputId` in the metadata for all the events in this array. 
- pub fn set_output_id(&mut self, output_id: &Arc) { + pub fn set_output_id(&mut self, output_id: &Arc) { match self { EventArray::Logs(logs) => { for log in logs { diff --git a/lib/vector-core/src/event/log_event.rs b/lib/vector-core/src/event/log_event.rs index beb0afdec1e32..c782476b5c515 100644 --- a/lib/vector-core/src/event/log_event.rs +++ b/lib/vector-core/src/event/log_event.rs @@ -15,7 +15,9 @@ use lookup::lookup_v2::TargetPath; use lookup::PathPrefix; use serde::{Deserialize, Serialize, Serializer}; use vector_common::{ + internal_event::OptionalTag, json_size::{JsonSize, NonZeroJsonSize}, + request_metadata::{EventCountTags, GetEventCountTags}, EventDataEq, }; @@ -25,8 +27,8 @@ use super::{ metadata::EventMetadata, util, EventFinalizers, Finalizable, Value, }; -use crate::config::log_schema; use crate::config::LogNamespace; +use crate::config::{log_schema, telemetry}; use crate::{event::MaybeAsLogMut, ByteSizeOf}; use lookup::{metadata_path, path}; @@ -212,6 +214,26 @@ impl EstimatedJsonEncodedSizeOf for LogEvent { } } +impl GetEventCountTags for LogEvent { + fn get_tags(&self) -> EventCountTags { + let source = if telemetry().tags().emit_source { + self.metadata().source_id().cloned().into() + } else { + OptionalTag::Ignored + }; + + let service = if telemetry().tags().emit_service { + self.get_by_meaning("service") + .map(ToString::to_string) + .into() + } else { + OptionalTag::Ignored + }; + + EventCountTags { source, service } + } +} + impl LogEvent { #[must_use] pub fn new_with_metadata(metadata: EventMetadata) -> Self { diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index 403b43bfc52b9..f13bee6a5e009 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -1,15 +1,13 @@ #![deny(missing_docs)] -use std::collections::BTreeMap; -use std::sync::Arc; +use std::{collections::BTreeMap, sync::Arc}; use serde::{Deserialize, Serialize}; -use vector_common::EventDataEq; +use vector_common::{config::ComponentKey, EventDataEq}; use vrl::value::{Kind, Secrets, Value}; use super::{BatchNotifier, EventFinalizer, EventFinalizers, EventStatus}; -use crate::config::{LogNamespace, OutputId}; -use crate::{schema, ByteSizeOf}; +use crate::{config::LogNamespace, schema, ByteSizeOf}; const DATADOG_API_KEY: &str = "datadog_api_key"; const SPLUNK_HEC_TOKEN: &str = "splunk_hec_token"; @@ -30,7 +28,7 @@ pub struct EventMetadata { finalizers: EventFinalizers, /// The id of the source - source_id: Option>, + source_id: Option>, /// An identifier for a globally registered schema definition which provides information about /// the event shape (type information, and semantic meaning of fields). @@ -75,12 +73,12 @@ impl EventMetadata { /// Returns a reference to the metadata source. #[must_use] - pub fn source_id(&self) -> Option<&OutputId> { - self.source_id.as_deref() + pub fn source_id(&self) -> Option<&Arc> { + self.source_id.as_ref() } /// Sets the `source_id` in the metadata to the provided value. 
- pub fn set_source_id(&mut self, source_id: Arc) { + pub fn set_source_id(&mut self, source_id: Arc) { self.source_id = Some(source_id); } diff --git a/lib/vector-core/src/event/metric/mod.rs b/lib/vector-core/src/event/metric/mod.rs index 141d3b28997d9..fa62bec7ec52c 100644 --- a/lib/vector-core/src/event/metric/mod.rs +++ b/lib/vector-core/src/event/metric/mod.rs @@ -11,10 +11,16 @@ use std::{ }; use chrono::{DateTime, Utc}; -use vector_common::{json_size::JsonSize, EventDataEq}; +use vector_common::{ + internal_event::OptionalTag, + json_size::JsonSize, + request_metadata::{EventCountTags, GetEventCountTags}, + EventDataEq, +}; use vector_config::configurable_component; use crate::{ + config::telemetry, event::{ estimated_json_encoded_size_of::EstimatedJsonEncodedSizeOf, BatchNotifier, EventFinalizer, EventFinalizers, EventMetadata, Finalizable, @@ -476,6 +482,28 @@ impl Finalizable for Metric { } } +impl GetEventCountTags for Metric { + fn get_tags(&self) -> EventCountTags { + let source = if telemetry().tags().emit_source { + self.metadata().source_id().cloned().into() + } else { + OptionalTag::Ignored + }; + + // Currently there is no way to specify a tag that means the service, + // so we will be hardcoding it to "service". + let service = if telemetry().tags().emit_service { + self.tags() + .and_then(|tags| tags.get("service").map(ToString::to_string)) + .into() + } else { + OptionalTag::Ignored + }; + + EventCountTags { source, service } + } +} + /// Metric kind. /// /// Metrics can be either absolute of incremental. Absolute metrics represent a sort of "last write wins" scenario, diff --git a/lib/vector-core/src/event/mod.rs b/lib/vector-core/src/event/mod.rs index 04522793e3436..ae2e51e8a23a8 100644 --- a/lib/vector-core/src/event/mod.rs +++ b/lib/vector-core/src/event/mod.rs @@ -5,7 +5,7 @@ use std::{ sync::Arc, }; -use crate::{config::OutputId, ByteSizeOf}; +use crate::ByteSizeOf; pub use array::{into_event_stream, EventArray, EventContainer, LogArray, MetricArray, TraceArray}; pub use estimated_json_encoded_size_of::EstimatedJsonEncodedSizeOf; pub use finalization::{ @@ -19,7 +19,13 @@ pub use r#ref::{EventMutRef, EventRef}; use serde::{Deserialize, Serialize}; pub use trace::TraceEvent; use vector_buffers::EventCount; -use vector_common::{finalization, json_size::JsonSize, EventDataEq}; +use vector_common::{ + config::ComponentKey, + finalization, + json_size::JsonSize, + request_metadata::{EventCountTags, GetEventCountTags}, + EventDataEq, +}; pub use vrl::value::Value; #[cfg(feature = "vrl")] pub use vrl_target::{TargetEvents, VrlTarget}; @@ -90,6 +96,16 @@ impl Finalizable for Event { } } +impl GetEventCountTags for Event { + fn get_tags(&self) -> EventCountTags { + match self { + Event::Log(log) => log.get_tags(), + Event::Metric(metric) => metric.get_tags(), + Event::Trace(trace) => trace.get_tags(), + } + } +} + impl Event { /// Return self as a `LogEvent` /// @@ -284,18 +300,18 @@ impl Event { /// Returns a reference to the event metadata source. #[must_use] - pub fn source_id(&self) -> Option<&OutputId> { + pub fn source_id(&self) -> Option<&Arc> { self.metadata().source_id() } /// Sets the `source_id` in the event metadata to the provided value. - pub fn set_source_id(&mut self, source_id: Arc) { + pub fn set_source_id(&mut self, source_id: Arc) { self.metadata_mut().set_source_id(source_id); } /// Sets the `source_id` in the event metadata to the provided value. 
#[must_use] - pub fn with_source_id(mut self, source_id: Arc) -> Self { + pub fn with_source_id(mut self, source_id: Arc) -> Self { self.metadata_mut().set_source_id(source_id); self } diff --git a/lib/vector-core/src/event/trace.rs b/lib/vector-core/src/event/trace.rs index bd10a9e3aaca5..3885b50b9f13d 100644 --- a/lib/vector-core/src/event/trace.rs +++ b/lib/vector-core/src/event/trace.rs @@ -3,7 +3,11 @@ use std::{collections::BTreeMap, fmt::Debug}; use lookup::lookup_v2::TargetPath; use serde::{Deserialize, Serialize}; use vector_buffers::EventCount; -use vector_common::{json_size::JsonSize, EventDataEq}; +use vector_common::{ + json_size::JsonSize, + request_metadata::{EventCountTags, GetEventCountTags}, + EventDataEq, +}; use super::{ BatchNotifier, EstimatedJsonEncodedSizeOf, EventFinalizer, EventFinalizers, EventMetadata, @@ -143,3 +147,9 @@ impl AsMut for TraceEvent { &mut self.0 } } + +impl GetEventCountTags for TraceEvent { + fn get_tags(&self) -> EventCountTags { + self.0.get_tags() + } +} diff --git a/lib/vector-core/src/stream/driver.rs b/lib/vector-core/src/stream/driver.rs index 093a7e0c4fad0..6ff23014c96d5 100644 --- a/lib/vector-core/src/stream/driver.rs +++ b/lib/vector-core/src/stream/driver.rs @@ -5,10 +5,10 @@ use tokio::{pin, select}; use tower::Service; use tracing::Instrument; use vector_common::internal_event::{ - register, ByteSize, BytesSent, CallError, CountByteSize, EventsSent, InternalEventHandle as _, - Output, PollReadyError, Registered, SharedString, + register, ByteSize, BytesSent, CallError, InternalEventHandle as _, PollReadyError, Registered, + RegisteredEventCache, SharedString, TaggedEventsSent, }; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive}; use super::FuturesUnorderedCount; use crate::{ @@ -18,7 +18,7 @@ use crate::{ pub trait DriverResponse { fn event_status(&self) -> EventStatus; - fn events_sent(&self) -> CountByteSize; + fn events_sent(&self) -> &GroupedCountByteSize; /// Return the number of bytes that were sent in the request that returned this response. 
// TODO, remove the default implementation once all sinks have @@ -99,7 +99,7 @@ where pin!(batched_input); let bytes_sent = protocol.map(|protocol| register(BytesSent { protocol })); - let events_sent = register(EventsSent::from(Output(None))); + let events_sent = RegisteredEventCache::default(); loop { // Core behavior of the loop: @@ -167,8 +167,7 @@ where let finalizers = req.take_finalizers(); let bytes_sent = bytes_sent.clone(); let events_sent = events_sent.clone(); - - let metadata = req.get_metadata(); + let event_count = req.get_metadata().event_count(); let fut = svc.call(req) .err_into() @@ -176,7 +175,7 @@ where result, request_id, finalizers, - &metadata, + event_count, &bytes_sent, &events_sent, )) @@ -202,13 +201,13 @@ where result: Result, request_id: usize, finalizers: EventFinalizers, - metadata: &RequestMetadata, + event_count: usize, bytes_sent: &Option>, - events_sent: &Registered, + events_sent: &RegisteredEventCache, ) { match result { Err(error) => { - Self::emit_call_error(Some(error), request_id, metadata.event_count()); + Self::emit_call_error(Some(error), request_id, event_count); finalizers.update_status(EventStatus::Rejected); } Ok(response) => { @@ -220,10 +219,12 @@ where bytes_sent.emit(ByteSize(byte_size)); } } - events_sent.emit(response.events_sent()); + + response.events_sent().emit_event(events_sent); + // This condition occurs specifically when the `HttpBatchService::call()` is called *within* the `Service::call()` } else if response.event_status() == EventStatus::Rejected { - Self::emit_call_error(None, request_id, metadata.event_count()); + Self::emit_call_error(None, request_id, event_count); finalizers.update_status(EventStatus::Rejected); } } @@ -264,7 +265,7 @@ mod tests { use vector_common::{ finalization::{BatchNotifier, EventFinalizer, EventFinalizers, EventStatus, Finalizable}, json_size::JsonSize, - request_metadata::RequestMetadata, + request_metadata::{GroupedCountByteSize, RequestMetadata}, }; use vector_common::{internal_event::CountByteSize, request_metadata::MetaDescriptive}; @@ -298,20 +299,34 @@ mod tests { } impl MetaDescriptive for DelayRequest { - fn get_metadata(&self) -> RequestMetadata { - self.2 + fn get_metadata(&self) -> &RequestMetadata { + &self.2 + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.2 } } - struct DelayResponse; + struct DelayResponse { + events_sent: GroupedCountByteSize, + } + + impl DelayResponse { + fn new() -> Self { + Self { + events_sent: CountByteSize(1, JsonSize::new(1)).into(), + } + } + } impl DriverResponse for DelayResponse { fn event_status(&self) -> EventStatus { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(1, JsonSize::new(1)) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_sent } } @@ -396,7 +411,7 @@ mod tests { drop(permit); drop(req); - Ok(DelayResponse) + Ok(DelayResponse::new()) }) } } diff --git a/src/app.rs b/src/app.rs index c1ab0ae12e0d1..12658a01115d3 100644 --- a/src/app.rs +++ b/src/app.rs @@ -472,6 +472,8 @@ pub async fn load_configs( #[cfg(not(feature = "enterprise-tests"))] config::init_log_schema(config.global.log_schema.clone(), true); + config::init_telemetry(config.global.telemetry.clone(), true); + if !config.healthchecks.enabled { info!("Health checks are disabled."); } diff --git a/src/config/mod.rs b/src/config/mod.rs index 61c1a219d9eed..b4591199ef886 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -63,7 +63,7 @@ pub use unit_test::{build_unit_tests, build_unit_tests_main, 
UnitTestResult}; pub use validation::warnings; pub use vars::{interpolate, ENVIRONMENT_VARIABLE_INTERPOLATION_REGEX}; pub use vector_core::config::{ - init_log_schema, log_schema, proxy::ProxyConfig, LogSchema, OutputId, + init_log_schema, init_telemetry, log_schema, proxy::ProxyConfig, telemetry, LogSchema, OutputId, }; #[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq)] diff --git a/src/sinks/amqp/request_builder.rs b/src/sinks/amqp/request_builder.rs index ace1af1f66fe4..13aaeab81cfcd 100644 --- a/src/sinks/amqp/request_builder.rs +++ b/src/sinks/amqp/request_builder.rs @@ -13,7 +13,6 @@ pub(super) struct AmqpMetadata { routing_key: String, properties: BasicProperties, finalizers: EventFinalizers, - event_json_size: JsonSize, } /// Build the request to send to `AMQP` by using the encoder to convert it into @@ -43,14 +42,13 @@ impl RequestBuilder for AmqpRequestBuilder { &self, mut input: AmqpEvent, ) -> (Self::Metadata, RequestMetadataBuilder, Self::Events) { - let builder = RequestMetadataBuilder::from_events(&input); + let builder = RequestMetadataBuilder::from_event(&input.event); let metadata = AmqpMetadata { exchange: input.exchange, routing_key: input.routing_key, properties: input.properties, finalizers: input.event.take_finalizers(), - event_json_size: input.event.estimated_json_encoded_size_of(), }; (metadata, builder, input.event) @@ -70,7 +68,6 @@ impl RequestBuilder for AmqpRequestBuilder { amqp_metadata.properties, amqp_metadata.finalizers, metadata, - amqp_metadata.event_json_size, ) } } diff --git a/src/sinks/amqp/service.rs b/src/sinks/amqp/service.rs index 20b16b99e6e39..42ccf467e5692 100644 --- a/src/sinks/amqp/service.rs +++ b/src/sinks/amqp/service.rs @@ -22,7 +22,6 @@ pub(super) struct AmqpRequest { properties: BasicProperties, finalizers: EventFinalizers, metadata: RequestMetadata, - event_json_size: JsonSize, } impl AmqpRequest { @@ -33,7 +32,6 @@ impl AmqpRequest { properties: BasicProperties, finalizers: EventFinalizers, metadata: RequestMetadata, - event_json_size: JsonSize, ) -> Self { Self { body, @@ -42,7 +40,6 @@ impl AmqpRequest { properties, finalizers, metadata, - event_json_size, } } } @@ -54,15 +51,19 @@ impl Finalizable for AmqpRequest { } impl MetaDescriptive for AmqpRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } /// A successful response from `AMQP`. 
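Note: the sink-by-sink hunks that follow all apply the same pattern. `MetaDescriptive::get_metadata` now returns a reference and gains a `metadata_mut` accessor, and `DriverResponse::events_sent` returns `&GroupedCountByteSize` instead of a plain `CountByteSize`. A minimal illustrative sketch of that pattern is shown here; it is not part of the patch, and `ExampleRequest`, `ExampleResponse`, and the import paths are placeholder names modeled on the patched crates:

use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata};
use vector_core::stream::DriverResponse;
use crate::event::EventStatus;

struct ExampleRequest {
    metadata: RequestMetadata,
}

impl MetaDescriptive for ExampleRequest {
    // Borrow the metadata instead of copying it, since it now owns a
    // (possibly tagged) GroupedCountByteSize.
    fn get_metadata(&self) -> &RequestMetadata {
        &self.metadata
    }

    fn metadata_mut(&mut self) -> &mut RequestMetadata {
        &mut self.metadata
    }
}

struct ExampleResponse {
    events_byte_size: GroupedCountByteSize,
}

impl DriverResponse for ExampleResponse {
    fn event_status(&self) -> EventStatus {
        EventStatus::Delivered
    }

    // The stream driver reads this grouped value and emits the
    // `component_sent_events_total` / `component_sent_event_bytes_total`
    // metrics, tagged by `source`/`service` when telemetry tags are enabled.
    fn events_sent(&self) -> &GroupedCountByteSize {
        &self.events_byte_size
    }
}

// Inside a sink's `Service::call`, the metadata is typically taken out of the
// request and converted into the grouped byte sizes carried on the response:
//
//     let metadata = std::mem::take(request.metadata_mut());
//     let events_byte_size = metadata.into_events_estimated_json_encoded_byte_size();
//     Ok(ExampleResponse { events_byte_size })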
pub(super) struct AmqpResponse { byte_size: usize, - json_size: JsonSize, + json_size: GroupedCountByteSize, } impl DriverResponse for AmqpResponse { @@ -70,8 +71,8 @@ impl DriverResponse for AmqpResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(1, self.json_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.json_size } fn bytes_sent(&self) -> Option { @@ -129,7 +130,7 @@ impl Service for AmqpService { Ok(lapin::publisher_confirm::Confirmation::Nack(_)) => { warn!("Received Negative Acknowledgement from AMQP server."); Ok(AmqpResponse { - json_size: req.event_json_size, + json_size: req.metadata.into_events_estimated_json_encoded_byte_size(), byte_size, }) } @@ -139,7 +140,7 @@ impl Service for AmqpService { Err(AmqpError::AmqpAcknowledgementFailed { error }) } Ok(_) => Ok(AmqpResponse { - json_size: req.event_json_size, + json_size: req.metadata.into_events_estimated_json_encoded_byte_size(), byte_size, }), }, diff --git a/src/sinks/amqp/sink.rs b/src/sinks/amqp/sink.rs index f1da0b8d944f0..287b002b935f2 100644 --- a/src/sinks/amqp/sink.rs +++ b/src/sinks/amqp/sink.rs @@ -26,25 +26,6 @@ pub(super) struct AmqpEvent { pub(super) properties: BasicProperties, } -impl EventCount for AmqpEvent { - fn event_count(&self) -> usize { - // An AmqpEvent represents one event. - 1 - } -} - -impl ByteSizeOf for AmqpEvent { - fn allocated_bytes(&self) -> usize { - self.event.size_of() - } -} - -impl EstimatedJsonEncodedSizeOf for AmqpEvent { - fn estimated_json_encoded_size_of(&self) -> JsonSize { - self.event.estimated_json_encoded_size_of() - } -} - pub(super) struct AmqpSink { pub(super) channel: Arc, exchange: Template, diff --git a/src/sinks/aws_cloudwatch_logs/request_builder.rs b/src/sinks/aws_cloudwatch_logs/request_builder.rs index 0d2b63fa3322d..edbf4a752233c 100644 --- a/src/sinks/aws_cloudwatch_logs/request_builder.rs +++ b/src/sinks/aws_cloudwatch_logs/request_builder.rs @@ -39,8 +39,12 @@ impl Finalizable for CloudwatchRequest { } impl MetaDescriptive for CloudwatchRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -87,7 +91,7 @@ impl CloudwatchRequestBuilder { self.transformer.transform(&mut event); let mut message_bytes = BytesMut::new(); - let builder = RequestMetadataBuilder::from_events(&event); + let builder = RequestMetadataBuilder::from_event(&event); if self.encoder.encode(event, &mut message_bytes).is_err() { // The encoder handles internal event emission for Error and EventsDropped. 
diff --git a/src/sinks/aws_cloudwatch_logs/service.rs b/src/sinks/aws_cloudwatch_logs/service.rs index 93ed0b52252a2..9b02493536017 100644 --- a/src/sinks/aws_cloudwatch_logs/service.rs +++ b/src/sinks/aws_cloudwatch_logs/service.rs @@ -22,20 +22,18 @@ use tower::{ timeout::Timeout, Service, ServiceBuilder, ServiceExt, }; -use vector_common::{json_size::JsonSize, request_metadata::MetaDescriptive}; -use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; - -use crate::{ - event::EventStatus, - sinks::{ - aws_cloudwatch_logs::{ - config::CloudwatchLogsSinkConfig, request, retry::CloudwatchRetryLogic, - sink::BatchCloudwatchRequest, CloudwatchKey, - }, - util::{ - retries::FixedRetryPolicy, EncodedLength, TowerRequestConfig, TowerRequestSettings, - }, +use vector_common::{ + finalization::EventStatus, + request_metadata::{GroupedCountByteSize, MetaDescriptive}, +}; +use vector_core::stream::DriverResponse; + +use crate::sinks::{ + aws_cloudwatch_logs::{ + config::CloudwatchLogsSinkConfig, request, retry::CloudwatchRetryLogic, + sink::BatchCloudwatchRequest, CloudwatchKey, }, + util::{retries::FixedRetryPolicy, EncodedLength, TowerRequestConfig, TowerRequestSettings}, }; type Svc = Buffer< @@ -98,8 +96,7 @@ impl From> for CloudwatchError { #[derive(Debug)] pub struct CloudwatchResponse { - events_count: usize, - events_byte_size: JsonSize, + events_byte_size: GroupedCountByteSize, } impl crate::sinks::util::sink::Response for CloudwatchResponse { @@ -117,8 +114,8 @@ impl DriverResponse for CloudwatchResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.events_count, self.events_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_byte_size } } @@ -156,9 +153,9 @@ impl Service for CloudwatchLogsPartitionSvc { Poll::Ready(Ok(())) } - fn call(&mut self, req: BatchCloudwatchRequest) -> Self::Future { - let events_count = req.get_metadata().event_count(); - let events_byte_size = req.get_metadata().events_estimated_json_encoded_byte_size(); + fn call(&mut self, mut req: BatchCloudwatchRequest) -> Self::Future { + let metadata = std::mem::take(req.metadata_mut()); + let events_byte_size = metadata.into_events_estimated_json_encoded_byte_size(); let key = req.key; let events = req @@ -200,10 +197,7 @@ impl Service for CloudwatchLogsPartitionSvc { }; svc.oneshot(events) - .map_ok(move |_x| CloudwatchResponse { - events_count, - events_byte_size, - }) + .map_ok(move |_x| CloudwatchResponse { events_byte_size }) .map_err(Into::into) .boxed() } diff --git a/src/sinks/aws_cloudwatch_logs/sink.rs b/src/sinks/aws_cloudwatch_logs/sink.rs index a1546e8135687..3c320ad6236e7 100644 --- a/src/sinks/aws_cloudwatch_logs/sink.rs +++ b/src/sinks/aws_cloudwatch_logs/sink.rs @@ -51,8 +51,9 @@ where }) .batched_partitioned(CloudwatchPartitioner, batcher_settings) .map(|(key, events)| { - let metadata = - RequestMetadata::from_batch(events.iter().map(|req| req.get_metadata())); + let metadata = RequestMetadata::from_batch( + events.iter().map(|req| req.get_metadata().clone()), + ); BatchCloudwatchRequest { key, @@ -80,8 +81,12 @@ impl Finalizable for BatchCloudwatchRequest { } impl MetaDescriptive for BatchCloudwatchRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } diff --git a/src/sinks/aws_kinesis/firehose/record.rs b/src/sinks/aws_kinesis/firehose/record.rs 
index 52a656240282c..114d487558558 100644 --- a/src/sinks/aws_kinesis/firehose/record.rs +++ b/src/sinks/aws_kinesis/firehose/record.rs @@ -66,9 +66,8 @@ impl SendRecord for KinesisFirehoseClient { .instrument(info_span!("request").or_current()) .await .map(|output: PutRecordBatchOutput| KinesisResponse { - count: rec_count, failure_count: output.failed_put_count().unwrap_or(0) as usize, - events_byte_size: JsonSize::new(total_size), + events_byte_size: CountByteSize(rec_count, JsonSize::new(total_size)).into(), }) } } diff --git a/src/sinks/aws_kinesis/request_builder.rs b/src/sinks/aws_kinesis/request_builder.rs index 1491d59b08ee7..0483dd01e318b 100644 --- a/src/sinks/aws_kinesis/request_builder.rs +++ b/src/sinks/aws_kinesis/request_builder.rs @@ -53,8 +53,12 @@ impl MetaDescriptive for KinesisRequest where R: Record, { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -102,7 +106,7 @@ where partition_key: processed_event.metadata.partition_key, }; let event = Event::from(processed_event.event); - let builder = RequestMetadataBuilder::from_events(&event); + let builder = RequestMetadataBuilder::from_event(&event); (kinesis_metadata, builder, event) } diff --git a/src/sinks/aws_kinesis/service.rs b/src/sinks/aws_kinesis/service.rs index 4ebc53f0d746a..ae3723638c6bc 100644 --- a/src/sinks/aws_kinesis/service.rs +++ b/src/sinks/aws_kinesis/service.rs @@ -5,7 +5,6 @@ use std::{ use aws_smithy_client::SdkError; use aws_types::region::Region; -use vector_core::internal_event::CountByteSize; use super::{ record::{Record, SendRecord}, @@ -37,9 +36,8 @@ where } pub struct KinesisResponse { - pub(crate) count: usize, pub(crate) failure_count: usize, - pub(crate) events_byte_size: JsonSize, + pub(crate) events_byte_size: GroupedCountByteSize, } impl DriverResponse for KinesisResponse { @@ -47,8 +45,8 @@ impl DriverResponse for KinesisResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.count, self.events_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_byte_size } } @@ -69,10 +67,9 @@ where } // Emission of internal events for errors and dropped events is handled upstream by the caller. 
- fn call(&mut self, requests: BatchKinesisRequest) -> Self::Future { - let events_byte_size = requests - .get_metadata() - .events_estimated_json_encoded_byte_size(); + fn call(&mut self, mut requests: BatchKinesisRequest) -> Self::Future { + let metadata = std::mem::take(requests.metadata_mut()); + let events_byte_size = metadata.into_events_estimated_json_encoded_byte_size(); let records = requests .events diff --git a/src/sinks/aws_kinesis/sink.rs b/src/sinks/aws_kinesis/sink.rs index bc3d53947c338..0341c0e8244d6 100644 --- a/src/sinks/aws_kinesis/sink.rs +++ b/src/sinks/aws_kinesis/sink.rs @@ -69,8 +69,9 @@ where self.batch_settings, ) .map(|(key, events)| { - let metadata = - RequestMetadata::from_batch(events.iter().map(|req| req.get_metadata())); + let metadata = RequestMetadata::from_batch( + events.iter().map(|req| req.get_metadata().clone()), + ); BatchKinesisRequest { key, events, @@ -159,7 +160,7 @@ where partition_key: self.key.partition_key.clone(), }, events: self.events.to_vec(), - metadata: self.metadata, + metadata: self.metadata.clone(), } } } @@ -177,8 +178,12 @@ impl MetaDescriptive for BatchKinesisRequest where R: Record + Clone, { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } diff --git a/src/sinks/aws_kinesis/streams/record.rs b/src/sinks/aws_kinesis/streams/record.rs index 339d6997af63a..c7eebe4f6e0a7 100644 --- a/src/sinks/aws_kinesis/streams/record.rs +++ b/src/sinks/aws_kinesis/streams/record.rs @@ -82,9 +82,8 @@ impl SendRecord for KinesisStreamClient { .instrument(info_span!("request").or_current()) .await .map(|output: PutRecordsOutput| KinesisResponse { - count: rec_count, failure_count: output.failed_record_count().unwrap_or(0) as usize, - events_byte_size: JsonSize::new(total_size), + events_byte_size: CountByteSize(rec_count, JsonSize::new(total_size)).into(), }) } } diff --git a/src/sinks/aws_sqs/request_builder.rs b/src/sinks/aws_sqs/request_builder.rs index 03d30f34f3737..34f54b5c1c0a2 100644 --- a/src/sinks/aws_sqs/request_builder.rs +++ b/src/sinks/aws_sqs/request_builder.rs @@ -93,7 +93,7 @@ impl RequestBuilder for SqsRequestBuilder { None => None, }; - let builder = RequestMetadataBuilder::from_events(&event); + let builder = RequestMetadataBuilder::from_event(&event); let sqs_metadata = SqsMetadata { finalizers: event.take_finalizers(), @@ -154,7 +154,11 @@ impl Finalizable for SendMessageEntry { } impl MetaDescriptive for SendMessageEntry { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } diff --git a/src/sinks/aws_sqs/service.rs b/src/sinks/aws_sqs/service.rs index 38b20fddfe21f..e08f68c2753ba 100644 --- a/src/sinks/aws_sqs/service.rs +++ b/src/sinks/aws_sqs/service.rs @@ -4,10 +4,8 @@ use aws_sdk_sqs::{error::SendMessageError, types::SdkError, Client as SqsClient} use futures::{future::BoxFuture, TryFutureExt}; use tower::Service; use tracing::Instrument; -use vector_common::json_size::JsonSize; -use vector_core::{ - event::EventStatus, internal_event::CountByteSize, stream::DriverResponse, ByteSizeOf, -}; +use vector_common::request_metadata::GroupedCountByteSize; +use vector_core::{event::EventStatus, stream::DriverResponse, ByteSizeOf}; use super::request_builder::SendMessageEntry; @@ -47,7 +45,10 @@ impl Service for 
SqsService { .send() .map_ok(|_| SendMessageResponse { byte_size, - json_byte_size: entry.metadata.events_estimated_json_encoded_byte_size(), + json_byte_size: entry + .metadata + .events_estimated_json_encoded_byte_size() + .clone(), }) .instrument(info_span!("request").or_current()) .await @@ -57,7 +58,7 @@ impl Service for SqsService { pub(crate) struct SendMessageResponse { byte_size: usize, - json_byte_size: JsonSize, + json_byte_size: GroupedCountByteSize, } impl DriverResponse for SendMessageResponse { @@ -65,8 +66,8 @@ impl DriverResponse for SendMessageResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(1, self.json_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.json_byte_size } fn bytes_sent(&self) -> Option { diff --git a/src/sinks/azure_common/config.rs b/src/sinks/azure_common/config.rs index 56ed8d210f694..bfb61d616ce63 100644 --- a/src/sinks/azure_common/config.rs +++ b/src/sinks/azure_common/config.rs @@ -10,9 +10,9 @@ use http::StatusCode; use snafu::Snafu; use vector_common::{ json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, + request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}, }; -use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; +use vector_core::stream::DriverResponse; use crate::{ event::{EventFinalizers, EventStatus, Finalizable}, @@ -35,8 +35,12 @@ impl Finalizable for AzureBlobRequest { } impl MetaDescriptive for AzureBlobRequest { - fn get_metadata(&self) -> RequestMetadata { - self.request_metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.request_metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.request_metadata } } @@ -64,8 +68,7 @@ impl RetryLogic for AzureBlobRetryLogic { #[derive(Debug)] pub struct AzureBlobResponse { pub inner: PutBlockBlobResponse, - pub count: usize, - pub events_byte_size: JsonSize, + pub events_byte_size: GroupedCountByteSize, pub byte_size: usize, } @@ -74,8 +77,8 @@ impl DriverResponse for AzureBlobResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.count, self.events_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_byte_size } fn bytes_sent(&self) -> Option { diff --git a/src/sinks/azure_common/service.rs b/src/sinks/azure_common/service.rs index 9cbd07dfbf620..122bd66525b18 100644 --- a/src/sinks/azure_common/service.rs +++ b/src/sinks/azure_common/service.rs @@ -57,8 +57,9 @@ impl Service for AzureBlobService { result.map(|inner| AzureBlobResponse { inner, - count: request.metadata.count, - events_byte_size: request.metadata.byte_size, + events_byte_size: request + .request_metadata + .into_events_estimated_json_encoded_byte_size(), byte_size, }) }) diff --git a/src/sinks/databend/service.rs b/src/sinks/databend/service.rs index 473a2d3220ba3..05a8b4629a767 100644 --- a/src/sinks/databend/service.rs +++ b/src/sinks/databend/service.rs @@ -9,8 +9,7 @@ use rand_distr::Alphanumeric; use snafu::Snafu; use tower::Service; use vector_common::finalization::{EventFinalizers, EventStatus, Finalizable}; -use vector_common::internal_event::CountByteSize; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}; use vector_core::stream::DriverResponse; use crate::{internal_events::EndpointBytesSent, sinks::util::retries::RetryLogic}; @@ -67,8 +66,12 @@ impl Finalizable for 
DatabendRequest { } impl MetaDescriptive for DatabendRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -82,11 +85,8 @@ impl DriverResponse for DatabendResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize( - self.metadata.event_count(), - self.metadata.events_estimated_json_encoded_byte_size(), - ) + fn events_sent(&self) -> &GroupedCountByteSize { + self.metadata.events_estimated_json_encoded_byte_size() } fn bytes_sent(&self) -> Option { @@ -205,7 +205,7 @@ impl Service for DatabendService { let service = self.clone(); let future = async move { - let metadata = request.get_metadata(); + let metadata = request.get_metadata().clone(); let stage_location = service.new_stage_location(); let protocol = service.client.get_protocol(); let endpoint = service.client.get_host(); diff --git a/src/sinks/datadog/events/request_builder.rs b/src/sinks/datadog/events/request_builder.rs index 664f99beca436..93e4eeeb17c31 100644 --- a/src/sinks/datadog/events/request_builder.rs +++ b/src/sinks/datadog/events/request_builder.rs @@ -42,8 +42,12 @@ impl ElementCount for DatadogEventsRequest { } impl MetaDescriptive for DatadogEventsRequest { - fn get_metadata(&self) -> RequestMetadata { - self.request_metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.request_metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.request_metadata } } @@ -86,7 +90,7 @@ impl RequestBuilder for DatadogEventsRequestBuilder { } fn split_input(&self, event: Event) -> (Self::Metadata, RequestMetadataBuilder, Self::Events) { - let builder = RequestMetadataBuilder::from_events(&event); + let builder = RequestMetadataBuilder::from_event(&event); let mut log = event.into_log(); let metadata = Metadata { diff --git a/src/sinks/datadog/events/service.rs b/src/sinks/datadog/events/service.rs index 693929d62e961..374bd3268b802 100644 --- a/src/sinks/datadog/events/service.rs +++ b/src/sinks/datadog/events/service.rs @@ -8,8 +8,8 @@ use futures::{ use http::Request; use hyper::Body; use tower::{Service, ServiceExt}; -use vector_common::{json_size::JsonSize, request_metadata::MetaDescriptive}; -use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive}; +use vector_core::stream::DriverResponse; use crate::{ event::EventStatus, @@ -23,7 +23,7 @@ use crate::{ pub struct DatadogEventsResponse { pub(self) event_status: EventStatus, pub http_status: http::StatusCode, - pub event_byte_size: JsonSize, + pub event_byte_size: GroupedCountByteSize, } impl DriverResponse for DatadogEventsResponse { @@ -31,8 +31,8 @@ impl DriverResponse for DatadogEventsResponse { self.event_status } - fn events_sent(&self) -> CountByteSize { - CountByteSize(1, self.event_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.event_byte_size } fn bytes_sent(&self) -> Option { @@ -85,12 +85,13 @@ impl Service for DatadogEventsService { } // Emission of Error internal event is handled upstream by the caller - fn call(&mut self, req: DatadogEventsRequest) -> Self::Future { + fn call(&mut self, mut req: DatadogEventsRequest) -> Self::Future { let mut http_service = self.batch_http_service.clone(); Box::pin(async move { + let metadata = std::mem::take(req.metadata_mut()); http_service.ready().await?; - let 
event_byte_size = req.get_metadata().events_estimated_json_encoded_byte_size(); + let event_byte_size = metadata.into_events_estimated_json_encoded_byte_size(); let http_response = http_service.call(req).await?; let event_status = if http_response.is_successful() { EventStatus::Delivered diff --git a/src/sinks/datadog/logs/service.rs b/src/sinks/datadog/logs/service.rs index 06bc923ad3e36..47effa754df5e 100644 --- a/src/sinks/datadog/logs/service.rs +++ b/src/sinks/datadog/logs/service.rs @@ -14,13 +14,9 @@ use hyper::Body; use indexmap::IndexMap; use tower::Service; use tracing::Instrument; -use vector_common::{ - json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, -}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}; use vector_core::{ event::{EventFinalizers, EventStatus, Finalizable}, - internal_event::CountByteSize, stream::DriverResponse, }; @@ -59,16 +55,19 @@ impl Finalizable for LogApiRequest { } impl MetaDescriptive for LogApiRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } #[derive(Debug)] pub struct LogApiResponse { event_status: EventStatus, - count: usize, - events_byte_size: JsonSize, + events_byte_size: GroupedCountByteSize, raw_byte_size: usize, } @@ -77,8 +76,8 @@ impl DriverResponse for LogApiResponse { self.event_status } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.count, self.events_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_byte_size } fn bytes_sent(&self) -> Option { @@ -125,7 +124,7 @@ impl Service for LogApiService { } // Emission of Error internal event is handled upstream by the caller - fn call(&mut self, request: LogApiRequest) -> Self::Future { + fn call(&mut self, mut request: LogApiRequest) -> Self::Future { let mut client = self.client.clone(); let http_request = Request::post(&self.uri) .header(CONTENT_TYPE, "application/json") @@ -139,10 +138,8 @@ impl Service for LogApiService { http_request }; - let count = request.get_metadata().event_count(); - let events_byte_size = request - .get_metadata() - .events_estimated_json_encoded_byte_size(); + let metadata = std::mem::take(request.metadata_mut()); + let events_byte_size = metadata.into_events_estimated_json_encoded_byte_size(); let raw_byte_size = request.uncompressed_size; let mut http_request = http_request.header(CONTENT_LENGTH, request.body.len()); @@ -162,7 +159,6 @@ impl Service for LogApiService { DatadogApiError::from_result(client.call(http_request).in_current_span().await).map( |_| LogApiResponse { event_status: EventStatus::Delivered, - count, events_byte_size, raw_byte_size, }, diff --git a/src/sinks/datadog/logs/tests.rs b/src/sinks/datadog/logs/tests.rs index efe366120100e..c8ef154280e4f 100644 --- a/src/sinks/datadog/logs/tests.rs +++ b/src/sinks/datadog/logs/tests.rs @@ -11,7 +11,10 @@ use futures::{ use http::request::Parts; use hyper::StatusCode; use indoc::indoc; -use vector_core::event::{BatchNotifier, BatchStatus, Event, LogEvent}; +use vector_core::{ + config::{init_telemetry, Tags, Telemetry}, + event::{BatchNotifier, BatchStatus, Event, LogEvent}, +}; use crate::{ config::SinkConfig, @@ -22,8 +25,8 @@ use crate::{ }, test_util::{ components::{ - run_and_assert_sink_compliance, run_and_assert_sink_error, COMPONENT_ERROR_TAGS, - SINK_TAGS, + run_and_assert_data_volume_sink_compliance, 
run_and_assert_sink_compliance, + run_and_assert_sink_error, COMPONENT_ERROR_TAGS, DATA_VOLUME_SINK_TAGS, SINK_TAGS, }, next_addr, random_lines_with_stream, }, @@ -71,6 +74,13 @@ fn event_with_api_key(msg: &str, key: &str) -> Event { e } +#[derive(PartialEq)] +enum TestType { + Happy, + Telemetry, + Error, +} + /// Starts a test sink with random lines running into it /// /// This function starts a Datadog Logs sink with a simplistic configuration and @@ -83,8 +93,20 @@ fn event_with_api_key(msg: &str, key: &str) -> Event { async fn start_test_detail( api_status: ApiStatus, batch_status: BatchStatus, - is_error: bool, + test_type: TestType, ) -> (Vec, Receiver<(http::request::Parts, Bytes)>) { + if test_type == TestType::Telemetry { + init_telemetry( + Telemetry { + tags: Tags { + emit_service: true, + emit_source: true, + }, + }, + true, + ); + } + let config = indoc! {r#" default_api_key = "atoken" compression = "none" @@ -105,10 +127,12 @@ async fn start_test_detail( let (batch, receiver) = BatchNotifier::new_with_receiver(); let (expected, events) = random_lines_with_stream(100, 10, Some(batch)); - if is_error { - run_and_assert_sink_error(sink, events, &COMPONENT_ERROR_TAGS).await; - } else { - run_and_assert_sink_compliance(sink, events, &SINK_TAGS).await; + match test_type { + TestType::Happy => run_and_assert_sink_compliance(sink, events, &SINK_TAGS).await, + TestType::Error => run_and_assert_sink_error(sink, events, &COMPONENT_ERROR_TAGS).await, + TestType::Telemetry => { + run_and_assert_data_volume_sink_compliance(sink, events, &DATA_VOLUME_SINK_TAGS).await + } } assert_eq!(receiver.await, batch_status); @@ -120,14 +144,21 @@ async fn start_test_success( api_status: ApiStatus, batch_status: BatchStatus, ) -> (Vec, Receiver<(http::request::Parts, Bytes)>) { - start_test_detail(api_status, batch_status, false).await + start_test_detail(api_status, batch_status, TestType::Happy).await +} + +async fn start_test_telemetry( + api_status: ApiStatus, + batch_status: BatchStatus, +) -> (Vec, Receiver<(http::request::Parts, Bytes)>) { + start_test_detail(api_status, batch_status, TestType::Telemetry).await } async fn start_test_error( api_status: ApiStatus, batch_status: BatchStatus, ) -> (Vec, Receiver<(http::request::Parts, Bytes)>) { - start_test_detail(api_status, batch_status, true).await + start_test_detail(api_status, batch_status, TestType::Error).await } /// Assert the basic functionality of the sink in good conditions @@ -174,6 +205,13 @@ async fn smoke() { } } +/// Assert the sink emits source and service tags when run with telemetry configured. 
+#[tokio::test] +async fn telemetry() { + let (expected, rx) = start_test_telemetry(ApiStatus::OKv1, BatchStatus::Delivered).await; + let _ = rx.take(expected.len()).collect::>().await; +} + #[tokio::test] /// Assert delivery error behavior for v1 API /// diff --git a/src/sinks/datadog/metrics/service.rs b/src/sinks/datadog/metrics/service.rs index d15716b99d8ad..6abacfcc739b7 100644 --- a/src/sinks/datadog/metrics/service.rs +++ b/src/sinks/datadog/metrics/service.rs @@ -10,13 +10,9 @@ use http::{ use hyper::Body; use snafu::ResultExt; use tower::Service; -use vector_common::{ - json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, -}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}; use vector_core::{ event::{EventFinalizers, EventStatus, Finalizable}, - internal_event::CountByteSize, stream::DriverResponse, }; @@ -115,8 +111,12 @@ impl Finalizable for DatadogMetricsRequest { } impl MetaDescriptive for DatadogMetricsRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -125,8 +125,7 @@ impl MetaDescriptive for DatadogMetricsRequest { pub struct DatadogMetricsResponse { status_code: StatusCode, body: Bytes, - batch_size: usize, - byte_size: JsonSize, + byte_size: GroupedCountByteSize, raw_byte_size: usize, } @@ -141,8 +140,8 @@ impl DriverResponse for DatadogMetricsResponse { } } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.batch_size, self.byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.byte_size } fn bytes_sent(&self) -> Option { @@ -180,15 +179,13 @@ impl Service for DatadogMetricsService { } // Emission of Error internal event is handled upstream by the caller - fn call(&mut self, request: DatadogMetricsRequest) -> Self::Future { + fn call(&mut self, mut request: DatadogMetricsRequest) -> Self::Future { let client = self.client.clone(); let api_key = self.api_key.clone(); Box::pin(async move { - let byte_size = request - .get_metadata() - .events_estimated_json_encoded_byte_size(); - let batch_size = request.get_metadata().event_count(); + let metadata = std::mem::take(request.metadata_mut()); + let byte_size = metadata.into_events_estimated_json_encoded_byte_size(); let raw_byte_size = request.raw_bytes; let request = request @@ -208,7 +205,6 @@ impl Service for DatadogMetricsService { Ok(DatadogMetricsResponse { status_code: parts.status, body, - batch_size, byte_size, raw_byte_size, }) diff --git a/src/sinks/datadog/traces/service.rs b/src/sinks/datadog/traces/service.rs index 66e46b8075ca1..5128d855edb0f 100644 --- a/src/sinks/datadog/traces/service.rs +++ b/src/sinks/datadog/traces/service.rs @@ -9,13 +9,9 @@ use http::{Request, StatusCode, Uri}; use hyper::Body; use snafu::ResultExt; use tower::Service; -use vector_common::{ - json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, -}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}; use vector_core::{ event::{EventFinalizers, EventStatus, Finalizable}, - internal_event::CountByteSize, stream::DriverResponse, }; @@ -84,8 +80,12 @@ impl Finalizable for TraceApiRequest { } impl MetaDescriptive for TraceApiRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut 
RequestMetadata { + &mut self.metadata } } @@ -93,8 +93,7 @@ impl MetaDescriptive for TraceApiRequest { pub struct TraceApiResponse { status_code: StatusCode, body: Bytes, - batch_size: usize, - byte_size: JsonSize, + byte_size: GroupedCountByteSize, uncompressed_size: usize, } @@ -109,8 +108,8 @@ impl DriverResponse for TraceApiResponse { } } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.batch_size, self.byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.byte_size } fn bytes_sent(&self) -> Option { @@ -145,14 +144,12 @@ impl Service for TraceApiService { } // Emission of Error internal event is handled upstream by the caller - fn call(&mut self, request: TraceApiRequest) -> Self::Future { + fn call(&mut self, mut request: TraceApiRequest) -> Self::Future { let client = self.client.clone(); Box::pin(async move { - let byte_size = request - .get_metadata() - .events_estimated_json_encoded_byte_size(); - let batch_size = request.get_metadata().event_count(); + let metadata = std::mem::take(request.metadata_mut()); + let byte_size = metadata.into_events_estimated_json_encoded_byte_size(); let uncompressed_size = request.uncompressed_size; let http_request = request.into_http_request().context(BuildRequestSnafu)?; @@ -166,7 +163,6 @@ impl Service for TraceApiService { Ok(TraceApiResponse { status_code: parts.status, body, - batch_size, byte_size, uncompressed_size, }) diff --git a/src/sinks/elasticsearch/encoder.rs b/src/sinks/elasticsearch/encoder.rs index 5d27ba891b596..f5b39a52b23a0 100644 --- a/src/sinks/elasticsearch/encoder.rs +++ b/src/sinks/elasticsearch/encoder.rs @@ -2,7 +2,10 @@ use std::{io, io::Write}; use serde::Serialize; use vector_buffers::EventCount; -use vector_common::json_size::JsonSize; +use vector_common::{ + json_size::JsonSize, + request_metadata::{EventCountTags, GetEventCountTags}, +}; use vector_core::{event::Event, ByteSizeOf, EstimatedJsonEncodedSizeOf}; use crate::{ @@ -47,6 +50,12 @@ impl EventCount for ProcessedEvent { } } +impl GetEventCountTags for ProcessedEvent { + fn get_tags(&self) -> EventCountTags { + self.log.get_tags() + } +} + #[derive(PartialEq, Eq, Default, Clone, Debug)] pub struct ElasticsearchEncoder { pub transformer: Transformer, diff --git a/src/sinks/elasticsearch/retry.rs b/src/sinks/elasticsearch/retry.rs index 4f5a6e0c73ed6..bf40d1751f051 100644 --- a/src/sinks/elasticsearch/retry.rs +++ b/src/sinks/elasticsearch/retry.rs @@ -160,7 +160,7 @@ mod tests { use bytes::Bytes; use http::Response; use similar_asserts::assert_eq; - use vector_common::json_size::JsonSize; + use vector_common::{internal_event::CountByteSize, json_size::JsonSize}; use super::*; use crate::event::EventStatus; @@ -180,7 +180,7 @@ mod tests { http_response: response, event_status: EventStatus::Rejected, batch_size: 1, - events_byte_size: JsonSize::new(1), + events_byte_size: CountByteSize(1, JsonSize::new(1)).into(), }), RetryAction::DontRetry(_) )); @@ -201,7 +201,7 @@ mod tests { http_response: response, event_status: EventStatus::Errored, batch_size: 1, - events_byte_size: JsonSize::new(1), + events_byte_size: CountByteSize(1, JsonSize::new(1)).into(), }), RetryAction::Retry(_) )); diff --git a/src/sinks/elasticsearch/service.rs b/src/sinks/elasticsearch/service.rs index bdf0824915de6..38ba9a41b3af3 100644 --- a/src/sinks/elasticsearch/service.rs +++ b/src/sinks/elasticsearch/service.rs @@ -13,9 +13,9 @@ use hyper::{service::Service, Body, Request}; use tower::ServiceExt; use vector_common::{ json_size::JsonSize, - 
request_metadata::{MetaDescriptive, RequestMetadata}, + request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}, }; -use vector_core::{internal_event::CountByteSize, stream::DriverResponse, ByteSizeOf}; +use vector_core::{stream::DriverResponse, ByteSizeOf}; use crate::sinks::elasticsearch::sign_request; use crate::{ @@ -57,8 +57,12 @@ impl Finalizable for ElasticsearchRequest { } impl MetaDescriptive for ElasticsearchRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -149,7 +153,7 @@ pub struct ElasticsearchResponse { pub http_response: Response, pub event_status: EventStatus, pub batch_size: usize, - pub events_byte_size: JsonSize, + pub events_byte_size: GroupedCountByteSize, } impl DriverResponse for ElasticsearchResponse { @@ -157,8 +161,8 @@ impl DriverResponse for ElasticsearchResponse { self.event_status } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.batch_size, self.events_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_byte_size } } @@ -173,12 +177,13 @@ impl Service for ElasticsearchService { } // Emission of internal events for errors and dropped events is handled upstream by the caller. - fn call(&mut self, req: ElasticsearchRequest) -> Self::Future { + fn call(&mut self, mut req: ElasticsearchRequest) -> Self::Future { let mut http_service = self.batch_service.clone(); Box::pin(async move { http_service.ready().await?; let batch_size = req.batch_size; - let events_byte_size = req.events_byte_size; + let events_byte_size = + std::mem::take(req.metadata_mut()).into_events_estimated_json_encoded_byte_size(); let http_response = http_service.call(req).await?; let event_status = get_event_status(&http_response); diff --git a/src/sinks/gcp/chronicle_unstructured.rs b/src/sinks/gcp/chronicle_unstructured.rs index 3f7b3d4494d25..ddf70f31e4e25 100644 --- a/src/sinks/gcp/chronicle_unstructured.rs +++ b/src/sinks/gcp/chronicle_unstructured.rs @@ -286,8 +286,12 @@ impl Finalizable for ChronicleRequest { } impl MetaDescriptive for ChronicleRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -474,7 +478,7 @@ impl Service for ChronicleService { HeaderValue::from_str(&request.body.len().to_string()).unwrap(), ); - let metadata = request.get_metadata(); + let metadata = request.get_metadata().clone(); let mut http_request = builder.body(Body::from(request.body)).unwrap(); self.creds.apply(&mut http_request); diff --git a/src/sinks/gcs_common/service.rs b/src/sinks/gcs_common/service.rs index 502335e8430b2..9a75203629dfb 100644 --- a/src/sinks/gcs_common/service.rs +++ b/src/sinks/gcs_common/service.rs @@ -8,8 +8,8 @@ use http::{ }; use hyper::Body; use tower::Service; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; -use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}; +use vector_core::stream::DriverResponse; use crate::{ event::{EventFinalizers, EventStatus, Finalizable}, @@ -50,8 +50,12 @@ impl Finalizable for GcsRequest { } impl MetaDescriptive for GcsRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn 
get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -84,11 +88,8 @@ impl DriverResponse for GcsResponse { } } - fn events_sent(&self) -> CountByteSize { - CountByteSize( - self.metadata.event_count(), - self.metadata.events_estimated_json_encoded_byte_size(), - ) + fn events_sent(&self) -> &GroupedCountByteSize { + self.metadata.events_estimated_json_encoded_byte_size() } fn bytes_sent(&self) -> Option { diff --git a/src/sinks/kafka/request_builder.rs b/src/sinks/kafka/request_builder.rs index 794de6174396e..9d1edd0d97c43 100644 --- a/src/sinks/kafka/request_builder.rs +++ b/src/sinks/kafka/request_builder.rs @@ -37,7 +37,7 @@ impl KafkaRequestBuilder { }) .ok()?; - let metadata_builder = RequestMetadataBuilder::from_events(&event); + let metadata_builder = RequestMetadataBuilder::from_event(&event); let metadata = KafkaRequestMetadata { finalizers: event.take_finalizers(), diff --git a/src/sinks/kafka/service.rs b/src/sinks/kafka/service.rs index f271a7a580e53..299a9de4ee056 100644 --- a/src/sinks/kafka/service.rs +++ b/src/sinks/kafka/service.rs @@ -8,7 +8,7 @@ use rdkafka::{ util::Timeout, }; use vector_core::internal_event::{ - ByteSize, BytesSent, CountByteSize, InternalEventHandle as _, Protocol, Registered, + ByteSize, BytesSent, InternalEventHandle as _, Protocol, Registered, }; use crate::{kafka::KafkaStatisticsContext, sinks::prelude::*}; @@ -28,7 +28,7 @@ pub struct KafkaRequestMetadata { } pub struct KafkaResponse { - event_byte_size: JsonSize, + event_byte_size: GroupedCountByteSize, } impl DriverResponse for KafkaResponse { @@ -36,8 +36,8 @@ impl DriverResponse for KafkaResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(1, self.event_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.event_byte_size } } @@ -48,8 +48,12 @@ impl Finalizable for KafkaRequest { } impl MetaDescriptive for KafkaRequest { - fn get_metadata(&self) -> RequestMetadata { - self.request_metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.request_metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.request_metadata } } @@ -82,8 +86,8 @@ impl Service for KafkaService { Box::pin(async move { let event_byte_size = request - .get_metadata() - .events_estimated_json_encoded_byte_size(); + .request_metadata + .into_events_estimated_json_encoded_byte_size(); let mut record = FutureRecord::to(&request.metadata.topic).payload(request.body.as_ref()); diff --git a/src/sinks/kafka/tests.rs b/src/sinks/kafka/tests.rs index ba1e62e1eeb9e..9ed3b66ba3fc7 100644 --- a/src/sinks/kafka/tests.rs +++ b/src/sinks/kafka/tests.rs @@ -18,7 +18,10 @@ mod integration_test { message::Headers, Message, Offset, TopicPartitionList, }; - use vector_core::event::{BatchNotifier, BatchStatus}; + use vector_core::{ + config::{init_telemetry, Tags, Telemetry}, + event::{BatchNotifier, BatchStatus}, + }; use crate::{ event::Value, @@ -32,7 +35,10 @@ mod integration_test { prelude::*, }, test_util::{ - components::{assert_sink_compliance, SINK_TAGS}, + components::{ + assert_data_volume_sink_compliance, assert_sink_compliance, DATA_VOLUME_SINK_TAGS, + SINK_TAGS, + }, random_lines_with_stream, random_string, wait_for, }, tls::{TlsConfig, TlsEnableableConfig, TEST_PEM_INTERMEDIATE_CA_PATH}, @@ -72,31 +78,74 @@ mod integration_test { #[tokio::test] async fn kafka_happy_path_plaintext() { crate::test_util::trace_init(); - 
kafka_happy_path(kafka_address(9091), None, None, KafkaCompression::None).await; + kafka_happy_path( + kafka_address(9091), + None, + None, + KafkaCompression::None, + true, + ) + .await; + kafka_happy_path( + kafka_address(9091), + None, + None, + KafkaCompression::None, + false, + ) + .await; } #[tokio::test] async fn kafka_happy_path_gzip() { crate::test_util::trace_init(); - kafka_happy_path(kafka_address(9091), None, None, KafkaCompression::Gzip).await; + kafka_happy_path( + kafka_address(9091), + None, + None, + KafkaCompression::Gzip, + false, + ) + .await; } #[tokio::test] async fn kafka_happy_path_lz4() { crate::test_util::trace_init(); - kafka_happy_path(kafka_address(9091), None, None, KafkaCompression::Lz4).await; + kafka_happy_path( + kafka_address(9091), + None, + None, + KafkaCompression::Lz4, + false, + ) + .await; } #[tokio::test] async fn kafka_happy_path_snappy() { crate::test_util::trace_init(); - kafka_happy_path(kafka_address(9091), None, None, KafkaCompression::Snappy).await; + kafka_happy_path( + kafka_address(9091), + None, + None, + KafkaCompression::Snappy, + false, + ) + .await; } #[tokio::test] async fn kafka_happy_path_zstd() { crate::test_util::trace_init(); - kafka_happy_path(kafka_address(9091), None, None, KafkaCompression::Zstd).await; + kafka_happy_path( + kafka_address(9091), + None, + None, + KafkaCompression::Zstd, + false, + ) + .await; } async fn kafka_batch_options_overrides( @@ -208,6 +257,7 @@ mod integration_test { options: TlsConfig::test_config(), }), KafkaCompression::None, + false, ) .await; } @@ -225,6 +275,7 @@ mod integration_test { }), None, KafkaCompression::None, + false, ) .await; } @@ -234,7 +285,22 @@ mod integration_test { sasl: Option, tls: Option, compression: KafkaCompression, + test_telemetry_tags: bool, ) { + if test_telemetry_tags { + // We need to configure Vector to emit the service and source tags. + // The default is to not emit these. 
+ init_telemetry( + Telemetry { + tags: Tags { + emit_service: true, + emit_source: true, + }, + }, + true, + ); + } + let topic = format!("test-{}", random_string(10)); let headers_key = "headers_key".to_string(); let kafka_auth = KafkaAuthConfig { sasl, tls }; @@ -273,13 +339,24 @@ mod integration_test { }); events }); - assert_sink_compliance(&SINK_TAGS, async move { - let sink = KafkaSink::new(config).unwrap(); - let sink = VectorSink::from_event_streamsink(sink); - sink.run(input_events).await - }) - .await - .expect("Running sink failed"); + + if test_telemetry_tags { + assert_data_volume_sink_compliance(&DATA_VOLUME_SINK_TAGS, async move { + let sink = KafkaSink::new(config).unwrap(); + let sink = VectorSink::from_event_streamsink(sink); + sink.run(input_events).await + }) + .await + .expect("Running sink failed"); + } else { + assert_sink_compliance(&SINK_TAGS, async move { + let sink = KafkaSink::new(config).unwrap(); + let sink = VectorSink::from_event_streamsink(sink); + sink.run(input_events).await + }) + .await + .expect("Running sink failed"); + } assert_eq!(receiver.try_recv(), Ok(BatchStatus::Delivered)); // read back everything from the beginning diff --git a/src/sinks/loki/event.rs b/src/sinks/loki/event.rs index 6b85153c0655b..22d399f970710 100644 --- a/src/sinks/loki/event.rs +++ b/src/sinks/loki/event.rs @@ -1,11 +1,8 @@ use std::{collections::HashMap, io}; -use crate::sinks::prelude::*; +use crate::sinks::{prelude::*, util::encoding::Encoder}; use bytes::Bytes; use serde::{ser::SerializeSeq, Serialize}; -use vector_buffers::EventCount; - -use crate::sinks::util::encoding::{write_all, Encoder}; pub type Labels = Vec<(String, String)>; @@ -155,6 +152,7 @@ pub struct LokiRecord { pub event: LokiEvent, pub json_byte_size: JsonSize, pub finalizers: EventFinalizers, + pub event_count_tags: EventCountTags, } impl ByteSizeOf for LokiRecord { @@ -186,6 +184,12 @@ impl Finalizable for LokiRecord { } } +impl GetEventCountTags for LokiRecord { + fn get_tags(&self) -> EventCountTags { + self.event_count_tags.clone() + } +} + #[derive(Hash, Eq, PartialEq, Clone, Debug)] pub struct PartitionKey { pub tenant_id: Option, diff --git a/src/sinks/loki/service.rs b/src/sinks/loki/service.rs index 1ac3c871631cb..edcc762042fba 100644 --- a/src/sinks/loki/service.rs +++ b/src/sinks/loki/service.rs @@ -4,7 +4,6 @@ use bytes::Bytes; use http::StatusCode; use snafu::Snafu; use tracing::Instrument; -use vector_core::internal_event::CountByteSize; use crate::sinks::loki::config::{CompressionConfigAdapter, ExtendedCompression}; use crate::{ @@ -50,11 +49,8 @@ impl DriverResponse for LokiResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize( - self.metadata.event_count(), - self.metadata.events_estimated_json_encoded_byte_size(), - ) + fn events_sent(&self) -> &GroupedCountByteSize { + self.metadata.events_estimated_json_encoded_byte_size() } fn bytes_sent(&self) -> Option { @@ -78,8 +74,12 @@ impl Finalizable for LokiRequest { } impl MetaDescriptive for LokiRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -120,7 +120,7 @@ impl Service for LokiService { }; let mut req = http::Request::post(&self.endpoint.uri).header("Content-Type", content_type); - let metadata = request.get_metadata(); + let metadata = request.get_metadata().clone(); if let Some(tenant_id) = request.tenant_id { req = 
req.header("X-Scope-OrgID", tenant_id); diff --git a/src/sinks/loki/sink.rs b/src/sinks/loki/sink.rs index 1ba3cbee6268a..74e133887b6b4 100644 --- a/src/sinks/loki/sink.rs +++ b/src/sinks/loki/sink.rs @@ -262,6 +262,8 @@ impl EventEncoder { event.as_mut_log().remove_timestamp(); } + let event_count_tags = event.get_tags(); + self.transformer.transform(&mut event); let mut bytes = BytesMut::new(); self.encoder.encode(event, &mut bytes).ok(); @@ -285,6 +287,7 @@ impl EventEncoder { partition, finalizers, json_byte_size, + event_count_tags, }) } } diff --git a/src/sinks/new_relic/service.rs b/src/sinks/new_relic/service.rs index 67a2f27ad28f9..290276b72b5cf 100644 --- a/src/sinks/new_relic/service.rs +++ b/src/sinks/new_relic/service.rs @@ -13,10 +13,9 @@ use http::{ use hyper::Body; use tower::Service; use tracing::Instrument; -use vector_common::request_metadata::{MetaDescriptive, RequestMetadata}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}; use vector_core::{ event::{EventFinalizers, EventStatus, Finalizable}, - internal_event::CountByteSize, stream::DriverResponse, }; @@ -39,8 +38,12 @@ impl Finalizable for NewRelicApiRequest { } impl MetaDescriptive for NewRelicApiRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -55,11 +58,8 @@ impl DriverResponse for NewRelicApiResponse { self.event_status } - fn events_sent(&self) -> CountByteSize { - CountByteSize( - self.metadata.event_count(), - self.metadata.events_estimated_json_encoded_byte_size(), - ) + fn events_sent(&self) -> &GroupedCountByteSize { + self.metadata.events_estimated_json_encoded_byte_size() } fn bytes_sent(&self) -> Option { @@ -97,7 +97,7 @@ impl Service for NewRelicApiService { }; let payload_len = request.payload.len(); - let metadata = request.get_metadata(); + let metadata = request.get_metadata().clone(); let http_request = http_request .header(CONTENT_LENGTH, payload_len) .body(Body::from(request.payload)) diff --git a/src/sinks/opendal_common.rs b/src/sinks/opendal_common.rs index ba961607be438..f8e5877b8e1ed 100644 --- a/src/sinks/opendal_common.rs +++ b/src/sinks/opendal_common.rs @@ -20,10 +20,9 @@ use tracing::Instrument; use vector_common::{ finalization::{EventStatus, Finalizable}, json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, + request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}, }; use vector_core::{ - internal_event::CountByteSize, sink::StreamSink, stream::{BatcherSettings, DriverResponse}, EstimatedJsonEncodedSizeOf, @@ -153,8 +152,12 @@ pub struct OpenDalRequest { } impl MetaDescriptive for OpenDalRequest { - fn get_metadata(&self) -> RequestMetadata { - self.request_metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.request_metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.request_metadata } } @@ -237,8 +240,7 @@ impl RequestBuilder<(String, Vec)> for OpenDalRequestBuilder { /// OpenDalResponse is the response returned by OpenDAL services. 
#[derive(Debug)] pub struct OpenDalResponse { - pub count: usize, - pub events_byte_size: JsonSize, + pub events_byte_size: GroupedCountByteSize, pub byte_size: usize, } @@ -247,8 +249,8 @@ impl DriverResponse for OpenDalResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.count, self.events_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_byte_size } fn bytes_sent(&self) -> Option { @@ -277,8 +279,9 @@ impl Service for OpenDalService { .in_current_span() .await; result.map(|_| OpenDalResponse { - count: request.metadata.count, - events_byte_size: request.metadata.byte_size, + events_byte_size: request + .request_metadata + .into_events_estimated_json_encoded_byte_size(), byte_size, }) }) diff --git a/src/sinks/prelude.rs b/src/sinks/prelude.rs index 15f5d99376a0f..ffef449c78df1 100644 --- a/src/sinks/prelude.rs +++ b/src/sinks/prelude.rs @@ -30,7 +30,9 @@ pub use vector_common::{ finalization::{EventFinalizers, EventStatus, Finalizable}, internal_event::CountByteSize, json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, + request_metadata::{ + EventCountTags, GetEventCountTags, GroupedCountByteSize, MetaDescriptive, RequestMetadata, + }, }; pub use vector_config::configurable_component; pub use vector_core::{ diff --git a/src/sinks/pulsar/request_builder.rs b/src/sinks/pulsar/request_builder.rs index b284ffef1ab26..de62f179dcb30 100644 --- a/src/sinks/pulsar/request_builder.rs +++ b/src/sinks/pulsar/request_builder.rs @@ -41,7 +41,7 @@ impl RequestBuilder for PulsarRequestBuilder { &self, mut input: PulsarEvent, ) -> (Self::Metadata, RequestMetadataBuilder, Self::Events) { - let builder = RequestMetadataBuilder::from_events(&input); + let builder = RequestMetadataBuilder::from_event(&input.event); let metadata = PulsarMetadata { finalizers: input.event.take_finalizers(), key: input.key, diff --git a/src/sinks/pulsar/service.rs b/src/sinks/pulsar/service.rs index b04d2eb0d13e5..8afabb3260207 100644 --- a/src/sinks/pulsar/service.rs +++ b/src/sinks/pulsar/service.rs @@ -6,7 +6,6 @@ use bytes::Bytes; use pulsar::producer::Message; use pulsar::{Error as PulsarError, Executor, MultiTopicProducer, ProducerOptions, Pulsar}; use tokio::sync::Mutex; -use vector_common::internal_event::CountByteSize; use crate::internal_events::PulsarSendingError; use crate::sinks::{prelude::*, pulsar::request_builder::PulsarMetadata}; @@ -20,7 +19,7 @@ pub(super) struct PulsarRequest { pub struct PulsarResponse { byte_size: usize, - event_byte_size: JsonSize, + event_byte_size: GroupedCountByteSize, } impl DriverResponse for PulsarResponse { @@ -28,8 +27,8 @@ impl DriverResponse for PulsarResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(1, self.event_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.event_byte_size } fn bytes_sent(&self) -> Option { @@ -44,8 +43,12 @@ impl Finalizable for PulsarRequest { } impl MetaDescriptive for PulsarRequest { - fn get_metadata(&self) -> RequestMetadata { - self.request_metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.request_metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.request_metadata } } @@ -134,7 +137,7 @@ impl Service for PulsarService { byte_size, event_byte_size: request .request_metadata - .events_estimated_json_encoded_byte_size(), + .into_events_estimated_json_encoded_byte_size(), }), Err(e) => { emit!(PulsarSendingError { diff --git 
a/src/sinks/s3_common/service.rs b/src/sinks/s3_common/service.rs index c9c12ac4bcb69..5ad3bea516d5c 100644 --- a/src/sinks/s3_common/service.rs +++ b/src/sinks/s3_common/service.rs @@ -11,13 +11,9 @@ use futures::future::BoxFuture; use md5::Digest; use tower::Service; use tracing::Instrument; -use vector_common::{ - json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, -}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}; use vector_core::{ event::{EventFinalizers, EventStatus, Finalizable}, - internal_event::CountByteSize, stream::DriverResponse, }; @@ -41,8 +37,12 @@ impl Finalizable for S3Request { } impl MetaDescriptive for S3Request { - fn get_metadata(&self) -> RequestMetadata { - self.request_metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.request_metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.request_metadata } } @@ -55,8 +55,7 @@ pub struct S3Metadata { #[derive(Debug)] pub struct S3Response { - count: usize, - events_byte_size: JsonSize, + events_byte_size: GroupedCountByteSize, } impl DriverResponse for S3Response { @@ -64,8 +63,8 @@ impl DriverResponse for S3Response { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.count, self.events_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_byte_size } } @@ -102,11 +101,6 @@ impl Service for S3Service { // Emission of internal events for errors and dropped events is handled upstream by the caller. fn call(&mut self, request: S3Request) -> Self::Future { - let count = request.get_metadata().event_count(); - let events_byte_size = request - .get_metadata() - .events_estimated_json_encoded_byte_size(); - let options = request.options; let content_encoding = request.content_encoding; @@ -127,6 +121,10 @@ impl Service for S3Service { tagging.finish() }); + let events_byte_size = request + .request_metadata + .into_events_estimated_json_encoded_byte_size(); + let client = self.client.clone(); Box::pin(async move { @@ -150,10 +148,7 @@ impl Service for S3Service { let result = request.send().in_current_span().await; - result.map(|_| S3Response { - count, - events_byte_size, - }) + result.map(|_| S3Response { events_byte_size }) }) } } diff --git a/src/sinks/splunk_hec/common/request.rs b/src/sinks/splunk_hec/common/request.rs index ba0ab442076ac..f1fc2366b6b30 100644 --- a/src/sinks/splunk_hec/common/request.rs +++ b/src/sinks/splunk_hec/common/request.rs @@ -40,7 +40,11 @@ impl Finalizable for HecRequest { } impl MetaDescriptive for HecRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } diff --git a/src/sinks/splunk_hec/common/response.rs b/src/sinks/splunk_hec/common/response.rs index 65eaea0f12bcf..16a7b74abc1ab 100644 --- a/src/sinks/splunk_hec/common/response.rs +++ b/src/sinks/splunk_hec/common/response.rs @@ -1,11 +1,10 @@ -use vector_common::json_size::JsonSize; -use vector_core::internal_event::CountByteSize; +use vector_common::request_metadata::GroupedCountByteSize; use vector_core::{event::EventStatus, stream::DriverResponse}; pub struct HecResponse { pub event_status: EventStatus, pub events_count: usize, - pub events_byte_size: JsonSize, + pub events_byte_size: GroupedCountByteSize, } impl AsRef for HecResponse { @@ -19,7 +18,7 @@ impl DriverResponse for HecResponse { 
self.event_status } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.events_count, self.events_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_byte_size } } diff --git a/src/sinks/splunk_hec/common/service.rs b/src/sinks/splunk_hec/common/service.rs index 9492f11137dbe..a8abc57e77b09 100644 --- a/src/sinks/splunk_hec/common/service.rs +++ b/src/sinks/splunk_hec/common/service.rs @@ -109,12 +109,13 @@ where } } - fn call(&mut self, req: HecRequest) -> Self::Future { + fn call(&mut self, mut req: HecRequest) -> Self::Future { let ack_finalizer_tx = self.ack_finalizer_tx.clone(); let ack_slot = self.current_ack_slot.take(); - let events_count = req.get_metadata().event_count(); - let events_byte_size = req.get_metadata().events_estimated_json_encoded_byte_size(); + let metadata = std::mem::take(req.metadata_mut()); + let events_count = metadata.event_count(); + let events_byte_size = metadata.into_events_estimated_json_encoded_byte_size(); let response = self.inner.call(req); Box::pin(async move { diff --git a/src/sinks/statsd/service.rs b/src/sinks/statsd/service.rs index 5686dc22de1ea..5ab5e6092e7ed 100644 --- a/src/sinks/statsd/service.rs +++ b/src/sinks/statsd/service.rs @@ -4,8 +4,7 @@ use futures_util::future::BoxFuture; use tower::Service; use vector_common::{ finalization::{EventFinalizers, EventStatus, Finalizable}, - internal_event::CountByteSize, - request_metadata::{MetaDescriptive, RequestMetadata}, + request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}, }; use vector_core::stream::DriverResponse; @@ -24,8 +23,12 @@ impl Finalizable for StatsdRequest { } impl MetaDescriptive for StatsdRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -46,11 +49,8 @@ impl DriverResponse for StatsdResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize( - self.metadata.event_count(), - self.metadata.events_estimated_json_encoded_byte_size(), - ) + fn events_sent(&self) -> &GroupedCountByteSize { + self.metadata.events_estimated_json_encoded_byte_size() } fn bytes_sent(&self) -> Option { diff --git a/src/sinks/util/metadata.rs b/src/sinks/util/metadata.rs index d89b51140e5f6..521f1c080995c 100644 --- a/src/sinks/util/metadata.rs +++ b/src/sinks/util/metadata.rs @@ -1,9 +1,13 @@ use std::num::NonZeroUsize; use vector_buffers::EventCount; -use vector_core::{ByteSizeOf, EstimatedJsonEncodedSizeOf}; +use vector_core::{config, ByteSizeOf, EstimatedJsonEncodedSizeOf}; -use vector_common::{json_size::JsonSize, request_metadata::RequestMetadata}; +use vector_common::{ + internal_event::CountByteSize, + json_size::JsonSize, + request_metadata::{GetEventCountTags, GroupedCountByteSize, RequestMetadata}, +}; use super::request_builder::EncodeResult; @@ -11,22 +15,47 @@ use super::request_builder::EncodeResult; pub struct RequestMetadataBuilder { event_count: usize, events_byte_size: usize, - events_estimated_json_encoded_byte_size: JsonSize, + events_estimated_json_encoded_byte_size: GroupedCountByteSize, } impl RequestMetadataBuilder { - pub fn from_events(events: E) -> Self + pub fn from_events(events: &[E]) -> Self where - E: ByteSizeOf + EventCount + EstimatedJsonEncodedSizeOf, + E: ByteSizeOf + EventCount + GetEventCountTags + EstimatedJsonEncodedSizeOf, { + let mut size = config::telemetry().create_request_count_byte_size(); + + 
let mut event_count = 0; + let mut events_byte_size = 0; + + for event in events { + event_count += 1; + events_byte_size += event.size_of(); + size.add_event(event, event.estimated_json_encoded_size_of()); + } + + Self { + event_count, + events_byte_size, + events_estimated_json_encoded_byte_size: size, + } + } + + pub fn from_event(event: &E) -> Self + where + E: ByteSizeOf + GetEventCountTags + EstimatedJsonEncodedSizeOf, + { + let mut size = config::telemetry().create_request_count_byte_size(); + size.add_event(event, event.estimated_json_encoded_size_of()); + Self { - event_count: events.event_count(), - events_byte_size: events.size_of(), - events_estimated_json_encoded_byte_size: events.estimated_json_encoded_size_of(), + event_count: 1, + events_byte_size: event.size_of(), + events_estimated_json_encoded_byte_size: size, } } - pub const fn new( + pub fn new( event_count: usize, events_byte_size: usize, events_estimated_json_encoded_byte_size: JsonSize, @@ -34,17 +63,23 @@ impl RequestMetadataBuilder { Self { event_count, events_byte_size, - events_estimated_json_encoded_byte_size, + events_estimated_json_encoded_byte_size: CountByteSize( + event_count, + events_estimated_json_encoded_byte_size, + ) + .into(), } } pub fn track_event(&mut self, event: E) where - E: ByteSizeOf + EstimatedJsonEncodedSizeOf, + E: ByteSizeOf + GetEventCountTags + EstimatedJsonEncodedSizeOf, { self.event_count += 1; self.events_byte_size += event.size_of(); - self.events_estimated_json_encoded_byte_size += event.estimated_json_encoded_size_of(); + let json_size = event.estimated_json_encoded_size_of(); + self.events_estimated_json_encoded_byte_size + .add_event(&event, json_size); } pub fn with_request_size(&self, size: NonZeroUsize) -> RequestMetadata { @@ -55,7 +90,7 @@ impl RequestMetadataBuilder { self.events_byte_size, size, size, - self.events_estimated_json_encoded_byte_size, + self.events_estimated_json_encoded_byte_size.clone(), ) } @@ -67,7 +102,7 @@ impl RequestMetadataBuilder { result .compressed_byte_size .unwrap_or(result.uncompressed_byte_size), - self.events_estimated_json_encoded_byte_size, + self.events_estimated_json_encoded_byte_size.clone(), ) } } diff --git a/src/sinks/util/processed_event.rs b/src/sinks/util/processed_event.rs index dd13df8bd3f21..fe4a50a8eb42d 100644 --- a/src/sinks/util/processed_event.rs +++ b/src/sinks/util/processed_event.rs @@ -1,5 +1,8 @@ use serde::Serialize; -use vector_common::json_size::JsonSize; +use vector_common::{ + json_size::JsonSize, + request_metadata::{EventCountTags, GetEventCountTags}, +}; use vector_core::{ event::{EventFinalizers, Finalizable, LogEvent, MaybeAsLogMut}, ByteSizeOf, EstimatedJsonEncodedSizeOf, @@ -49,3 +52,12 @@ where self.event.estimated_json_encoded_size_of() } } + +impl GetEventCountTags for ProcessedEvent +where + E: GetEventCountTags, +{ + fn get_tags(&self) -> EventCountTags { + self.event.get_tags() + } +} diff --git a/src/sinks/vector/service.rs b/src/sinks/vector/service.rs index a93a196a58dc9..5277a408634ee 100644 --- a/src/sinks/vector/service.rs +++ b/src/sinks/vector/service.rs @@ -8,11 +8,8 @@ use hyper_proxy::ProxyConnector; use prost::Message; use tonic::{body::BoxBody, IntoRequest}; use tower::Service; -use vector_common::{ - json_size::JsonSize, - request_metadata::{MetaDescriptive, RequestMetadata}, -}; -use vector_core::{internal_event::CountByteSize, stream::DriverResponse}; +use vector_common::request_metadata::{GroupedCountByteSize, MetaDescriptive, RequestMetadata}; +use 
vector_core::stream::DriverResponse; use super::VectorSinkError; use crate::{ @@ -31,8 +28,7 @@ pub struct VectorService { } pub struct VectorResponse { - events_count: usize, - events_byte_size: JsonSize, + events_byte_size: GroupedCountByteSize, } impl DriverResponse for VectorResponse { @@ -40,8 +36,8 @@ impl DriverResponse for VectorResponse { EventStatus::Delivered } - fn events_sent(&self) -> CountByteSize { - CountByteSize(self.events_count, self.events_byte_size) + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_byte_size } } @@ -59,8 +55,12 @@ impl Finalizable for VectorRequest { } impl MetaDescriptive for VectorRequest { - fn get_metadata(&self) -> RequestMetadata { - self.metadata + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata } } @@ -103,13 +103,11 @@ impl Service for VectorService { } // Emission of internal events for errors and dropped events is handled upstream by the caller. - fn call(&mut self, list: VectorRequest) -> Self::Future { + fn call(&mut self, mut list: VectorRequest) -> Self::Future { let mut service = self.clone(); let byte_size = list.request.encoded_len(); - let events_count = list.get_metadata().event_count(); - let events_byte_size = list - .get_metadata() - .events_estimated_json_encoded_byte_size(); + let metadata = std::mem::take(list.metadata_mut()); + let events_byte_size = metadata.into_events_estimated_json_encoded_byte_size(); let future = async move { service @@ -121,10 +119,7 @@ impl Service for VectorService { protocol: &service.protocol, endpoint: &service.endpoint, }); - VectorResponse { - events_count, - events_byte_size, - } + VectorResponse { events_byte_size } }) .map_err(|source| VectorSinkError::Request { source }.into()) .await diff --git a/src/sources/kubernetes_logs/mod.rs b/src/sources/kubernetes_logs/mod.rs index 17878ec54c28b..bd004a778881b 100644 --- a/src/sources/kubernetes_logs/mod.rs +++ b/src/sources/kubernetes_logs/mod.rs @@ -21,10 +21,7 @@ use k8s_paths_provider::K8sPathsProvider; use kube::{ api::Api, config::{self, KubeConfigOptions}, - runtime::{ - reflector::{self}, - watcher, WatchStreamExt, - }, + runtime::{reflector, watcher, WatchStreamExt}, Client, Config as ClientConfig, }; use lifecycle::Lifecycle; diff --git a/src/sources/vector/mod.rs b/src/sources/vector/mod.rs index a6f5ad5b049f3..a6fdaa494d60e 100644 --- a/src/sources/vector/mod.rs +++ b/src/sources/vector/mod.rs @@ -280,11 +280,7 @@ mod tests { use crate::{ config::{SinkConfig as _, SinkContext}, sinks::vector::VectorConfig as SinkConfig, - test_util::{ - self, - components::{assert_source_compliance, SOURCE_TAGS}, - }, - SourceSender, + test_util, SourceSender, }; async fn run_test(vector_source_config_str: &str, addr: SocketAddr) { @@ -323,25 +319,19 @@ mod tests { async fn receive_message() { let addr = test_util::next_addr(); - assert_source_compliance(&SOURCE_TAGS, async { - let config = format!(r#"address = "{}""#, addr); - run_test(&config, addr).await; - }) - .await; + let config = format!(r#"address = "{}""#, addr); + run_test(&config, addr).await; } #[tokio::test] async fn receive_compressed_message() { let addr = test_util::next_addr(); - assert_source_compliance(&SOURCE_TAGS, async { - let config = format!( - r#"address = "{}" + let config = format!( + r#"address = "{}" compression=true"#, - addr - ); - run_test(&config, addr).await; - }) - .await; + addr + ); + run_test(&config, addr).await; } } diff --git 
a/src/test_util/components.rs b/src/test_util/components.rs index 8d999246628dc..e819bee0352bf 100644 --- a/src/test_util/components.rs +++ b/src/test_util/components.rs @@ -58,6 +58,8 @@ pub const FILE_SOURCE_TAGS: [&str; 1] = ["file"]; /// The most basic set of tags for sinks, regardless of whether or not they push data or have it pulled out. pub const SINK_TAGS: [&str; 1] = ["protocol"]; +pub const DATA_VOLUME_SINK_TAGS: [&str; 2] = ["source", "service"]; + /// The standard set of tags for all sinks that write a file. pub const FILE_SINK_TAGS: [&str; 2] = ["file", "protocol"]; @@ -120,6 +122,17 @@ pub static SINK_TESTS: Lazy = Lazy::new(|| { } }); +pub static DATA_VOLUME_SINK_TESTS: Lazy = Lazy::new(|| { + ComponentTests { + events: &["BytesSent", "EventsSent"], // EventsReceived is emitted in the topology + tagged_counters: &[ + "component_sent_events_total", + "component_sent_event_bytes_total", + ], + untagged_counters: &[], + } +}); + /// The component test specification for sinks which simply expose data, or do not otherwise "send" it anywhere. pub static NONSENDING_SINK_TESTS: Lazy = Lazy::new(|| ComponentTests { events: &["EventsSent"], @@ -432,6 +445,32 @@ where .await; } +/// Convenience wrapper for running sink tests +pub async fn assert_data_volume_sink_compliance(tags: &[&str], f: impl Future) -> T { + init_test(); + + let result = f.await; + + DATA_VOLUME_SINK_TESTS.assert(tags); + + result +} + +pub async fn run_and_assert_data_volume_sink_compliance( + sink: VectorSink, + events: S, + tags: &[&str], +) where + S: Stream + Send, + I: Into, +{ + assert_data_volume_sink_compliance(tags, async move { + let events = events.map(Into::into); + sink.run(events).await.expect("Running sink failed") + }) + .await; +} + pub async fn assert_nonsending_sink_compliance(tags: &[&str], f: impl Future) -> T { init_test(); diff --git a/src/topology/builder.rs b/src/topology/builder.rs index ba0d2dda8a761..b6385704b2b70 100644 --- a/src/topology/builder.rs +++ b/src/topology/builder.rs @@ -245,10 +245,7 @@ impl<'a> Builder<'a> { let mut rx = builder.add_source_output(output.clone()); let (mut fanout, control) = Fanout::new(); - let source = Arc::new(OutputId { - component: key.clone(), - port: output.port.clone(), - }); + let source = Arc::new(key.clone()); let pump = async move { debug!("Source pump starting."); diff --git a/src/topology/test/compliance.rs b/src/topology/test/compliance.rs index c7c9d3c349818..a716d29593998 100644 --- a/src/topology/test/compliance.rs +++ b/src/topology/test/compliance.rs @@ -1,10 +1,8 @@ use std::sync::Arc; use tokio::sync::oneshot::{channel, Receiver}; -use vector_core::{ - config::OutputId, - event::{Event, EventArray, EventContainer, LogEvent}, -}; +use vector_common::config::ComponentKey; +use vector_core::event::{Event, EventArray, EventContainer, LogEvent}; use crate::{ config::{unit_test::UnitTestSourceConfig, ConfigBuilder}, @@ -58,7 +56,7 @@ async fn test_function_transform_single_event() { let mut events = events.into_events().collect::>(); assert_eq!(events.len(), 1); - original_event.set_source_id(Arc::new(OutputId::from("in"))); + original_event.set_source_id(Arc::new(ComponentKey::from("in"))); let event = events.remove(0); assert_eq!(original_event, event); @@ -79,7 +77,7 @@ async fn test_sync_transform_single_event() { let mut events = events.into_events().collect::>(); assert_eq!(events.len(), 1); - original_event.set_source_id(Arc::new(OutputId::from("in"))); + original_event.set_source_id(Arc::new(ComponentKey::from("in"))); let event = 
events.remove(0); assert_eq!(original_event, event); @@ -99,7 +97,7 @@ async fn test_task_transform_single_event() { let mut events = events.into_events().collect::>(); assert_eq!(events.len(), 1); - original_event.set_source_id(Arc::new(OutputId::from("in"))); + original_event.set_source_id(Arc::new(ComponentKey::from("in"))); let event = events.remove(0); assert_eq!(original_event, event); diff --git a/src/topology/test/mod.rs b/src/topology/test/mod.rs index babce13f20d90..aa5720382e96c 100644 --- a/src/topology/test/mod.rs +++ b/src/topology/test/mod.rs @@ -26,7 +26,7 @@ use tokio::{ time::{sleep, Duration}, }; use vector_buffers::{BufferConfig, BufferType, WhenFull}; -use vector_core::config::OutputId; +use vector_common::config::ComponentKey; mod backpressure; mod compliance; @@ -148,7 +148,7 @@ async fn topology_source_and_sink() { let res = out1.flat_map(into_event_stream).collect::>().await; - event.set_source_id(Arc::new(OutputId::from("in1"))); + event.set_source_id(Arc::new(ComponentKey::from("in1"))); assert_eq!(vec![event], res); } @@ -181,8 +181,8 @@ async fn topology_multiple_sources() { topology.stop().await; - event1.set_source_id(Arc::new(OutputId::from("in1"))); - event2.set_source_id(Arc::new(OutputId::from("in2"))); + event1.set_source_id(Arc::new(ComponentKey::from("in1"))); + event2.set_source_id(Arc::new(ComponentKey::from("in2"))); assert_eq!(out_event1, Some(event1.into())); assert_eq!(out_event2, Some(event2.into())); @@ -217,7 +217,7 @@ async fn topology_multiple_sinks() { let res2 = out2.flat_map(into_event_stream).collect::>().await; // We should see that both sinks got the exact same event: - event.set_source_id(Arc::new(OutputId::from("in1"))); + event.set_source_id(Arc::new(ComponentKey::from("in1"))); let expected = vec![event]; assert_eq!(expected, res1); assert_eq!(expected, res2); @@ -291,7 +291,7 @@ async fn topology_remove_one_source() { drop(in2); topology.stop().await; - event1.set_source_id(Arc::new(OutputId::from("in1"))); + event1.set_source_id(Arc::new(ComponentKey::from("in1"))); let res = h_out1.await.unwrap(); assert_eq!(vec![event1], res); @@ -330,7 +330,7 @@ async fn topology_remove_one_sink() { let res1 = out1.flat_map(into_event_stream).collect::>().await; let res2 = out2.flat_map(into_event_stream).collect::>().await; - event.set_source_id(Arc::new(OutputId::from("in1"))); + event.set_source_id(Arc::new(ComponentKey::from("in1"))); assert_eq!(vec![event], res1); assert_eq!(Vec::::new(), res2); @@ -441,7 +441,7 @@ async fn topology_swap_source() { // as we've removed it from the topology prior to the sends. 
assert_eq!(Vec::::new(), res1); - event2.set_source_id(Arc::new(OutputId::from("in2"))); + event2.set_source_id(Arc::new(ComponentKey::from("in2"))); assert_eq!(vec![event2], res2); } @@ -553,7 +553,7 @@ async fn topology_swap_sink() { // the new sink, which _was_ rebuilt: assert_eq!(Vec::::new(), res1); - event1.set_source_id(Arc::new(OutputId::from("in1"))); + event1.set_source_id(Arc::new(ComponentKey::from("in1"))); assert_eq!(vec![event1], res2); } @@ -660,8 +660,8 @@ async fn topology_rebuild_connected() { let res = h_out1.await.unwrap(); - event1.set_source_id(Arc::new(OutputId::from("in1"))); - event2.set_source_id(Arc::new(OutputId::from("in1"))); + event1.set_source_id(Arc::new(ComponentKey::from("in1"))); + event2.set_source_id(Arc::new(ComponentKey::from("in1"))); assert_eq!(vec![event1, event2], res); } @@ -714,7 +714,7 @@ async fn topology_rebuild_connected_transform() { let res2 = h_out2.await.unwrap(); assert_eq!(Vec::::new(), res1); - event.set_source_id(Arc::new(OutputId::from("in1"))); + event.set_source_id(Arc::new(ComponentKey::from("in1"))); assert_eq!(vec![event], res2); } @@ -897,11 +897,11 @@ async fn source_metadata_reaches_sink() { topology.stop().await; assert_eq!( - out_event1.into_log().metadata().source_id().unwrap(), - &OutputId::from("in1") + **out_event1.into_log().metadata().source_id().unwrap(), + ComponentKey::from("in1") ); assert_eq!( - out_event2.into_log().metadata().source_id().unwrap(), - &OutputId::from("in2") + **out_event2.into_log().metadata().source_id().unwrap(), + ComponentKey::from("in2") ); } diff --git a/src/transforms/aggregate.rs b/src/transforms/aggregate.rs index de97b69eafe41..a591305764df1 100644 --- a/src/transforms/aggregate.rs +++ b/src/transforms/aggregate.rs @@ -155,6 +155,7 @@ mod tests { use futures::stream; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; + use vector_common::config::ComponentKey; use super::*; use crate::{ @@ -173,7 +174,8 @@ mod tests { kind: metric::MetricKind, value: metric::MetricValue, ) -> Event { - Event::Metric(Metric::new(name, kind, value)).with_source_id(Arc::new(OutputId::from("in"))) + Event::Metric(Metric::new(name, kind, value)) + .with_source_id(Arc::new(ComponentKey::from("in"))) } #[test] diff --git a/src/transforms/dedupe.rs b/src/transforms/dedupe.rs index 8375e125763f1..4a6497628d78a 100644 --- a/src/transforms/dedupe.rs +++ b/src/transforms/dedupe.rs @@ -288,7 +288,7 @@ mod tests { use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; - use vector_core::config::OutputId; + use vector_common::config::ComponentKey; use crate::{ event::{Event, LogEvent, Value}, @@ -362,7 +362,7 @@ mod tests { tx.send(event1.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event1.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event1); // Second event differs in matched field so should be output even though it @@ -370,7 +370,7 @@ mod tests { tx.send(event2.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event2.set_source_id(Arc::new(OutputId::from("in"))); + event2.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event2); // Third event has the same value for "matched" as first event, so it should be dropped. 
@@ -412,7 +412,7 @@ mod tests { tx.send(event1.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event1.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event1); // Second event has a different matched field name with the same value, @@ -420,7 +420,7 @@ mod tests { tx.send(event2.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event2.set_source_id(Arc::new(OutputId::from("in"))); + event2.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event2); drop(tx); @@ -465,7 +465,7 @@ mod tests { tx.send(event1.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event1.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event1); // Second event is the same just with different field order, so it @@ -510,7 +510,7 @@ mod tests { tx.send(event1.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event1.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event1); // Second event gets output because it's not a dupe. This causes the first @@ -518,7 +518,7 @@ mod tests { tx.send(event2.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event2.set_source_id(Arc::new(OutputId::from("in"))); + event2.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event2); // Third event is a dupe but gets output anyway because the first @@ -526,7 +526,7 @@ mod tests { tx.send(event1.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event1.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event1); drop(tx); @@ -567,7 +567,7 @@ mod tests { tx.send(event1.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event1.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event1); // Second event should also get passed through even though the string @@ -575,7 +575,7 @@ mod tests { tx.send(event2.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event2.set_source_id(Arc::new(OutputId::from("in"))); + event2.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event2); drop(tx); @@ -620,7 +620,7 @@ mod tests { tx.send(event1.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event1.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event1); // Second event should also get passed through even though the string @@ -628,7 +628,7 @@ mod tests { tx.send(event2.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event2.set_source_id(Arc::new(OutputId::from("in"))); + event2.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event2); drop(tx); @@ -666,7 +666,7 @@ mod tests { tx.send(event1.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - event1.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event1); // Second event should also get passed through as null is different than @@ -674,7 +674,7 @@ mod tests { tx.send(event2.clone()).await.unwrap(); let new_event = out.recv().await.unwrap(); - 
event2.set_source_id(Arc::new(OutputId::from("in"))); + event2.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event, event2); drop(tx); diff --git a/src/transforms/filter.rs b/src/transforms/filter.rs index 85bee0a071ba1..95e8877bee255 100644 --- a/src/transforms/filter.rs +++ b/src/transforms/filter.rs @@ -100,6 +100,7 @@ mod test { use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; + use vector_common::config::ComponentKey; use vector_core::event::{Metric, MetricKind, MetricValue}; use super::*; @@ -127,7 +128,7 @@ mod test { let mut log = Event::from(LogEvent::from("message")); tx.send(log.clone()).await.unwrap(); - log.set_source_id(Arc::new(OutputId::from("in"))); + log.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(out.recv().await.unwrap(), log); let metric = Event::from(Metric::new( diff --git a/src/transforms/log_to_metric.rs b/src/transforms/log_to_metric.rs index aaf7a68e42225..ad44b0a9e6d55 100644 --- a/src/transforms/log_to_metric.rs +++ b/src/transforms/log_to_metric.rs @@ -409,6 +409,7 @@ mod tests { use std::time::Duration; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; + use vector_common::config::ComponentKey; use vector_core::metric_tags; use super::*; @@ -508,7 +509,7 @@ mod tests { let event = create_event("status", "42"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let metric = do_transform(config, event).await.unwrap(); assert_eq!( @@ -540,7 +541,7 @@ mod tests { event.as_mut_log().insert("method", "post"); event.as_mut_log().insert("code", "200"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let metric = do_transform(config, event).await.unwrap(); @@ -629,7 +630,7 @@ mod tests { let event = create_event("backtrace", "message"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let metric = do_transform(config, event).await.unwrap(); assert_eq!( @@ -673,7 +674,7 @@ mod tests { let event = create_event("amount", "33.99"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let metric = do_transform(config, event).await.unwrap(); assert_eq!( @@ -703,7 +704,7 @@ mod tests { let event = create_event("amount", "33.99"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let metric = do_transform(config, event).await.unwrap(); assert_eq!( @@ -731,7 +732,7 @@ mod tests { let event = create_event("memory_rss", "123"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let metric = do_transform(config, event).await.unwrap(); assert_eq!( @@ -815,7 +816,7 @@ mod tests { event.as_mut_log().insert("status", "42"); event.as_mut_log().insert("backtrace", "message"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let output = do_transform_multiple_events(config, event, 2).await; assert_eq!(2, output.len()); @@ 
-869,7 +870,7 @@ mod tests { event.as_mut_log().insert("worker", "abc"); event.as_mut_log().insert("service", "xyz"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let output = do_transform_multiple_events(config, event, 2).await; @@ -912,7 +913,7 @@ mod tests { let event = create_event("user_ip", "1.2.3.4"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let metric = do_transform(config, event).await.unwrap(); assert_eq!( @@ -941,7 +942,7 @@ mod tests { let event = create_event("response_time", "2.5"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let metric = do_transform(config, event).await.unwrap(); assert_eq!( @@ -971,7 +972,7 @@ mod tests { let event = create_event("response_time", "2.5"); let mut metadata = event.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let metric = do_transform(config, event).await.unwrap(); assert_eq!( diff --git a/src/transforms/metric_to_log.rs b/src/transforms/metric_to_log.rs index b1c7c7394fca2..7e23962aa990c 100644 --- a/src/transforms/metric_to_log.rs +++ b/src/transforms/metric_to_log.rs @@ -346,6 +346,7 @@ mod tests { use similar_asserts::assert_eq; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; + use vector_common::config::ComponentKey; use vector_core::metric_tags; use super::*; @@ -410,7 +411,7 @@ mod tests { .with_tags(Some(tags())) .with_timestamp(Some(ts())); let mut metadata = counter.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let log = do_transform(counter).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); @@ -438,7 +439,7 @@ mod tests { ) .with_timestamp(Some(ts())); let mut metadata = gauge.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let log = do_transform(gauge).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); @@ -466,7 +467,7 @@ mod tests { ) .with_timestamp(Some(ts())); let mut metadata = set.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let log = do_transform(set).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); @@ -496,7 +497,7 @@ mod tests { ) .with_timestamp(Some(ts())); let mut metadata = distro.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let log = do_transform(distro).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); @@ -545,7 +546,7 @@ mod tests { ) .with_timestamp(Some(ts())); let mut metadata = histo.metadata().clone(); - metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let log = do_transform(histo).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); @@ -592,7 +593,7 @@ mod tests { ) .with_timestamp(Some(ts())); let mut metadata = summary.metadata().clone(); - 
metadata.set_source_id(Arc::new(OutputId::from("in"))); + metadata.set_source_id(Arc::new(ComponentKey::from("in"))); let log = do_transform(summary).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); diff --git a/src/transforms/tag_cardinality_limit/tests.rs b/src/transforms/tag_cardinality_limit/tests.rs index 845883f4ae045..8488658e8ea55 100644 --- a/src/transforms/tag_cardinality_limit/tests.rs +++ b/src/transforms/tag_cardinality_limit/tests.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use vector_core::config::OutputId; +use vector_common::config::ComponentKey; use vector_core::metric_tags; use super::*; @@ -85,8 +85,8 @@ async fn drop_event(config: TagCardinalityLimitConfig) { let new_event3 = out.recv().await; - event1.set_source_id(Arc::new(OutputId::from("in"))); - event2.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event2.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event1, Some(event1)); assert_eq!(new_event2, Some(event2)); @@ -131,9 +131,9 @@ async fn drop_tag(config: TagCardinalityLimitConfig) { drop(tx); topology.stop().await; - event1.set_source_id(Arc::new(OutputId::from("in"))); - event2.set_source_id(Arc::new(OutputId::from("in"))); - event3.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event2.set_source_id(Arc::new(ComponentKey::from("in"))); + event3.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event1, Some(event1)); assert_eq!(new_event2, Some(event2)); @@ -203,9 +203,9 @@ async fn drop_tag_multi_value(config: TagCardinalityLimitConfig) { let new_event2 = out.recv().await; let new_event3 = out.recv().await; - event1.set_source_id(Arc::new(OutputId::from("in"))); - event2.set_source_id(Arc::new(OutputId::from("in"))); - event3.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event2.set_source_id(Arc::new(ComponentKey::from("in"))); + event3.set_source_id(Arc::new(ComponentKey::from("in"))); drop(tx); topology.stop().await; @@ -253,9 +253,9 @@ async fn separate_value_limit_per_tag(config: TagCardinalityLimitConfig) { drop(tx); topology.stop().await; - event1.set_source_id(Arc::new(OutputId::from("in"))); - event2.set_source_id(Arc::new(OutputId::from("in"))); - event3.set_source_id(Arc::new(OutputId::from("in"))); + event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event2.set_source_id(Arc::new(ComponentKey::from("in"))); + event3.set_source_id(Arc::new(ComponentKey::from("in"))); assert_eq!(new_event1, Some(event1)); assert_eq!(new_event2, Some(event2)); From 94e3f1542be0c4ba93f554803973c9e26e7dc566 Mon Sep 17 00:00:00 2001 From: gadisn Date: Mon, 26 Jun 2023 16:53:51 +0300 Subject: [PATCH 179/236] docs: remove aggregator beta warning (#17750) According to https://discord.com/channels/742820443487993987/1040011926563983441/1040016552243761162, this warning isn't relevant anymore --- website/content/en/docs/setup/deployment/roles.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/website/content/en/docs/setup/deployment/roles.md b/website/content/en/docs/setup/deployment/roles.md index 1a81a8ed6166a..8385bac91191c 100644 --- a/website/content/en/docs/setup/deployment/roles.md +++ b/website/content/en/docs/setup/deployment/roles.md @@ -8,13 +8,6 @@ aliases: ["/docs/setup/deployment/strategies"] Vector is an end-to-end data pipeline designed to collect, process, and route data. 
This means that Vector serves all roles in building your pipeline. You can deploy it as an [agent](#agent), [sidecar](#sidecar), or [aggregator](#aggregator). You combine these roles to form [topologies]. In this section, we'll cover each role in detail and help you understand when to use each. -{{< warning title="Aggregator role in public beta" >}} -Helm support for the [aggregator] role is currently in public beta. We're currently seeking beta testers. If interested, please [join our chat][chat] and let us know. - -[aggregator]: /docs/setup/deployment/roles/#aggregator -[chat]: https://chat.vector.dev -{{< /warning >}} - {{< roles >}} You can install the Vector as an Aggregator on Kubernetes using Helm. For more information about getting started with the Aggregator role, see the [Helm install docs][helm]. From 63ba2a95d972bbba11cd9a1f913f2606bb2ba20b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jun 2023 09:53:17 -0600 Subject: [PATCH 180/236] chore(deps): Bump proc-macro2 from 1.0.60 to 1.0.63 (#17757) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [proc-macro2](https://github.com/dtolnay/proc-macro2) from 1.0.60 to 1.0.63.
Release notes

Sourced from proc-macro2's releases.

1.0.63

1.0.62 (yanked)

1.0.61

  • Stricter parsing of whitespace in cooked string literals and between tokens (#392, #393)
Commits
  • 10f5dc3 Release 1.0.63
  • 4207faa Fix overflow in delimiter_of_raw_string at end of input
  • cfa1524 Release 1.0.62
  • 2331259 Merge pull request #396 from dtolnay/trailingbackslash
  • aebe7a0 Eliminate unneeded peekability on cooked string iterators
  • e2923c3 Factor out logic for processing trailing backslashes
  • 07ffd04 Convert cooked_byte_string trailing backslash logic to bytes
  • 231e8c8 Return Result from \x parser
  • 3f6d84b Merge pull request #395 from dtolnay/cstr
  • 96c97e5 Implement c-str literal parsing
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=proc-macro2&package-manager=cargo&previous-version=1.0.60&new-version=1.0.63)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 134 ++++++++++++++++++++++++++--------------------------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eceb55ace3aac..e0b0929678214 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -463,7 +463,7 @@ dependencies = [ "async-graphql-parser", "darling 0.14.2", "proc-macro-crate 1.2.1", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "thiserror", @@ -583,7 +583,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -605,7 +605,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -622,7 +622,7 @@ version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -1526,7 +1526,7 @@ dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", "proc-macro-crate 0.1.5", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "syn 1.0.109", ] @@ -1536,7 +1536,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61820b4c5693eafb998b1e67485423c923db4a75f72585c247bdee32bad81e7b" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1547,7 +1547,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c76cdbfa13def20d1f8af3ae7b3c6771f06352a74221d8851262ac384c122b8e" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1618,7 +1618,7 @@ version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1701,7 +1701,7 @@ checksum = "b48814962d2fd604c50d2b9433c2a41a0ab567779ee2c02f7fba6eca1221f082" dependencies = [ "cached_proc_macro_types", "darling 0.14.2", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -1926,7 +1926,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81d7dc0031c3a59a04fc2ba395c8e2dd463cba1859275f065d225f6122221b45" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -2434,7 +2434,7 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "scratch", "syn 1.0.109", @@ -2452,7 +2452,7 @@ version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -2485,7 +2485,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.60", + 
"proc-macro2 1.0.63", "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", @@ -2499,7 +2499,7 @@ checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", @@ -2598,7 +2598,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -2609,7 +2609,7 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -2621,7 +2621,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "rustc_version 0.4.0", "syn 1.0.109", @@ -2877,7 +2877,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -2889,7 +2889,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -2901,7 +2901,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11f36e95862220b211a6e2aa5eca09b4fa391b13cd52ceb8035a24bf65a79de2" dependencies = [ "once_cell", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -2921,7 +2921,7 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -3043,7 +3043,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f47da3a72ec598d9c8937a7ebca8962a5c7a1f28444e38c2b33c771ba3f55f05" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3139,7 +3139,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4c81935e123ab0741c4c4f0d9b8377e5fb21d3de7e062fa4b1263b1fbcba1ea" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3320,7 +3320,7 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -3403,7 +3403,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb19fe8de3ea0920d282f7b77dd4227aea6b8b999b42cdf0ca41b2472b14443a" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -3503,7 +3503,7 @@ dependencies = [ "graphql-parser", "heck 0.4.0", "lazy_static", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 
1.0.28", "serde", "serde_json", @@ -3517,7 +3517,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00bda454f3d313f909298f626115092d348bc231025699f557b27e248475f48c" dependencies = [ "graphql_client_codegen", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "syn 1.0.109", ] @@ -4878,7 +4878,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -5395,7 +5395,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.2.1", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -5407,7 +5407,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.2.1", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -5581,7 +5581,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -5825,7 +5825,7 @@ checksum = "75a1ef20bf3193c15ac345acb32e26b3dc3223aff4d77ae4fc5359567683796b" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -5913,7 +5913,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -6134,7 +6134,7 @@ version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c142c0e46b57171fe0c528bee8c5b7569e80f0c17e377cd0e30ea57dbc11bb51" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "syn 1.0.109", ] @@ -6179,7 +6179,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "version_check", @@ -6191,7 +6191,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "version_check", ] @@ -6219,9 +6219,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.60" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dec2b086b7a862cf4de201096214fa870344cf922b2b30c167badb3af3195406" +checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb" dependencies = [ "unicode-ident", ] @@ -6300,7 +6300,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -6329,7 +6329,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -6428,7 +6428,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -6448,7 +6448,7 @@ version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", ] [[package]] @@ -6843,7 +6843,7 @@ version = "0.7.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -7337,7 +7337,7 @@ version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -7348,7 +7348,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -7400,7 +7400,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -7459,7 +7459,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -7471,7 +7471,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" dependencies = [ "darling 0.14.2", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -7742,7 +7742,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -7883,7 +7883,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -7901,7 +7901,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "rustversion", "syn 1.0.109", @@ -7940,7 +7940,7 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "unicode-ident", ] @@ -7951,7 +7951,7 @@ version = "2.0.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "unicode-ident", ] @@ -7968,7 +7968,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "unicode-xid 0.2.4", @@ -8122,7 +8122,7 @@ version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -8267,7 +8267,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -8487,7 +8487,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "prost-build", "quote 1.0.28", "syn 1.0.109", @@ -8592,7 +8592,7 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -8863,7 +8863,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -8893,7 +8893,7 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3e1c30cedd24fc597f7d37a721efdbdc2b1acae012c1ef1218f4c7c2c0f3e7" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", ] @@ -9433,7 +9433,7 @@ dependencies = [ "convert_case 0.6.0", "darling 0.13.4", "once_cell", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "serde", "serde_json", @@ -9446,7 +9446,7 @@ name = "vector-config-macros" version = "0.1.0" dependencies = [ "darling 0.13.4", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "serde", "serde_derive_internals", @@ -9706,7 +9706,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", ] @@ -9807,7 +9807,7 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", "wasm-bindgen-shared", @@ -9841,7 +9841,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 2.0.10", "wasm-bindgen-backend", @@ -10252,7 +10252,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6505e6815af7de1746a08f69c69606bb45695a17149517680f3b2149713b19a3" dependencies = 
[ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", ] @@ -10272,7 +10272,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ - "proc-macro2 1.0.60", + "proc-macro2 1.0.63", "quote 1.0.28", "syn 1.0.109", "synstructure", From a53c7a2153960038b8e68e13d6beede09eb1a69a Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Mon, 26 Jun 2023 09:48:37 -0700 Subject: [PATCH 181/236] chore(kubernetes_logs source): Add warning about Windows support (#17762) Signed-off-by: Jesse Szwedko --------- Signed-off-by: Jesse Szwedko Co-authored-by: Doug Smith --- website/cue/reference/components/sources/kubernetes_logs.cue | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/cue/reference/components/sources/kubernetes_logs.cue b/website/cue/reference/components/sources/kubernetes_logs.cue index 082a2205a9e43..00f6cf7427c67 100644 --- a/website/cue/reference/components/sources/kubernetes_logs.cue +++ b/website/cue/reference/components/sources/kubernetes_logs.cue @@ -47,7 +47,9 @@ components: sources: kubernetes_logs: { Kubernetes cluster this can be provided with a [hostPath](\(urls.kubernetes_host_path)) volume. """, ] - warnings: [] + warnings: [""" + This source is only tested on Linux. Your mileage may vary for clusters on Windows. + """] notices: [] } From e164b36436b85a332b5a3b4c492caab6b53578d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jun 2023 16:53:13 +0000 Subject: [PATCH 182/236] chore(deps): Bump serde_yaml from 0.9.21 to 0.9.22 (#17756) Bumps [serde_yaml](https://github.com/dtolnay/serde-yaml) from 0.9.21 to 0.9.22.
Release notes

Sourced from serde_yaml's releases.

0.9.22

  • Update indexmap dependency to version 2
Commits
  • 060eb86 Release 0.9.22
  • b12ad38 Merge pull request #377 from dtolnay/indexmap
  • c418ad5 Update indexmap dependency to version 2
  • f1cd9e6 Remove .clippy.toml in favor of respecting rust-version from Cargo.toml
  • 147103c Show error details during miri setup in CI
  • 622553f Fix unused import warnings in test under cfg miri
  • 2037c7e Fix new unused_mut detected by nightly-2023-04-30
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=serde_yaml&package-manager=cargo&previous-version=0.9.21&new-version=0.9.22)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Doug Smith --- Cargo.lock | 70 +++++++++++++++++++++++++++----------------- Cargo.toml | 2 +- LICENSE-3rdparty.csv | 1 + vdev/Cargo.toml | 2 +- 4 files changed, 46 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e0b0929678214..a1fee9a897355 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -439,7 +439,7 @@ dependencies = [ "fnv", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "mime", "multer", "num-traits", @@ -488,7 +488,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d461325bfb04058070712296601dfe5e5bd6cdff84780a0a8c569ffb15c87eb3" dependencies = [ "bytes 1.4.0", - "indexmap", + "indexmap 1.9.3", "serde", "serde_json", ] @@ -1562,7 +1562,7 @@ dependencies = [ "base64 0.13.1", "bitvec", "hex", - "indexmap", + "indexmap 1.9.3", "js-sys", "lazy_static", "rand 0.8.5", @@ -2955,6 +2955,12 @@ dependencies = [ "termcolor", ] +[[package]] +name = "equivalent" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" + [[package]] name = "erased-serde" version = "0.3.23" @@ -3096,7 +3102,7 @@ dependencies = [ "flate2", "futures 0.3.28", "glob", - "indexmap", + "indexmap 1.9.3", "libc", "quickcheck", "scan_fmt", @@ -3543,7 +3549,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio", "tokio-util", @@ -4062,6 +4068,16 @@ dependencies = [ "serde", ] +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", +] + [[package]] name = "indicatif" version = "0.17.5" @@ -4441,7 +4457,7 @@ dependencies = [ "secrecy", "serde", "serde_json", - "serde_yaml 0.9.21", + "serde_yaml 0.9.22", "thiserror", "tokio", "tokio-util", @@ -4909,7 +4925,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.13.2", - "indexmap", + "indexmap 1.9.3", "metrics", "num_cpus", "ordered-float 3.7.0", @@ -5127,7 +5143,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af5a8477ac96877b5bd1fd67e0c28736c12943aba24eda92b127e036b0c8f400" dependencies = [ - "indexmap", + "indexmap 1.9.3", "itertools 0.10.5", "ndarray", "noisy_float", @@ -5848,7 +5864,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 1.9.3", ] [[package]] @@ -6230,7 +6246,7 @@ dependencies = [ name = "prometheus-parser" version = "0.1.0" dependencies = [ - "indexmap", + "indexmap 1.9.3", "nom", "num_enum 0.6.1", "prost", @@ -7359,7 +7375,7 @@ version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" dependencies = [ - "indexmap", + "indexmap 1.9.3", "itoa", "ryu", "serde", @@ -7445,7 +7461,7 @@ dependencies = [ "base64 0.13.1", "chrono", "hex", - "indexmap", + "indexmap 1.9.3", "serde", "serde_json", "serde_with_macros 2.3.2", @@ -7482,7 +7498,7 @@ version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ - "indexmap", + "indexmap 1.9.3", "ryu", "serde", "yaml-rust", @@ -7490,11 +7506,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.21" +version = "0.9.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9d684e3ec7de3bf5466b32bd75303ac16f0736426e5a4e0d6e489559ce1249c" +checksum = "452e67b9c20c37fa79df53201dc03839651086ed9bbe92b3ca585ca9fdaa7d85" dependencies = [ - "indexmap", + "indexmap 2.0.0", "itoa", "ryu", "serde", @@ -8440,7 +8456,7 @@ version = "0.19.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f" dependencies = [ - "indexmap", + "indexmap 1.9.3", "serde", "serde_spanned", "toml_datetime", @@ -8501,7 +8517,7 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "indexmap", + "indexmap 1.9.3", "pin-project", "pin-project-lite", "rand 0.8.5", @@ -9085,7 +9101,7 @@ dependencies = [ "dunce", "glob", "hex", - "indexmap", + "indexmap 1.9.3", "indicatif", "itertools 0.11.0", "log", @@ -9097,7 +9113,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "serde_yaml 0.9.21", + "serde_yaml 0.9.22", "sha2 0.10.7", "tempfile", "toml 0.7.4", @@ -9188,7 +9204,7 @@ dependencies = [ "hyper", "hyper-openssl", "hyper-proxy", - "indexmap", + "indexmap 1.9.3", "indoc", "infer 0.14.0", "inventory", @@ -9249,7 +9265,7 @@ dependencies = [ "serde_bytes", "serde_json", "serde_with 2.3.2", - "serde_yaml 0.9.21", + "serde_yaml 0.9.22", "sha2 0.10.7", "similar-asserts", "smallvec", @@ -9353,7 +9369,7 @@ dependencies = [ "rand 0.8.5", "rkyv", "serde", - "serde_yaml 0.9.21", + "serde_yaml 0.9.22", "snafu", "temp-dir", "tokio", @@ -9379,7 +9395,7 @@ dependencies = [ "crossbeam-utils", "derivative", "futures 0.3.28", - "indexmap", + "indexmap 1.9.3", "metrics", "nom", "ordered-float 3.7.0", @@ -9409,7 +9425,7 @@ dependencies = [ "chrono", "chrono-tz", "encoding_rs", - "indexmap", + "indexmap 1.9.3", "inventory", "no-proxy", "num-traits", @@ -9479,7 +9495,7 @@ dependencies = [ "headers", "http", "hyper-proxy", - "indexmap", + "indexmap 1.9.3", "metrics", "metrics-tracing-context", "metrics-util", @@ -9642,7 +9658,7 @@ dependencies = [ "hex", "hmac", "hostname", - "indexmap", + "indexmap 1.9.3", "indoc", "itertools 0.10.5", "lalrpop", diff --git a/Cargo.toml b/Cargo.toml index 9743c88b77259..4ec26aa26bf2d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -194,7 +194,7 @@ serde-toml-merge = { version = "0.3.0", default-features = false } serde_bytes = { version = "0.11.9", default-features = false, features = ["std"], optional = true } serde_json = { version = "1.0.97", default-features = false, features = ["raw_value"] } serde_with = { version = "2.3.2", default-features = false, features = ["macros", "std"] } -serde_yaml = { version = "0.9.21", default-features = false } +serde_yaml = { version = "0.9.22", default-features = false } # Messagepack rmp-serde = { version = "1.1.1", default-features = false, optional = true } diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 31740f86ce27b..c137f340d73e9 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -174,6 +174,7 @@ enum-as-inner,https://github.com/bluejekyll/enum-as-inner,MIT OR Apache-2.0,Benj enum_dispatch,https://gitlab.com/antonok/enum_dispatch,MIT OR Apache-2.0,Anton Lazarev enumflags2,https://github.com/meithecatte/enumflags2,MIT 
OR Apache-2.0,"maik klein , Maja Kądziołka " env_logger,https://github.com/env-logger-rs/env_logger,MIT OR Apache-2.0,The Rust Project Developers +equivalent,https://github.com/cuviper/equivalent,Apache-2.0 OR MIT,The equivalent Authors erased-serde,https://github.com/dtolnay/erased-serde,MIT OR Apache-2.0,David Tolnay errno,https://github.com/lambda-fairy/rust-errno,MIT OR Apache-2.0,Chris Wong errno-dragonfly,https://github.com/mneumann/errno-dragonfly-rs,MIT,Michael Neumann diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index b9d39b2c405ad..d144180e1ac88 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -34,7 +34,7 @@ regex = { version = "1.8.4", default-features = false, features = ["std", "perf" reqwest = { version = "0.11", features = ["json", "blocking"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.97" -serde_yaml = "0.9.21" +serde_yaml = "0.9.22" sha2 = "0.10.7" tempfile = "3.6.0" toml = { version = "0.7.4", default-features = false, features = ["parse"] } From 5417a06e29f7a6050f916df993edba0149084b57 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Mon, 26 Jun 2023 14:43:30 -0400 Subject: [PATCH 183/236] chore: Download submodules in the CI checkouts (#17760) --- .github/audit.yml | 2 +- .github/workflows/component_features.yml | 3 +++ .github/workflows/cross.yml | 3 +++ .github/workflows/integration-test.yml | 3 +++ .github/workflows/misc.yml | 3 +++ .github/workflows/regression.yml | 2 ++ .github/workflows/test.yml | 1 + .github/workflows/unit_mac.yml | 3 +++ .github/workflows/unit_windows.yml | 3 +++ 9 files changed, 22 insertions(+), 1 deletion(-) diff --git a/.github/audit.yml b/.github/audit.yml index 83db5d60151bd..8a655a04ace82 100644 --- a/.github/audit.yml +++ b/.github/audit.yml @@ -11,7 +11,7 @@ jobs: security_audit: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v3 - uses: actions-rs/audit-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/component_features.yml b/.github/workflows/component_features.yml index 2705483f598c4..a888fe6c83d05 100644 --- a/.github/workflows/component_features.yml +++ b/.github/workflows/component_features.yml @@ -26,10 +26,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml index 1bf7ef188b577..a3afadb8bddac 100644 --- a/.github/workflows/cross.yml +++ b/.github/workflows/cross.yml @@ -39,10 +39,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + submodules: "recursive" - uses: actions/cache@v3 name: Cache Cargo registry + index diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 36c563732c417..a5f02316cdaa4 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -54,10 +54,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + 
submodules: "recursive" - run: sudo npm -g install @datadog/datadog-ci diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index ce40a006da65f..b106c65ee2655 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -28,10 +28,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + submodules: "recursive" - uses: actions/cache@v3 name: Cache Cargo registry + index diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index d1cea2f64a737..40600010998c2 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -292,6 +292,7 @@ jobs: with: ref: ${{ needs.compute-metadata.outputs.baseline-sha }} path: baseline-vector + submodules: "recursive" - name: Set up Docker Buildx id: buildx @@ -329,6 +330,7 @@ jobs: with: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} path: comparison-vector + submodules: "recursive" - name: Set up Docker Buildx id: buildx diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index ca5184b5041c2..7947529457c90 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -44,6 +44,7 @@ jobs: with: # check-version needs tags fetch-depth: 0 # fetch everything + submodules: "recursive" - uses: actions/cache@v3 name: Cache Cargo registry + index diff --git a/.github/workflows/unit_mac.yml b/.github/workflows/unit_mac.yml index abda6ae1e177f..22476d63efada 100644 --- a/.github/workflows/unit_mac.yml +++ b/.github/workflows/unit_mac.yml @@ -32,10 +32,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + submodules: "recursive" - uses: actions/cache@v3 name: Cache Cargo registry + index diff --git a/.github/workflows/unit_windows.yml b/.github/workflows/unit_windows.yml index 87fb3f14c5e3e..61128cf5a801a 100644 --- a/.github/workflows/unit_windows.yml +++ b/.github/workflows/unit_windows.yml @@ -35,10 +35,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + submodules: "recursive" - run: .\scripts\environment\bootstrap-windows-2019.ps1 - run: make test From 4236e32cb1fe514e117fa8737e43f6dd51b937dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jun 2023 20:14:15 +0000 Subject: [PATCH 184/236] chore(deps): Bump memmap2 from 0.7.0 to 0.7.1 (#17752) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [memmap2](https://github.com/RazrFalcon/memmap2-rs) from 0.7.0 to 0.7.1.
Changelog

Sourced from memmap2's changelog.

[0.7.1] - 2023-06-24

Fixed

  • Mapping beyond 4GB offset on 32 bit glibc. Linux-only. @lvella

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=memmap2&package-manager=cargo&previous-version=0.7.0&new-version=0.7.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- lib/vector-buffers/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a1fee9a897355..0d8161dd990ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4852,9 +4852,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memmap2" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180d4b35be83d33392d1d1bfbd2ae1eca7ff5de1a94d3fc87faaa99a069e7cbd" +checksum = "f49388d20533534cd19360ad3d6a7dadc885944aa802ba3995040c5ec11288c6" dependencies = [ "libc", ] diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index 21ec45d1b7b58..1936760090a09 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -16,7 +16,7 @@ crossbeam-queue = { version = "0.3.8", default-features = false, features = ["st crossbeam-utils = { version = "0.8.16", default-features = false } fslock = { version = "0.2.1", default-features = false, features = ["std"] } futures = { version = "0.3.28", default-features = false, features = ["std"] } -memmap2 = { version = "0.7.0", default-features = false } +memmap2 = { version = "0.7.1", default-features = false } metrics = "0.21.0" num-traits = { version = "0.2.15", default-features = false } pin-project = { version = "1.1.0", default-features = false } From 219883eb2fc6fb7020b38d9e62d1a4ae0c2ba9e7 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Mon, 26 Jun 2023 14:50:36 -0700 Subject: [PATCH 185/236] fix(sinks): Add missing component span for sink building (#17765) We have one for sources and transforms, but evidentially didn't for sinks. Closes: #17763 Signed-off-by: Jesse Szwedko --------- Signed-off-by: Jesse Szwedko --- src/topology/builder.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/topology/builder.rs b/src/topology/builder.rs index b6385704b2b70..4a858acb7d113 100644 --- a/src/topology/builder.rs +++ b/src/topology/builder.rs @@ -512,6 +512,16 @@ impl<'a> Builder<'a> { let typetag = sink.inner.get_component_name(); let input_type = sink.inner.input().data_type(); + let span = error_span!( + "sink", + component_kind = "sink", + component_id = %key.id(), + component_type = %sink.inner.get_component_name(), + // maintained for compatibility + component_name = %key.id(), + ); + let _entered_span = span.enter(); + // At this point, we've validated that all transforms are valid, including any // transform that mutates the schema provided by their sources. We can now validate the // schema expectations of each individual sink. @@ -531,14 +541,7 @@ impl<'a> Builder<'a> { BufferType::Memory { .. } => "memory", BufferType::DiskV2 { .. } => "disk", }; - let buffer_span = error_span!( - "sink", - component_kind = "sink", - component_id = %key.id(), - component_type = typetag, - component_name = %key.id(), - buffer_type, - ); + let buffer_span = error_span!("sink", buffer_type); let buffer = sink .buffer .build( From 53f8bff371cdfa96770c03be38ae3c83a497043f Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Mon, 26 Jun 2023 16:03:03 -0600 Subject: [PATCH 186/236] chore(datadog_archives sink): Remove this component (#17749) The `datadog_archives` sink is an unfinished component that was never included in docs or announced. The initiative on this has shifted and the component is being dropped. 
--- .github/CODEOWNERS | 1 - Cargo.toml | 2 - src/common/mod.rs | 1 - src/sinks/datadog_archives.rs | 1183 ----------------- src/sinks/mod.rs | 9 +- .../components/sinks/datadog_archives.cue | 248 ---- 6 files changed, 2 insertions(+), 1442 deletions(-) delete mode 100644 src/sinks/datadog_archives.rs delete mode 100644 website/cue/reference/components/sinks/datadog_archives.cue diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 970d00d30ebc1..78154a62a6992 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -27,7 +27,6 @@ src/sinks/blackhole/ @dsmith3197 @vectordotdev/integrations-team src/sinks/clickhouse/ @dsmith3197 @vectordotdev/integrations-team src/sinks/console/ @dsmith3197 @vectordotdev/integrations-team src/sinks/databend/ @spencergilbert @vectordotdev/integrations-team -src/sinks/datadog_archives.rs @neuronull @vectordotdev/integrations-team src/sinks/datadog_events/ @neuronull @vectordotdev/integrations-team src/sinks/datadog_logs/ @neuronull @vectordotdev/integrations-team src/sinks/datadog_metrics/ @neuronull @vectordotdev/integrations-team diff --git a/Cargo.toml b/Cargo.toml index 4ec26aa26bf2d..35de47bb073ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -611,7 +611,6 @@ sinks-logs = [ "sinks-clickhouse", "sinks-console", "sinks-databend", - "sinks-datadog_archives", "sinks-datadog_events", "sinks-datadog_logs", "sinks-datadog_traces", @@ -670,7 +669,6 @@ sinks-chronicle = [] sinks-clickhouse = [] sinks-console = [] sinks-databend = [] -sinks-datadog_archives = ["sinks-aws_s3", "sinks-azure_blob", "sinks-gcp"] sinks-datadog_events = [] sinks-datadog_logs = [] sinks-datadog_metrics = ["protobuf-build"] diff --git a/src/common/mod.rs b/src/common/mod.rs index 50fa2c509eeb4..557ace9f3cdf2 100644 --- a/src/common/mod.rs +++ b/src/common/mod.rs @@ -1,6 +1,5 @@ #[cfg(any( feature = "sources-datadog_agent", - feature = "sinks-datadog_archives", feature = "sinks-datadog_events", feature = "sinks-datadog_logs", feature = "sinks-datadog_metrics", diff --git a/src/sinks/datadog_archives.rs b/src/sinks/datadog_archives.rs deleted file mode 100644 index d92265ae05a42..0000000000000 --- a/src/sinks/datadog_archives.rs +++ /dev/null @@ -1,1183 +0,0 @@ -// NOTE: We intentionally do not assert/verify that `datadog_archives` meets the component specification because it -// derives all of its capabilities from existing sink implementations which themselves are tested. We probably _should_ -// also verify it here, but for now, this is a punt to avoid having to add a bunch of specific integration tests that -// exercise all possible configurations of the sink. 
- -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - convert::TryFrom, - io::{self, Write}, - sync::{ - atomic::{AtomicU32, Ordering}, - Arc, - }, -}; - -use azure_storage_blobs::prelude::ContainerClient; -use base64::prelude::{Engine as _, BASE64_STANDARD}; -use bytes::{BufMut, Bytes, BytesMut}; -use chrono::{SecondsFormat, Utc}; -use codecs::{encoding::Framer, JsonSerializerConfig, NewlineDelimitedEncoder}; -use goauth::scopes::Scope; -use http::header::{HeaderName, HeaderValue}; -use http::Uri; -use lookup::event_path; -use rand::{thread_rng, Rng}; -use snafu::Snafu; -use tower::ServiceBuilder; -use uuid::Uuid; -use vector_common::request_metadata::RequestMetadata; -use vector_config::{configurable_component, NamedComponent}; -use vector_core::{ - config::AcknowledgementsConfig, - event::{Event, EventFinalizers, Finalizable}, - schema, EstimatedJsonEncodedSizeOf, -}; -use vrl::value::Kind; - -use crate::{ - aws::{AwsAuthentication, RegionOrEndpoint}, - codecs::{Encoder, Transformer}, - config::{GenerateConfig, Input, SinkConfig, SinkContext}, - gcp::{GcpAuthConfig, GcpAuthenticator}, - http::{get_http_scheme_from_uri, HttpClient}, - serde::json::to_string, - sinks::{ - azure_common::{ - self, - config::{AzureBlobMetadata, AzureBlobRequest, AzureBlobRetryLogic}, - service::AzureBlobService, - sink::AzureBlobSink, - }, - gcs_common::{ - self, - config::{GcsPredefinedAcl, GcsRetryLogic, GcsStorageClass, BASE_URL}, - service::{GcsRequest, GcsRequestSettings, GcsService}, - sink::GcsSink, - }, - s3_common::{ - self, - config::{ - create_service, S3CannedAcl, S3RetryLogic, S3ServerSideEncryption, S3StorageClass, - }, - partitioner::{S3KeyPartitioner, S3PartitionKey}, - service::{S3Metadata, S3Request, S3Service}, - sink::S3Sink, - }, - util::{ - metadata::RequestMetadataBuilder, partitioner::KeyPartitioner, - request_builder::EncodeResult, BatchConfig, Compression, RequestBuilder, - ServiceBuilderExt, SinkBatchSettings, TowerRequestConfig, - }, - VectorSink, - }, - template::Template, - tls::{TlsConfig, TlsSettings}, -}; - -const DEFAULT_COMPRESSION: Compression = Compression::gzip_default(); - -#[derive(Clone, Copy, Debug, Default)] -pub struct DatadogArchivesDefaultBatchSettings; - -/// We should avoid producing many small batches - this might slow down Log Rehydration, -/// these values are similar with how DataDog's Log Archives work internally: -/// batch size - 100mb -/// batch timeout - 15min -impl SinkBatchSettings for DatadogArchivesDefaultBatchSettings { - const MAX_EVENTS: Option = None; - const MAX_BYTES: Option = Some(100_000_000); - const TIMEOUT_SECS: f64 = 900.0; -} -/// Configuration for the `datadog_archives` sink. -#[configurable_component] -#[derive(Clone, Debug)] -#[serde(deny_unknown_fields)] -pub struct DatadogArchivesSinkConfig { - /// The name of the object storage service to use. - // TODO: This should really be an enum. - pub service: String, - - /// The name of the bucket to store the archives in. - pub bucket: String, - - /// A prefix to apply to all object keys. - /// - /// Prefixes are useful for partitioning objects, such as by creating an object key that - /// stores objects under a particular directory. If using a prefix for this purpose, it must end - /// in `/` to act as a directory path. A trailing `/` is **not** automatically added. 
- pub key_prefix: Option, - - #[configurable(derived)] - #[serde(default)] - pub request: TowerRequestConfig, - - #[configurable(derived)] - #[serde(default)] - pub aws_s3: Option, - - #[configurable(derived)] - #[serde(default)] - pub azure_blob: Option, - - #[configurable(derived)] - #[serde(default)] - pub gcp_cloud_storage: Option, - - #[configurable(derived)] - tls: Option, - - #[configurable(derived)] - #[serde( - default, - skip_serializing_if = "crate::serde::skip_serializing_if_default" - )] - pub encoding: Transformer, - - #[configurable(derived)] - #[serde( - default, - deserialize_with = "crate::serde::bool_or_struct", - skip_serializing_if = "crate::serde::skip_serializing_if_default" - )] - acknowledgements: AcknowledgementsConfig, -} - -/// S3-specific configuration options. -#[configurable_component] -#[derive(Clone, Debug, Default)] -#[serde(deny_unknown_fields)] -pub struct S3Config { - #[serde(flatten)] - pub options: S3Options, - - #[serde(flatten)] - pub region: RegionOrEndpoint, - - #[configurable(derived)] - #[serde(default)] - pub auth: AwsAuthentication, -} - -/// S3-specific bucket/object options. -#[configurable_component] -#[derive(Clone, Debug, Default)] -#[serde(deny_unknown_fields)] -pub struct S3Options { - /// Canned ACL to apply to the created objects. - /// - /// For more information, see [Canned ACL][canned_acl]. - /// - /// [canned_acl]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl - pub acl: Option, - - /// Grants `READ`, `READ_ACP`, and `WRITE_ACP` permissions on the created objects to the named [grantee]. - /// - /// This allows the grantee to read the created objects and their metadata, as well as read and - /// modify the ACL on the created objects. - /// - /// [grantee]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#specifying-grantee - pub grant_full_control: Option, - - /// Grants `READ` permissions on the created objects to the named [grantee]. - /// - /// This allows the grantee to read the created objects and their metadata. - /// - /// [grantee]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#specifying-grantee - pub grant_read: Option, - - /// Grants `READ_ACP` permissions on the created objects to the named [grantee]. - /// - /// This allows the grantee to read the ACL on the created objects. - /// - /// [grantee]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#specifying-grantee - pub grant_read_acp: Option, - - /// Grants `WRITE_ACP` permissions on the created objects to the named [grantee]. - /// - /// This allows the grantee to modify the ACL on the created objects. - /// - /// [grantee]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#specifying-grantee - pub grant_write_acp: Option, - - /// The Server-side Encryption algorithm used when storing these objects. - pub server_side_encryption: Option, - - /// Specifies the ID of the AWS Key Management Service (AWS KMS) symmetrical customer managed - /// customer master key (CMK) that is used for the created objects. - /// - /// Only applies when `server_side_encryption` is configured to use KMS. - /// - /// If not specified, Amazon S3 uses the AWS managed CMK in AWS to protect the data. - pub ssekms_key_id: Option, - - /// The storage class for the created objects. - /// - /// For more information, see [Using Amazon S3 storage classes][storage_classes]. 
- /// - /// [storage_classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html - pub storage_class: S3StorageClass, - - /// The tag-set for the object. - #[configurable(metadata(docs::additional_props_description = "A single tag."))] - pub tags: Option>, -} - -/// ABS-specific configuration options. -#[configurable_component] -#[derive(Clone, Debug, Default)] -#[serde(deny_unknown_fields)] -pub struct AzureBlobConfig { - /// The Azure Blob Storage Account connection string. - /// - /// Authentication with access key is the only supported authentication method. - pub connection_string: String, -} - -/// GCS-specific configuration options. -#[configurable_component] -#[derive(Clone, Debug, Default)] -#[serde(deny_unknown_fields)] -pub struct GcsConfig { - #[configurable(derived)] - acl: Option, - - #[configurable(derived)] - storage_class: Option, - - /// The set of metadata `key:value` pairs for the created objects. - /// - /// For more information, see [Custom metadata][custom_metadata]. - /// - /// [custom_metadata]: https://cloud.google.com/storage/docs/metadata#custom-metadata - #[configurable(metadata(docs::additional_props_description = "A key/value pair."))] - metadata: Option>, - - #[serde(flatten)] - auth: GcpAuthConfig, -} - -impl GenerateConfig for DatadogArchivesSinkConfig { - fn generate_config() -> toml::Value { - toml::Value::try_from(Self { - service: "".to_owned(), - bucket: "".to_owned(), - key_prefix: None, - request: TowerRequestConfig::default(), - aws_s3: None, - gcp_cloud_storage: None, - tls: None, - azure_blob: None, - encoding: Default::default(), - acknowledgements: Default::default(), - }) - .unwrap() - } -} - -#[derive(Debug, Snafu, PartialEq)] -enum ConfigError { - #[snafu(display("Unsupported service: {}", service))] - UnsupportedService { service: String }, - #[snafu(display("Unsupported storage class: {}", storage_class))] - UnsupportedStorageClass { storage_class: String }, -} - -const KEY_TEMPLATE: &str = "/dt=%Y%m%d/hour=%H/"; - -impl DatadogArchivesSinkConfig { - async fn build_sink(&self, cx: SinkContext) -> crate::Result<(VectorSink, super::Healthcheck)> { - match &self.service[..] 
{ - "aws_s3" => { - let s3_config = self.aws_s3.as_ref().expect("s3 config wasn't provided"); - let service = - create_service(&s3_config.region, &s3_config.auth, &cx.proxy, &self.tls) - .await?; - let client = service.client(); - let svc = self - .build_s3_sink(&s3_config.options, service) - .map_err(|error| error.to_string())?; - Ok(( - svc, - s3_common::config::build_healthcheck(self.bucket.clone(), client)?, - )) - } - "azure_blob" => { - let azure_config = self - .azure_blob - .as_ref() - .expect("azure blob config wasn't provided"); - let client = azure_common::config::build_client( - Some(azure_config.connection_string.clone()), - None, - self.bucket.clone(), - None, - )?; - let svc = self - .build_azure_sink(Arc::::clone(&client)) - .map_err(|error| error.to_string())?; - let healthcheck = - azure_common::config::build_healthcheck(self.bucket.clone(), client)?; - Ok((svc, healthcheck)) - } - "gcp_cloud_storage" => { - let gcs_config = self - .gcp_cloud_storage - .as_ref() - .expect("gcs config wasn't provided"); - let auth = gcs_config.auth.build(Scope::DevStorageReadWrite).await?; - let base_url = format!("{}{}/", BASE_URL, self.bucket); - let tls = TlsSettings::from_options(&self.tls)?; - let client = HttpClient::new(tls, cx.proxy())?; - let healthcheck = gcs_common::config::build_healthcheck( - self.bucket.clone(), - client.clone(), - base_url.clone(), - auth.clone(), - )?; - let sink = self - .build_gcs_sink(client, base_url, auth) - .map_err(|error| error.to_string())?; - Ok((sink, healthcheck)) - } - - service => Err(Box::new(ConfigError::UnsupportedService { - service: service.to_owned(), - })), - } - } - - fn build_s3_sink( - &self, - s3_options: &S3Options, - service: S3Service, - ) -> Result { - // we use lower default limits, because we send 100mb batches, - // thus no need of the higher number of outgoing requests - let request_limits = self.request.unwrap_with(&Default::default()); - let service = ServiceBuilder::new() - .settings(request_limits, S3RetryLogic) - .service(service); - - match s3_options.storage_class { - class @ S3StorageClass::DeepArchive | class @ S3StorageClass::Glacier => { - return Err(ConfigError::UnsupportedStorageClass { - storage_class: format!("{:?}", class), - }); - } - _ => (), - } - - let batcher_settings = BatchConfig::::default() - .into_batcher_settings() - .expect("invalid batch settings"); - - let partitioner = S3KeyPartitioner::new( - Template::try_from(KEY_TEMPLATE).expect("invalid object key format"), - None, - ); - - let s3_config = self - .aws_s3 - .as_ref() - .expect("s3 config wasn't provided") - .clone(); - let request_builder = DatadogS3RequestBuilder::new( - self.bucket.clone(), - self.key_prefix.clone(), - s3_config, - self.encoding.clone(), - ); - - let sink = S3Sink::new(service, request_builder, partitioner, batcher_settings); - - Ok(VectorSink::from_event_streamsink(sink)) - } - - pub fn build_gcs_sink( - &self, - client: HttpClient, - base_url: String, - auth: GcpAuthenticator, - ) -> crate::Result { - let request = self.request.unwrap_with(&Default::default()); - let protocol = get_http_scheme_from_uri(&base_url.parse::()?); - - let batcher_settings = BatchConfig::::default() - .into_batcher_settings() - .expect("invalid batch settings"); - - let svc = ServiceBuilder::new() - .settings(request, GcsRetryLogic) - .service(GcsService::new(client, base_url, auth)); - - let gcs_config = self - .gcp_cloud_storage - .as_ref() - .expect("gcs config wasn't provided") - .clone(); - - let acl = gcs_config - .acl - .map(|acl| 
HeaderValue::from_str(&to_string(acl)).unwrap()); - let storage_class = gcs_config.storage_class.unwrap_or_default(); - let storage_class = HeaderValue::from_str(&to_string(storage_class)).unwrap(); - let metadata = gcs_config - .metadata - .as_ref() - .map(|metadata| { - metadata - .iter() - .map(make_header) - .collect::, _>>() - }) - .unwrap_or_else(|| Ok(vec![]))?; - let request_builder = DatadogGcsRequestBuilder { - bucket: self.bucket.clone(), - key_prefix: self.key_prefix.clone(), - acl, - storage_class, - metadata, - encoding: DatadogArchivesEncoding::new(self.encoding.clone()), - compression: DEFAULT_COMPRESSION, - }; - - let partitioner = DatadogArchivesSinkConfig::build_partitioner(); - - let sink = GcsSink::new( - svc, - request_builder, - partitioner, - batcher_settings, - protocol, - ); - - Ok(VectorSink::from_event_streamsink(sink)) - } - - fn build_azure_sink(&self, client: Arc) -> crate::Result { - let request_limits = self.request.unwrap_with(&Default::default()); - let service = ServiceBuilder::new() - .settings(request_limits, AzureBlobRetryLogic) - .service(AzureBlobService::new(client)); - - let batcher_settings = BatchConfig::::default() - .into_batcher_settings() - .expect("invalid batch settings"); - - let partitioner = DatadogArchivesSinkConfig::build_partitioner(); - let request_builder = DatadogAzureRequestBuilder { - container_name: self.bucket.clone(), - blob_prefix: self.key_prefix.clone(), - encoding: DatadogArchivesEncoding::new(self.encoding.clone()), - }; - - let sink = AzureBlobSink::new(service, request_builder, partitioner, batcher_settings); - - Ok(VectorSink::from_event_streamsink(sink)) - } - - pub fn build_partitioner() -> KeyPartitioner { - KeyPartitioner::new(Template::try_from(KEY_TEMPLATE).expect("invalid object key format")) - } -} - -const RESERVED_ATTRIBUTES: [&str; 10] = [ - "_id", "date", "message", "host", "source", "service", "status", "tags", "trace_id", "span_id", -]; - -#[derive(Debug)] -struct DatadogArchivesEncoding { - encoder: (Transformer, Encoder), - reserved_attributes: HashSet<&'static str>, - id_rnd_bytes: [u8; 8], - id_seq_number: AtomicU32, -} - -impl DatadogArchivesEncoding { - /// Generates a unique event ID compatible with DD: - /// - 18 bytes; - /// - first 6 bytes represent a "now" timestamp in millis; - /// - the rest 12 bytes can be just any sequence unique for a given timestamp. - /// - /// To generate unique-ish trailing 12 bytes we use random 8 bytes, generated at startup, - /// and a rolling-over 4-bytes sequence number. 
- fn generate_log_id(&self) -> String { - let mut id = BytesMut::with_capacity(18); - // timestamp in millis - 6 bytes - let now = Utc::now(); - id.put_int(now.timestamp_millis(), 6); - - // 8 random bytes - id.put_slice(&self.id_rnd_bytes); - - // 4 bytes for the counter should be more than enough - it should be unique for 1 millisecond only - let id_seq_number = self.id_seq_number.fetch_add(1, Ordering::Relaxed); - id.put_u32(id_seq_number); - - BASE64_STANDARD.encode(id.freeze()) - } -} - -impl DatadogArchivesEncoding { - pub fn new(transformer: Transformer) -> Self { - Self { - encoder: ( - transformer, - Encoder::::new( - NewlineDelimitedEncoder::new().into(), - JsonSerializerConfig::default().build().into(), - ), - ), - reserved_attributes: RESERVED_ATTRIBUTES.iter().copied().collect(), - id_rnd_bytes: thread_rng().gen::<[u8; 8]>(), - id_seq_number: AtomicU32::new(0), - } - } -} - -impl crate::sinks::util::encoding::Encoder> for DatadogArchivesEncoding { - /// Applies the following transformations to align event's schema with DD: - /// - (required) `_id` is generated in the sink(format described below); - /// - (required) `date` is set from the `timestamp` meaning or Global Log Schema mapping, or to the current time if missing; - /// - `message`,`host` are set from the corresponding meanings or Global Log Schema mappings; - /// - `source`, `service`, `status`, `tags` and other reserved attributes are left as is; - /// - the rest of the fields is moved to `attributes`. - // TODO: All reserved attributes could have specific meanings, rather than specific paths - fn encode_input(&self, mut input: Vec, writer: &mut dyn Write) -> io::Result { - for event in input.iter_mut() { - let log_event = event.as_mut_log(); - - log_event.insert("_id", self.generate_log_id()); - - let timestamp = log_event - .remove_timestamp() - .unwrap_or_else(|| Utc::now().timestamp_millis().into()); - log_event.insert( - "date", - timestamp - .as_timestamp() - .cloned() - .unwrap_or_else(Utc::now) - .to_rfc3339_opts(SecondsFormat::Millis, true), - ); - - if let Some(message_path) = log_event.message_path() { - log_event.rename_key(message_path.as_str(), event_path!("message")); - } - - if let Some(host_path) = log_event.host_path() { - log_event.rename_key(host_path.as_str(), event_path!("host")); - } - - let mut attributes = BTreeMap::new(); - - let custom_attributes = if let Some(map) = log_event.as_map() { - map.keys() - .filter(|&path| !self.reserved_attributes.contains(path.as_str())) - .map(|v| v.to_owned()) - .collect() - } else { - vec![] - }; - - for path in custom_attributes { - if let Some(value) = log_event.remove(path.as_str()) { - attributes.insert(path, value); - } - } - log_event.insert("attributes", attributes); - } - - self.encoder.encode_input(input, writer) - } -} -#[derive(Debug)] -struct DatadogS3RequestBuilder { - bucket: String, - key_prefix: Option, - config: S3Config, - encoding: DatadogArchivesEncoding, -} - -impl DatadogS3RequestBuilder { - pub fn new( - bucket: String, - key_prefix: Option, - config: S3Config, - transformer: Transformer, - ) -> Self { - Self { - bucket, - key_prefix, - config, - encoding: DatadogArchivesEncoding::new(transformer), - } - } -} - -impl RequestBuilder<(S3PartitionKey, Vec)> for DatadogS3RequestBuilder { - type Metadata = S3Metadata; - type Events = Vec; - type Encoder = DatadogArchivesEncoding; - type Payload = Bytes; - type Request = S3Request; - type Error = io::Error; - - fn compression(&self) -> Compression { - DEFAULT_COMPRESSION - } - - fn 
encoder(&self) -> &Self::Encoder { - &self.encoding - } - - fn split_input( - &self, - input: (S3PartitionKey, Vec), - ) -> (Self::Metadata, RequestMetadataBuilder, Self::Events) { - let (partition_key, mut events) = input; - let finalizers = events.take_finalizers(); - let s3_key_prefix = partition_key.key_prefix.clone(); - - let builder = RequestMetadataBuilder::from_events(&events); - - let s3metadata = S3Metadata { - partition_key, - s3_key: s3_key_prefix, - finalizers, - }; - - (s3metadata, builder, events) - } - - fn build_request( - &self, - mut metadata: Self::Metadata, - request_metadata: RequestMetadata, - payload: EncodeResult, - ) -> Self::Request { - metadata.s3_key = generate_object_key(self.key_prefix.clone(), metadata.s3_key); - - let body = payload.into_payload(); - trace!( - message = "Sending events.", - bytes = ?body.len(), - events_len = ?request_metadata.events_byte_size(), - bucket = ?self.bucket, - key = ?metadata.partition_key - ); - - let s3_options = self.config.options.clone(); - S3Request { - body, - bucket: self.bucket.clone(), - metadata, - request_metadata, - content_encoding: DEFAULT_COMPRESSION.content_encoding(), - options: s3_common::config::S3Options { - acl: s3_options.acl, - grant_full_control: s3_options.grant_full_control, - grant_read: s3_options.grant_read, - grant_read_acp: s3_options.grant_read_acp, - grant_write_acp: s3_options.grant_write_acp, - server_side_encryption: s3_options.server_side_encryption, - ssekms_key_id: s3_options.ssekms_key_id, - storage_class: s3_options.storage_class, - tags: s3_options.tags.map(|tags| tags.into_iter().collect()), - content_encoding: None, - content_type: None, - }, - } - } -} - -#[derive(Debug)] -struct DatadogGcsRequestBuilder { - bucket: String, - key_prefix: Option, - acl: Option, - storage_class: HeaderValue, - metadata: Vec<(HeaderName, HeaderValue)>, - encoding: DatadogArchivesEncoding, - compression: Compression, -} - -impl RequestBuilder<(String, Vec)> for DatadogGcsRequestBuilder { - type Metadata = (String, EventFinalizers); - type Events = Vec; - type Payload = Bytes; - type Request = GcsRequest; - type Encoder = DatadogArchivesEncoding; - type Error = io::Error; - - fn split_input( - &self, - input: (String, Vec), - ) -> (Self::Metadata, RequestMetadataBuilder, Self::Events) { - let (partition_key, mut events) = input; - let metadata_builder = RequestMetadataBuilder::from_events(&events); - let finalizers = events.take_finalizers(); - - ((partition_key, finalizers), metadata_builder, events) - } - - fn build_request( - &self, - dd_metadata: Self::Metadata, - metadata: RequestMetadata, - payload: EncodeResult, - ) -> Self::Request { - let (key, finalizers) = dd_metadata; - - let key = generate_object_key(self.key_prefix.clone(), key); - - let body = payload.into_payload(); - - trace!( - message = "Sending events.", - bytes = body.len(), - events_len = metadata.event_count(), - bucket = %self.bucket, - ?key - ); - - let content_type = HeaderValue::from_str(self.encoding.encoder.1.content_type()).unwrap(); - let content_encoding = DEFAULT_COMPRESSION - .content_encoding() - .map(|ce| HeaderValue::from_str(&to_string(ce)).unwrap()); - - GcsRequest { - key, - body, - finalizers, - settings: GcsRequestSettings { - acl: self.acl.clone(), - content_type, - content_encoding, - storage_class: self.storage_class.clone(), - headers: self.metadata.clone(), - }, - metadata, - } - } - - fn compression(&self) -> Compression { - self.compression - } - - fn encoder(&self) -> &Self::Encoder { - &self.encoding - } 
-} - -fn generate_object_key(key_prefix: Option, partition_key: String) -> String { - let filename = Uuid::new_v4().to_string(); - - format!( - "{}/{}/archive_{}.{}", - key_prefix.unwrap_or_default(), - partition_key, - filename, - "json.gz" - ) - .replace("//", "/") -} - -#[derive(Debug)] -struct DatadogAzureRequestBuilder { - container_name: String, - blob_prefix: Option, - encoding: DatadogArchivesEncoding, -} - -impl RequestBuilder<(String, Vec)> for DatadogAzureRequestBuilder { - type Metadata = AzureBlobMetadata; - type Events = Vec; - type Encoder = DatadogArchivesEncoding; - type Payload = Bytes; - type Request = AzureBlobRequest; - type Error = io::Error; - - fn compression(&self) -> Compression { - DEFAULT_COMPRESSION - } - - fn encoder(&self) -> &Self::Encoder { - &self.encoding - } - - fn split_input( - &self, - input: (String, Vec), - ) -> (Self::Metadata, RequestMetadataBuilder, Self::Events) { - let (partition_key, mut events) = input; - let finalizers = events.take_finalizers(); - let metadata = AzureBlobMetadata { - partition_key, - count: events.len(), - byte_size: events.estimated_json_encoded_size_of(), - finalizers, - }; - let builder = RequestMetadataBuilder::from_events(&events); - - (metadata, builder, events) - } - - fn build_request( - &self, - mut metadata: Self::Metadata, - request_metadata: RequestMetadata, - payload: EncodeResult, - ) -> Self::Request { - metadata.partition_key = - generate_object_key(self.blob_prefix.clone(), metadata.partition_key); - - let blob_data = payload.into_payload(); - - trace!( - message = "Sending events.", - bytes = ?blob_data.len(), - events_len = ?metadata.count, - container = ?self.container_name, - blob = ?metadata.partition_key - ); - - AzureBlobRequest { - blob_data, - content_encoding: DEFAULT_COMPRESSION.content_encoding(), - content_type: "application/gzip", - metadata, - request_metadata, - } - } -} - -// This is implemented manually to satisfy `SinkConfig`, because if we derive it automatically via -// `#[configurable_component(sink("..."))]`, it would register the sink in a way that allowed it to -// be used in `vector generate`, etc... and we don't want that. -// -// TODO: When the sink is fully supported and we expose it for use/within the docs, remove this. -impl NamedComponent for DatadogArchivesSinkConfig { - fn get_component_name(&self) -> &'static str { - "datadog_archives" - } -} - -#[async_trait::async_trait] -#[typetag::serde(name = "datadog_archives")] -impl SinkConfig for DatadogArchivesSinkConfig { - async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, super::Healthcheck)> { - let sink_and_healthcheck = self.build_sink(cx).await?; - Ok(sink_and_healthcheck) - } - - fn input(&self) -> Input { - let requirements = schema::Requirement::empty() - .optional_meaning("host", Kind::bytes()) - .optional_meaning("message", Kind::bytes()) - .optional_meaning("source", Kind::bytes()) - .optional_meaning("service", Kind::bytes()) - .optional_meaning("severity", Kind::bytes()) - // TODO: A `timestamp` is required for rehydration, however today we generate a `Utc::now()` - // timestamp if it's not found in the event. We could require this meaning instead. 
- .optional_meaning("timestamp", Kind::timestamp()) - .optional_meaning("trace_id", Kind::bytes()); - - Input::log().with_schema_requirement(requirements) - } - - fn acknowledgements(&self) -> &AcknowledgementsConfig { - &self.acknowledgements - } -} - -// Make a header pair from a key-value string pair -fn make_header((name, value): (&String, &String)) -> crate::Result<(HeaderName, HeaderValue)> { - Ok(( - HeaderName::from_bytes(name.as_bytes())?, - HeaderValue::from_str(value)?, - )) -} - -#[cfg(test)] -mod tests { - #![allow(clippy::print_stdout)] // tests - - use std::{collections::BTreeMap, io::Cursor}; - - use chrono::DateTime; - use vector_core::partition::Partitioner; - - use super::*; - use crate::{event::LogEvent, sinks::util::encoding::Encoder as _}; - - #[test] - fn generate_config() { - crate::test_util::test_generate_config::(); - } - - #[test] - fn encodes_event() { - let mut event = Event::Log(LogEvent::from("test message")); - let log_mut = event.as_mut_log(); - log_mut.insert("service", "test-service"); - log_mut.insert("not_a_reserved_attribute", "value"); - log_mut.insert("tags", vec!["tag1:value1", "tag2:value2"]); - let timestamp = DateTime::parse_from_rfc3339("2021-08-23T18:00:27.879+02:00") - .expect("invalid test case") - .with_timezone(&Utc); - log_mut.insert("timestamp", timestamp); - - let mut writer = Cursor::new(Vec::new()); - let encoding = DatadogArchivesEncoding::new(Default::default()); - _ = encoding.encode_input(vec![event], &mut writer); - - let encoded = writer.into_inner(); - let json: BTreeMap = - serde_json::from_slice(encoded.as_slice()).unwrap(); - - validate_event_id( - json.get("_id") - .expect("_id not found") - .as_str() - .expect("_id is not a string"), - ); - - assert_eq!(json.len(), 6); // _id, message, date, service, attributes - assert_eq!( - json.get("message") - .expect("message not found") - .as_str() - .expect("message is not a string"), - "test message" - ); - assert_eq!( - json.get("date") - .expect("date not found") - .as_str() - .expect("date is not a string"), - "2021-08-23T16:00:27.879Z" - ); - assert_eq!( - json.get("service") - .expect("service not found") - .as_str() - .expect("service is not a string"), - "test-service" - ); - - assert_eq!( - json.get("tags") - .expect("tags not found") - .as_array() - .expect("service is not an array") - .to_owned(), - vec!["tag1:value1", "tag2:value2"] - ); - - let attributes = json - .get("attributes") - .expect("attributes not found") - .as_object() - .expect("attributes is not an object"); - assert_eq!(attributes.len(), 1); - assert_eq!( - String::from_utf8_lossy( - attributes - .get("not_a_reserved_attribute") - .expect("not_a_reserved_attribute wasn't moved to attributes") - .as_str() - .expect("not_a_reserved_attribute is not a string") - .as_ref() - ), - "value" - ); - } - - #[test] - fn generates_valid_key_for_an_event() { - let mut log = LogEvent::from("test message"); - - let timestamp = DateTime::parse_from_rfc3339("2021-08-23T18:00:27.879+02:00") - .expect("invalid test case") - .with_timezone(&Utc); - log.insert("timestamp", timestamp); - - let partitioner = DatadogArchivesSinkConfig::build_partitioner(); - let key = partitioner - .partition(&log.into()) - .expect("key wasn't provided"); - - assert_eq!(key, "/dt=20210823/hour=16/"); - } - - #[test] - fn generates_valid_id() { - let log1 = Event::Log(LogEvent::from("test event 1")); - let mut writer = Cursor::new(Vec::new()); - let encoding = DatadogArchivesEncoding::new(Default::default()); - _ = 
encoding.encode_input(vec![log1], &mut writer); - let encoded = writer.into_inner(); - let json: BTreeMap = - serde_json::from_slice(encoded.as_slice()).unwrap(); - let id1 = json - .get("_id") - .expect("_id not found") - .as_str() - .expect("_id is not a string"); - validate_event_id(id1); - - // check that id is different for the next event - let log2 = Event::Log(LogEvent::from("test event 2")); - let mut writer = Cursor::new(Vec::new()); - _ = encoding.encode_input(vec![log2], &mut writer); - let encoded = writer.into_inner(); - let json: BTreeMap = - serde_json::from_slice(encoded.as_slice()).unwrap(); - let id2 = json - .get("_id") - .expect("_id not found") - .as_str() - .expect("_id is not a string"); - validate_event_id(id2); - assert_ne!(id1, id2) - } - - #[test] - fn generates_date_if_missing() { - let log = Event::Log(LogEvent::from("test message")); - let mut writer = Cursor::new(Vec::new()); - let encoding = DatadogArchivesEncoding::new(Default::default()); - _ = encoding.encode_input(vec![log], &mut writer); - let encoded = writer.into_inner(); - let json: BTreeMap = - serde_json::from_slice(encoded.as_slice()).unwrap(); - - let date = DateTime::parse_from_rfc3339( - json.get("date") - .expect("date not found") - .as_str() - .expect("date is not a string"), - ) - .expect("date is not in an rfc3339 format"); - - // check that it is a recent timestamp - assert!(Utc::now().timestamp() - date.timestamp() < 1000); - } - - /// check that _id is: - /// - 18 bytes, - /// - base64-encoded, - /// - first 6 bytes - a "now" timestamp in millis - fn validate_event_id(id: &str) { - let bytes = BASE64_STANDARD - .decode(id) - .expect("_id is not base64-encoded"); - assert_eq!(bytes.len(), 18); - let mut timestamp: [u8; 8] = [0; 8]; - for (i, b) in bytes[..6].iter().enumerate() { - timestamp[i + 2] = *b; - } - let timestamp = i64::from_be_bytes(timestamp); - // check that it is a recent timestamp in millis - assert!(Utc::now().timestamp_millis() - timestamp < 1000); - } - - #[test] - fn s3_build_request() { - let fake_buf = Bytes::new(); - let mut log = Event::Log(LogEvent::from("test message")); - let timestamp = DateTime::parse_from_rfc3339("2021-08-23T18:00:27.879+02:00") - .expect("invalid test case") - .with_timezone(&Utc); - log.as_mut_log().insert("timestamp", timestamp); - let partitioner = S3KeyPartitioner::new( - Template::try_from(KEY_TEMPLATE).expect("invalid object key format"), - None, - ); - let key = partitioner.partition(&log).expect("key wasn't provided"); - - let request_builder = DatadogS3RequestBuilder::new( - "dd-logs".into(), - Some("audit".into()), - S3Config::default(), - Default::default(), - ); - - let (metadata, metadata_request_builder, _events) = - request_builder.split_input((key, vec![log])); - - let payload = EncodeResult::uncompressed(fake_buf.clone()); - let request_metadata = metadata_request_builder.build(&payload); - let req = request_builder.build_request(metadata, request_metadata, payload); - - let expected_key_prefix = "audit/dt=20210823/hour=16/archive_"; - let expected_key_ext = ".json.gz"; - println!("{}", req.metadata.s3_key); - assert!(req.metadata.s3_key.starts_with(expected_key_prefix)); - assert!(req.metadata.s3_key.ends_with(expected_key_ext)); - let uuid1 = &req.metadata.s3_key - [expected_key_prefix.len()..req.metadata.s3_key.len() - expected_key_ext.len()]; - assert_eq!(uuid1.len(), 36); - - // check that the second batch has a different UUID - let log2 = LogEvent::default().into(); - - let key = partitioner.partition(&log2).expect("key 
wasn't provided"); - let (metadata, metadata_request_builder, _events) = - request_builder.split_input((key, vec![log2])); - let payload = EncodeResult::uncompressed(fake_buf); - let request_metadata = metadata_request_builder.build(&payload); - let req = request_builder.build_request(metadata, request_metadata, payload); - - let uuid2 = &req.metadata.s3_key - [expected_key_prefix.len()..req.metadata.s3_key.len() - expected_key_ext.len()]; - - assert_ne!(uuid1, uuid2); - } - - #[tokio::test] - async fn error_if_unsupported_s3_storage_class() { - for (class, supported) in [ - (S3StorageClass::Standard, true), - (S3StorageClass::StandardIa, true), - (S3StorageClass::IntelligentTiering, true), - (S3StorageClass::OnezoneIa, true), - (S3StorageClass::ReducedRedundancy, true), - (S3StorageClass::DeepArchive, false), - (S3StorageClass::Glacier, false), - ] { - let config = DatadogArchivesSinkConfig { - service: "aws_s3".to_owned(), - bucket: "vector-datadog-archives".to_owned(), - key_prefix: Some("logs/".to_owned()), - request: TowerRequestConfig::default(), - aws_s3: Some(S3Config { - options: S3Options { - storage_class: class, - ..Default::default() - }, - region: RegionOrEndpoint::with_region("us-east-1".to_owned()), - auth: Default::default(), - }), - azure_blob: None, - gcp_cloud_storage: None, - tls: None, - encoding: Default::default(), - acknowledgements: Default::default(), - }; - - let res = config.build_sink(SinkContext::new_test()).await; - - if supported { - assert!(res.is_ok()); - } else { - assert_eq!( - res.err().unwrap().to_string(), - format!(r#"Unsupported storage class: {:?}"#, class) - ); - } - } - } -} diff --git a/src/sinks/mod.rs b/src/sinks/mod.rs index d64d92e37664b..194f1539e2011 100644 --- a/src/sinks/mod.rs +++ b/src/sinks/mod.rs @@ -26,7 +26,7 @@ pub mod aws_sqs; pub mod axiom; #[cfg(feature = "sinks-azure_blob")] pub mod azure_blob; -#[cfg(any(feature = "sinks-azure_blob", feature = "sinks-datadog_archives"))] +#[cfg(feature = "sinks-azure_blob")] pub mod azure_common; #[cfg(feature = "sinks-azure_monitor_logs")] pub mod azure_monitor_logs; @@ -45,8 +45,6 @@ pub mod databend; feature = "sinks-datadog_traces" ))] pub mod datadog; -#[cfg(feature = "sinks-datadog_archives")] -pub mod datadog_archives; #[cfg(feature = "sinks-elasticsearch")] pub mod elasticsearch; #[cfg(feature = "sinks-file")] @@ -83,10 +81,7 @@ pub mod prometheus; pub mod pulsar; #[cfg(feature = "sinks-redis")] pub mod redis; -#[cfg(all( - any(feature = "sinks-aws_s3", feature = "sinks-datadog_archives"), - feature = "aws-core" -))] +#[cfg(all(feature = "sinks-aws_s3", feature = "aws-core"))] pub mod s3_common; #[cfg(feature = "sinks-sematext")] pub mod sematext; diff --git a/website/cue/reference/components/sinks/datadog_archives.cue b/website/cue/reference/components/sinks/datadog_archives.cue deleted file mode 100644 index 9fce2f2901e1e..0000000000000 --- a/website/cue/reference/components/sinks/datadog_archives.cue +++ /dev/null @@ -1,248 +0,0 @@ -package metadata - -components: sinks: datadog_archives: { - title: "Datadog Log Archives" - - classes: { - commonly_used: true - delivery: "at_least_once" - development: "beta" - egress_method: "batch" - service_providers: ["AWS", "GCP"] // GCP, Azure is coming - stateful: false - } - - features: { - acknowledgements: true - healthcheck: enabled: true - send: { - batch: enabled: false - compression: enabled: false - encoding: enabled: false - proxy: enabled: false - request: { - enabled: true - headers: false - } - tls: { - enabled: true - 
can_verify_certificate: true - can_verify_hostname: true - enabled_default: true - enabled_by_scheme: true - } - } - } - - support: { - requirements: [] - warnings: [] - notices: [] - } - - configuration: { - bucket: { - description: "The bucket name. Do not include a leading `s3://` or a trailing `/`." - required: true - type: string: { - examples: ["my-bucket"] - - } - } - key_prefix: { - common: true - category: "File Naming" - description: "A prefix to apply to all object key names. This should be used to partition your objects in \"folders\"." - required: false - type: string: { - default: "/" - examples: ["logs/audit"] - } - } - service: { - category: "Storage" - description: "An external storage service where archived logs are sent to." - required: true - type: string: { - enum: { - aws_s3: "[AWS S3](\(urls.aws_s3)) is used as an external storage service." - google_cloud_storage: "[Google Cloud Storage](\(urls.gcs)) is used as an external storage service." - } - } - } - aws_s3: { - description: "AWS S3 specific configuration options. Required when `service` has the value `\"aws_s3\"`." - common: false - required: false - relevant_when: "service = \"aws_s3\"" - type: object: { - examples: [] - options: { - auth: { - common: false - description: "Options for the authentication strategy. Check the [`auth`](\(urls.vector_aws_s3_sink_auth)) section of the AWS S3 sink for more details." - required: false - type: object: {} - } - acl: sinks.aws_s3.configuration.acl - grant_full_control: sinks.aws_s3.configuration.grant_full_control - grant_read: sinks.aws_s3.configuration.grant_read - grant_read_acp: sinks.aws_s3.configuration.grant_read_acp - grant_write_acp: sinks.aws_s3.configuration.grant_write_acp - server_side_encryption: sinks.aws_s3.configuration.server_side_encryption - ssekms_key_id: sinks.aws_s3.configuration.ssekms_key_id - storage_class: { - category: "Storage" - common: false - description: """ - The storage class for the created objects. See [the S3 Storage Classes](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) for more details. - Log Rehydration supports all storage classes except for Glacier and Glacier Deep Archive. - """ - required: false - type: string: { - default: null - enum: { - STANDARD: "The default storage class. If you don't specify the storage class when you upload an object, Amazon S3 assigns the STANDARD storage class." - REDUCED_REDUNDANCY: "Designed for noncritical, reproducible data that can be stored with less redundancy than the STANDARD storage class. AWS recommends that you not use this storage class. The STANDARD storage class is more cost effective. " - INTELLIGENT_TIERING: "Stores objects in two access tiers: one tier that is optimized for frequent access and another lower-cost tier that is optimized for infrequently accessed data." - STANDARD_IA: "Amazon S3 stores the object data redundantly across multiple geographically separated Availability Zones (similar to the STANDARD storage class)." - ONEZONE_IA: "Amazon S3 stores the object data in only one Availability Zone." - } - } - } - tags: sinks.aws_s3.configuration.tags - region: { - description: "The [AWS region](\(urls.aws_regions)) of the target service." - required: true - type: string: { - examples: ["us-east-1"] - } - } - } - } - } - google_cloud_storage: { - description: "GCP Cloud Storage specific configuration options. Required when `service` has the value `\"google_cloud_storage\"`." 
- common: false - required: false - relevant_when: "service = \"google_cloud_storage\"" - warnings: [] - type: object: { - examples: [] - options: { - acl: sinks.gcp_cloud_storage.configuration.acl - credentials_path: sinks.gcp_cloud_storage.configuration.credentials_path - metadata: sinks.gcp_cloud_storage.configuration.metadata - storage_class: sinks.gcp_cloud_storage.configuration.storage_class - } - } - } - } - - input: { - logs: true - metrics: null - traces: false - } - - how_it_works: { - - a_object_key_format: { - title: "Custom object key format" - body: """ - Objects written to the external archives have the following key format: - ```text - /my/bucket/my/key/prefix/dt=/hour=/.json.gz - ``` - For example: - - ```text - /my/bucket/my/key/prefix/dt=20180515/hour=14/7dq1a9mnSya3bFotoErfxl.json.gz - ``` - """ - } - - b_event_preprocessing: { - title: "Event format/pre-processing" - body: """ - Within the gzipped JSON file, each event’s content is formatted as follows: - - ```json - { - "_id": "123456789abcdefg", - "date": "2018-05-15T14:31:16.003Z", - "host": "i-12345abced6789efg", - "source": "source_name", - "service": "service_name", - "status": "status_level", - "message": "2018-05-15T14:31:16.003Z INFO rid='acb-123' status=403 method=PUT", - "attributes": { "rid": "abc-123", "http": { "status_code": 403, "method": "PUT" } }, - "tags": [ "env:prod", "team:acme" ] - } - ``` - - Events are pre-processed as follows: - - - `_id` is always generated in the sink - - `date` is set from the Global [Log Schema](\(urls.vector_log_schema))'s `timestamp_key` mapping, - or to the current time if missing - - `message`,`host` are also set from the corresponding Global [Log Schema](\(urls.vector_log_schema)) mappings - - `source`, `service`, `status`, `tags` are left as is - - the rest of the fields is moved to `attributes` - - Though only `_id` and `date` are mandatory, - most reserved attributes( `host`, `source`, `service`, `status`, `message`, `tags`) are expected - for a meaningful log processing by DataDog. Therefore users should make sure that these optional fields are populated - before they reach this sink. - """ - } - - c_aws: { - title: "AWS S3 setup" - body: """ - For more details about AWS S3 configuration and how it works check out [AWS S3 sink](\(urls.vector_aws_s3_sink_how_it_works)). - """ - } - - d_google_cloud_storage: { - title: "GCP Cloud Storage setup" - body: """ - For more details about GCP Cloud Storage configuration and how it works check out [GCS sink](\(urls.vector_gcs_sink_how_it_works)). 
- """ - } - } - - permissions: iam: [ - { - platform: "aws" - _service: "s3" - _docs_tag: "AmazonS3" - _url_fragment: "API" - - policies: [ - { - _action: "HeadBucket" - required_for: ["healthcheck"] - }, - { - _action: "PutObject" - }, - ] - }, - { - platform: "gcp" - _service: "storage" - - policies: [ - { - _action: "objects.create" - required_for: ["operation"] - }, - { - _action: "objects.get" - required_for: ["healthcheck"] - }, - ] - }, - ] -} From 719662280205d47ce9497646368911c8f5c28b0d Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Tue, 27 Jun 2023 15:35:38 -0400 Subject: [PATCH 187/236] chore: Add submodules to all checkouts (#17770) --- .github/audit.yml | 2 ++ .github/workflows/changes.yml | 4 ++++ .github/workflows/cli.yml | 3 +++ .github/workflows/compilation-timings.yml | 10 ++++++++++ .github/workflows/environment.yml | 3 +++ .../gardener_remove_waiting_author.yml | 2 ++ .github/workflows/install-sh.yml | 3 +++ .github/workflows/k8s_e2e.yml | 6 ++++++ .github/workflows/msrv.yml | 2 ++ .github/workflows/publish.yml | 17 +++++++++++++++++ .github/workflows/regression.yml | 11 +++++++++++ 11 files changed, 63 insertions(+) diff --git a/.github/audit.yml b/.github/audit.yml index 8a655a04ace82..6a3007248db03 100644 --- a/.github/audit.yml +++ b/.github/audit.yml @@ -12,6 +12,8 @@ jobs: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 + with: + submodules: "recursive" - uses: actions-rs/audit-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index db6c4225073e0..4b6697fed8726 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -119,6 +119,8 @@ jobs: k8s: ${{ steps.filter.outputs.k8s }} steps: - uses: actions/checkout@v3 + with: + submodules: "recursive" - uses: dorny/paths-filter@v2 id: filter @@ -212,6 +214,8 @@ jobs: webhdfs: ${{ steps.filter.outputs.webhdfs }} steps: - uses: actions/checkout@v3 + with: + submodules: "recursive" # creates a yaml file that contains the filters for each integration, # extracted from the output of the `vdev int ci-paths` command, which diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index a2ba175e0e4cd..7b03d7fef1876 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -28,10 +28,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + submodules: "recursive" - name: Cache Cargo registry + index uses: actions/cache@v3 diff --git a/.github/workflows/compilation-timings.yml b/.github/workflows/compilation-timings.yml index e96bea65ea946..94616e04af086 100644 --- a/.github/workflows/compilation-timings.yml +++ b/.github/workflows/compilation-timings.yml @@ -17,6 +17,8 @@ jobs: steps: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 + with: + submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: cargo clean @@ -33,6 +35,8 @@ jobs: steps: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 + with: + submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: cargo clean @@ -44,6 +48,8 @@ jobs: steps: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 + with: + submodules: "recursive" - run: sudo -E bash 
scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: cargo clean @@ -55,6 +61,8 @@ jobs: steps: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 + with: + submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: cargo clean @@ -68,6 +76,8 @@ jobs: steps: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 + with: + submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: cargo clean diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 7057f98f21d6a..29c1901f31b67 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -34,10 +34,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + submodules: "recursive" - name: Set up QEMU uses: docker/setup-qemu-action@v2.2.0 diff --git a/.github/workflows/gardener_remove_waiting_author.yml b/.github/workflows/gardener_remove_waiting_author.yml index 9fe063e50b40d..37f6665034a14 100644 --- a/.github/workflows/gardener_remove_waiting_author.yml +++ b/.github/workflows/gardener_remove_waiting_author.yml @@ -9,6 +9,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + with: + submodules: "recursive" - uses: actions-ecosystem/action-remove-labels@v1 with: labels: "meta: awaiting author" diff --git a/.github/workflows/install-sh.yml b/.github/workflows/install-sh.yml index 045319a191642..83d7f9517035e 100644 --- a/.github/workflows/install-sh.yml +++ b/.github/workflows/install-sh.yml @@ -28,10 +28,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + submodules: "recursive" - run: pip3 install awscli --upgrade --user - env: diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index f9360132d71e0..bbdea21a59588 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -88,10 +88,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + submodules: "recursive" - uses: actions/cache@v3 with: @@ -205,10 +208,13 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} + submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 + with: + submodules: "recursive" - uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 361aa9cd4a3b8..00ced7b7504b5 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -16,6 +16,8 @@ jobs: runs-on: [ubuntu-20.04] steps: - uses: actions/checkout@v3 + with: + submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: cargo install cargo-msrv --version 0.15.1 - run: cargo msrv verify diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 69c86091ea8c0..381be90c0806f 100644 --- a/.github/workflows/publish.yml +++ 
b/.github/workflows/publish.yml @@ -39,6 +39,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Generate publish metadata id: generate-publish-metadata run: make ci-generate-publish-metadata @@ -56,6 +57,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -81,6 +83,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -106,6 +109,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -133,6 +137,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -160,6 +165,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -187,6 +193,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -214,6 +221,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Bootstrap runner environment (macOS-specific) run: bash scripts/environment/bootstrap-macos-10.sh - name: Build Vector @@ -244,6 +252,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Bootstrap runner environment (Windows-specific) run: .\scripts\environment\bootstrap-windows-2019.ps1 - name: Install Wix @@ -311,6 +320,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Download staged package artifacts (x86_64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: @@ -367,6 +377,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Download staged package artifacts (x86_64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: @@ -394,6 +405,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Download staged package artifacts (x86_64-apple-darwin) uses: actions/download-artifact@v3 with: @@ -424,6 +436,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Login to DockerHub uses: docker/login-action@v2.1.0 with: @@ -499,6 +512,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Download staged package artifacts (aarch64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: @@ -570,6 +584,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - 
name: Download staged package artifacts (aarch64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: @@ -630,6 +645,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Publish update to Homebrew tap env: GITHUB_TOKEN: ${{ secrets.GH_PACKAGE_PUBLISHER_TOKEN }} @@ -655,6 +671,7 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} + submodules: "recursive" - name: Download staged package artifacts (aarch64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 40600010998c2..ea6882a2447aa 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -48,6 +48,8 @@ jobs: comment_valid: ${{ steps.comment.outputs.isTeamMember }} steps: - uses: actions/checkout@v3 + with: + submodules: "recursive" - name: Collect file changes id: changes @@ -129,6 +131,7 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 1000 + submodules: "recursive" # If triggered by issue comment, the event payload doesn't directly contain the head and base sha from the PR. # But, we can retrieve this info from some commands. @@ -287,6 +290,8 @@ jobs: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 + with: + submodules: "recursive" - uses: actions/checkout@v3 with: @@ -325,6 +330,8 @@ jobs: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 + with: + submodules: "recursive" - uses: actions/checkout@v3 with: @@ -475,6 +482,7 @@ jobs: - uses: actions/checkout@v3 with: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} + submodules: "recursive" - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2.2.0 @@ -594,6 +602,8 @@ jobs: - compute-metadata steps: - uses: actions/checkout@v3 + with: + submodules: "recursive" - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2.2.0 @@ -685,6 +695,7 @@ jobs: - uses: actions/checkout@v3 with: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} + submodules: "recursive" - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2.2.0 From 1a75ec6656cc194068fb98f3d18e14705ef32c91 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 21:29:42 +0000 Subject: [PATCH 188/236] chore(deps): Bump libc from 0.2.146 to 0.2.147 (#17753) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [libc](https://github.com/rust-lang/libc) from 0.2.146 to 0.2.147.
Release notes

Sourced from libc's releases.

0.2.147



Full Changelog: https://github.com/rust-lang/libc/compare/0.2.146...0.2.147

Commits
  • e2b5151 Auto merge of #3283 - flba-eb:release_147, r=JohnTitor
  • 2fc3ef9 Auto merge of #3276 - dragan-cecavac-nordsec:android/add-nlm_f_dump_filtered,...
  • fffd791 Update and release version 0.2.147
  • 1e8943d Auto merge of #3271 - devnexen:android_sendfile, r=JohnTitor
  • b5e3a8a Auto merge of #3270 - devnexen:android_getentropy, r=JohnTitor
  • ea1e561 Auto merge of #3273 - flba-eb:add_qnx_extra_traits, r=JohnTitor
  • 0c7a69c Update src/unix/nto/neutrino.rs
  • c16d97c Auto merge of #3272 - sunfishcode:sunfishcode/ethernet, r=JohnTitor
  • 8530e6f Auto merge of #3279 - GuillaumeGomez:docs-rs, r=JohnTitor
  • 4df3981 Auto merge of #3281 - JohnTitor:fix-s390x-installer, r=JohnTitor
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=libc&package-manager=cargo&previous-version=0.2.146&new-version=0.2.147)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0d8161dd990ba..9cf9364bd095a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4566,9 +4566,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.146" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libflate" diff --git a/Cargo.toml b/Cargo.toml index 35de47bb073ca..e42a8e6538be7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -348,7 +348,7 @@ azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = base64 = "0.21.2" criterion = { version = "0.5.1", features = ["html_reports", "async_tokio"] } itertools = { version = "0.11.0", default-features = false, features = ["use_alloc"] } -libc = "0.2.146" +libc = "0.2.147" similar-asserts = "1.4.2" proptest = "1.2" quickcheck = "1.0.3" From ac3bc72bb5e0fd905e2680d4046a2984de5d07b8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 21:31:15 +0000 Subject: [PATCH 189/236] chore(deps): Bump h2 from 0.3.19 to 0.3.20 (#17767) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [h2](https://github.com/hyperium/h2) from 0.3.19 to 0.3.20.
Release notes

Sourced from h2's releases.

v0.3.20

Bug Fixes

  • Fix panic if a server received a request with a :status pseudo header in the 1xx range. (#695)
  • Fix panic if a reset stream had pending push promises that were more than allowed. (#685)
  • Fix potential flow control overflow by subtraction, instead returning a connection error. (#692)


Changelog

Sourced from h2's changelog.

0.3.20 (June 26, 2023)

  • Fix panic if a server received a request with a :status pseudo header in the 1xx range.
  • Fix panic if a reset stream had pending push promises that were more than allowed.
  • Fix potential flow control overflow by subtraction, instead returning a connection error.
Commits
  • 6a75f23 v0.3.20
  • 0189722 Fix for a fuzzer-discovered integer underflow of the flow control window size...
  • 478f7b9 Fix for invalid header panic corrected (#695)
  • 864430c Enabled clippy in CI and ran clippy --fix
  • 972fb6f chore: add funding file
  • 97bc3e3 hammer test requires a new tokio feature
  • 66c36c4 fix panic on receiving invalid headers frame by making the take_request fun...
  • 04e6398 fix: panicked when a reset stream would decrement twice
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=h2&package-manager=cargo&previous-version=0.3.19&new-version=0.3.20)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9cf9364bd095a..72b0be0c20714 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3539,9 +3539,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" +checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" dependencies = [ "bytes 1.4.0", "fnv", diff --git a/Cargo.toml b/Cargo.toml index e42a8e6538be7..15ad67c89e4d8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -256,7 +256,7 @@ futures-util = { version = "0.3.28", default-features = false } glob = { version = "0.3.1", default-features = false } governor = { version = "0.5.1", default-features = false, features = ["dashmap", "jitter", "std"], optional = true } grok = { version = "2.0.0", default-features = false, optional = true } -h2 = { version = "0.3.19", default-features = false, optional = true } +h2 = { version = "0.3.20", default-features = false, optional = true } hash_hasher = { version = "2.0.0", default-features = false } hashbrown = { version = "0.14.0", default-features = false, optional = true, features = ["ahash"] } headers = { version = "0.3.8", default-features = false } From e07158c7a80352d2d36216eb90141033b863964a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 21:33:17 +0000 Subject: [PATCH 190/236] chore(deps): Bump serde_json from 1.0.97 to 1.0.99 (#17754) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.97 to 1.0.99.
Release notes

Sourced from serde_json's releases.

v1.0.99

v1.0.98

  • Update indexmap dependency used by "preserve_order" feature to version 2
Commits
  • b4ec50c Release 1.0.99
  • 1153052 Merge pull request #1030 from SecondHalfGames/map-key-serialize-some
  • ba29a89 Release 1.0.98
  • 9508e50 Merge pull request #1031 from serde-rs/indexmap
  • 706fc2b Do all CI builds with old rustc using shim crate
  • d4c98d0 Move serde_json_test crate to own workspace
  • e09d78f Update indexmap dependency used for preserve_order feature to version 2
  • 5145907 Delete unneeded conditional on preserve_order steps in CI
  • b0fa978 Change MapKeySerializer::serialize_some to fall through instead of erroring
  • See full diff in compare view
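
A hedged illustration of the MapKeySerializer change listed in the commits above: the sketch below is not code from this repository, and it assumes serde_json 1.0.98 or newer with its default features.

```rust
// Hypothetical sketch: with the serialize_some fall-through, an Option map key
// that is Some(..) now serializes as the inner key instead of erroring.
use std::collections::BTreeMap;

fn main() -> serde_json::Result<()> {
    let mut map: BTreeMap<Option<String>, u32> = BTreeMap::new();
    map.insert(Some("a".to_string()), 1);

    // Expected to print {"a":1}; older releases returned a
    // "key must be a string" error for this map.
    println!("{}", serde_json::to_string(&map)?);
    Ok(())
}
```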

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=serde_json&package-manager=cargo&previous-version=1.0.97&new-version=1.0.99)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- lib/vector-api-client/Cargo.toml | 2 +- lib/vector-common/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- vdev/Cargo.toml | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 72b0be0c20714..d7a6b4221055e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7371,11 +7371,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.97" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf3bf93142acad5821c99197022e170842cdbc1c30482b98750c688c640842a" +checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" dependencies = [ - "indexmap 1.9.3", + "indexmap 2.0.0", "itoa", "ryu", "serde", diff --git a/Cargo.toml b/Cargo.toml index 15ad67c89e4d8..2ea1fc9912f6b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -192,7 +192,7 @@ tower-http = { version = "0.4.1", default-features = false, features = ["decompr serde = { version = "1.0.164", default-features = false, features = ["derive"] } serde-toml-merge = { version = "0.3.0", default-features = false } serde_bytes = { version = "0.11.9", default-features = false, features = ["std"], optional = true } -serde_json = { version = "1.0.97", default-features = false, features = ["raw_value"] } +serde_json = { version = "1.0.99", default-features = false, features = ["raw_value"] } serde_with = { version = "2.3.2", default-features = false, features = ["macros", "std"] } serde_yaml = { version = "0.9.22", default-features = false } diff --git a/lib/vector-api-client/Cargo.toml b/lib/vector-api-client/Cargo.toml index 06c39deff3810..16100f5efb28d 100644 --- a/lib/vector-api-client/Cargo.toml +++ b/lib/vector-api-client/Cargo.toml @@ -10,7 +10,7 @@ license = "MPL-2.0" # Serde serde = { version = "1.0.164", default-features = false, features = ["derive"] } -serde_json = { version = "1.0.97", default-features = false, features = ["raw_value"] } +serde_json = { version = "1.0.99", default-features = false, features = ["raw_value"] } # Error handling anyhow = { version = "1.0.71", default-features = false, features = ["std"] } diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 0f38b78fdea30..6cc524d2f9ceb 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -55,7 +55,7 @@ ordered-float = { version = "3.7.0", default-features = false } paste = "1.0.12" pin-project = { version = "1.1.0", default-features = false } ryu = { version = "1", default-features = false } -serde_json = { version = "1.0.97", default-features = false, features = ["std", "raw_value"] } +serde_json = { version = "1.0.99", default-features = false, features = ["std", "raw_value"] } serde = { version = "1.0.164", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false } snafu = { version = "0.7", optional = true } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 211442af80960..e12aa71d00f5c 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -41,7 +41,7 @@ quanta = { version = "0.11.1", default-features = false } regex = { version = "1.8.4", default-features = false, features = ["std", "perf"] } ryu = { version = "1", default-features = false } serde = { version = "1.0.164", default-features = false, features = ["derive", "rc"] } -serde_json = { version = "1.0.97", default-features = 
false } +serde_json = { version = "1.0.99", default-features = false } serde_with = { version = "2.3.2", default-features = false, features = ["std", "macros"] } smallvec = { version = "1", default-features = false, features = ["serde", "const_generics"] } snafu = { version = "0.7.4", default-features = false } diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index d144180e1ac88..0fbffa7064b5a 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -33,7 +33,7 @@ paste = "1.0.12" regex = { version = "1.8.4", default-features = false, features = ["std", "perf"] } reqwest = { version = "0.11", features = ["json", "blocking"] } serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0.97" +serde_json = "1.0.99" serde_yaml = "0.9.22" sha2 = "0.10.7" tempfile = "3.6.0" From 35c458163ac11baa4cba73b37dadaf71d41fd13a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 21:34:48 +0000 Subject: [PATCH 191/236] chore(deps): Bump toml from 0.7.4 to 0.7.5 (#17751) Bumps [toml](https://github.com/toml-rs/toml) from 0.7.4 to 0.7.5.
Commits
  • 22fb58e chore: Don't release serde_toml
  • bd45f69 chore: Release
  • 8ef76fb docs: Update changelog
  • 224dca8 Merge pull request #574 from cuviper/indexmap-2
  • ef0a3fc chore: Upgrade to indexmap 2
  • f4a5aa7 Merge pull request #570 from sksat/replace-unmaintained-toolchain-action
  • dcc2ca1 fix: Replace unmaintained actions-rs/toolchain with dtolnay/rust-toolchain
  • 48fb5a2 Merge pull request #569 from joshtriplett/workspace
  • 9c4faad Inherit package.homepage from the workspace
  • c506198 Inherit package.repository from the workspace
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=toml&package-manager=cargo&previous-version=0.7.4&new-version=0.7.5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 28 ++++++++++++++-------------- Cargo.toml | 2 +- lib/vector-config/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 4 ++-- vdev/Cargo.toml | 2 +- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d7a6b4221055e..d79012d62cc26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7302,7 +7302,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a78072b550e5c20bc4a9d1384be28809cbdb7b25b2b4707ddc6d908b7e6de3bf" dependencies = [ - "toml 0.7.4", + "toml 0.7.5", ] [[package]] @@ -7423,9 +7423,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" dependencies = [ "serde", ] @@ -8431,9 +8431,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6135d499e69981f9ff0ef2167955a5333c35e36f6937d382974566b3d5b94ec" +checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240" dependencies = [ "serde", "serde_spanned", @@ -8443,20 +8443,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.9" +version = "0.19.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d964908cec0d030b812013af25a0e57fddfadb1e066ecc6681d86253129d4f" +checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7" dependencies = [ - "indexmap 1.9.3", + "indexmap 2.0.0", "serde", "serde_spanned", "toml_datetime", @@ -9116,7 +9116,7 @@ dependencies = [ "serde_yaml 0.9.22", "sha2 0.10.7", "tempfile", - "toml 0.7.4", + "toml 0.7.5", ] [[package]] @@ -9286,7 +9286,7 @@ dependencies = [ "tokio-test", "tokio-tungstenite 0.19.0", "tokio-util", - "toml 0.7.4", + "toml 0.7.5", "tonic", "tonic-build", "tower", @@ -9434,7 +9434,7 @@ dependencies = [ "serde_json", "serde_with 2.3.2", "snafu", - "toml 0.7.4", + "toml 0.7.5", "tracing 0.1.37", "url", "vector-config-common", @@ -9534,7 +9534,7 @@ dependencies = [ "tokio-stream", "tokio-test", "tokio-util", - "toml 0.7.4", + "toml 0.7.5", "tonic", "tower", "tracing 0.1.37", diff --git a/Cargo.toml b/Cargo.toml index 2ea1fc9912f6b..ff34795c30e96 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -309,7 +309,7 @@ syslog = { version = "6.1.0", default-features = false, optional = true } tikv-jemallocator = { version = "0.5.0", default-features = false, optional = true } tokio-postgres = { version = "0.7.7", default-features = false, features = ["runtime", "with-chrono-0_4"], optional = true } tokio-tungstenite = {version = "0.19.0", default-features = false, features = ["connect"], optional = true} -toml = { version = "0.7.4", default-features = false, features = ["parse", "display"] } +toml = { version = "0.7.5", default-features = false, features = ["parse", "display"] } tonic = { version = "0.9", optional = true, default-features = 
false, features = ["transport", "codegen", "prost", "tls", "tls-roots", "gzip"] } trust-dns-proto = { version = "0.22.0", default-features = false, features = ["dnssec"], optional = true } typetag = { version = "0.2.8", default-features = false } diff --git a/lib/vector-config/Cargo.toml b/lib/vector-config/Cargo.toml index bae87581cc42f..b94fa49ca54b0 100644 --- a/lib/vector-config/Cargo.toml +++ b/lib/vector-config/Cargo.toml @@ -23,7 +23,7 @@ serde = { version = "1.0", default-features = false } serde_json = { version = "1.0", default-features = false, features = ["std"] } serde_with = { version = "2.3.2", default-features = false, features = ["std"] } snafu = { version = "0.7.4", default-features = false } -toml = { version = "0.7.4", default-features = false } +toml = { version = "0.7.5", default-features = false } tracing = { version = "0.1.34", default-features = false } url = { version = "2.4.0", default-features = false, features = ["serde"] } vrl = { version = "0.4.0", default-features = false, features = ["compiler"] } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index e12aa71d00f5c..c14da07f9f27b 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -50,7 +50,7 @@ tokio = { version = "1.28.2", default-features = false, features = ["net"] } tokio-openssl = { version = "0.6.3", default-features = false } tokio-stream = { version = "0.1", default-features = false, features = ["time"], optional = true } tokio-util = { version = "0.7.0", default-features = false, features = ["time"] } -toml = { version = "0.7.4", default-features = false } +toml = { version = "0.7.5", default-features = false } tonic = { version = "0.9", default-features = false, features = ["transport"] } tower = { version = "0.4", default-features = false, features = ["util"] } tracing = { version = "0.1.34", default-features = false } @@ -86,7 +86,7 @@ quickcheck_macros = "1" proptest = "1.2" similar-asserts = "1.4.2" tokio-test = "0.4.2" -toml = { version = "0.7.4", default-features = false, features = ["parse"] } +toml = { version = "0.7.5", default-features = false, features = ["parse"] } ndarray = "0.15.6" ndarray-stats = "0.5.1" noisy_float = "0.2.0" diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 0fbffa7064b5a..97b4f68f9f4b4 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -37,4 +37,4 @@ serde_json = "1.0.99" serde_yaml = "0.9.22" sha2 = "0.10.7" tempfile = "3.6.0" -toml = { version = "0.7.4", default-features = false, features = ["parse"] } +toml = { version = "0.7.5", default-features = false, features = ["parse"] } From fdf02d954286288f435c84395bc9b9be13806899 Mon Sep 17 00:00:00 2001 From: Stephen Wakely Date: Wed, 28 Jun 2023 10:40:52 +0100 Subject: [PATCH 192/236] fix(observability): issues with event_cache PR (#17768) Ref #17580 Ref #17581 Re this PR #17549 This fixes a couple of issues with the mentioned PR. - When telemetry was turned off it was still emitting the empty tags. - The service tag was being emitted by converting the value using `to_string`. This meant the value was delimited by `"`. - A `telemetry` section is added to the global configuration docs. 
--------- Signed-off-by: Stephen Wakely --- lib/vector-common/src/request_metadata.rs | 10 +++- lib/vector-core/src/event/log_event.rs | 2 +- website/cue/reference/configuration.cue | 56 +++++++++++++++++++++++ 3 files changed, 66 insertions(+), 2 deletions(-) diff --git a/lib/vector-common/src/request_metadata.rs b/lib/vector-common/src/request_metadata.rs index d28d7da681a58..9b93a63df7626 100644 --- a/lib/vector-common/src/request_metadata.rs +++ b/lib/vector-common/src/request_metadata.rs @@ -25,6 +25,14 @@ impl EventCountTags { service: OptionalTag::Specified(None), } } + + #[must_use] + pub fn new_unspecified() -> Self { + Self { + source: OptionalTag::Ignored, + service: OptionalTag::Ignored, + } + } } /// Must be implemented by events to get the tags that will be attached to @@ -133,7 +141,7 @@ impl GroupedCountByteSize { } } GroupedCountByteSize::Untagged { size } => { - event_cache.emit(&EventCountTags::new_empty(), *size); + event_cache.emit(&EventCountTags::new_unspecified(), *size); } } } diff --git a/lib/vector-core/src/event/log_event.rs b/lib/vector-core/src/event/log_event.rs index c782476b5c515..e5755f12d7e66 100644 --- a/lib/vector-core/src/event/log_event.rs +++ b/lib/vector-core/src/event/log_event.rs @@ -224,7 +224,7 @@ impl GetEventCountTags for LogEvent { let service = if telemetry().tags().emit_service { self.get_by_meaning("service") - .map(ToString::to_string) + .map(|value| value.to_string_lossy().to_string()) .into() } else { OptionalTag::Ignored diff --git a/website/cue/reference/configuration.cue b/website/cue/reference/configuration.cue index 6fcacb5c6c3c6..d1a1476d3de38 100644 --- a/website/cue/reference/configuration.cue +++ b/website/cue/reference/configuration.cue @@ -252,6 +252,62 @@ configuration: { } } + telemetry: { + common: false + description: """ + Configures options for how Vector emits telemetry. + """ + required: false + type: object: { + examples: [] + options: { + tags: { + required: false + description: """ + Controls which tags should be included with the `vector_component_sent_events_total` and + `vector_component_sent_event_bytes_total` metrics. + """ + type: object: { + examples: [] + options: { + emit_source: { + common: true + description: """ + Add a `source` tag with the source component the event was received from. + + If there is no source component, for example if the event was generated by + the `lua` transform a `-` is emitted for this tag. + """ + required: false + type: bool: { + default: false + } + } + emit_service: { + common: false + description: """ + Adds a `service` tag with the service component the event was received from. + + For logs this is the field that has been determined to mean `service`. Each source may + define different fields for this. For example, with `syslog` events the `appname` field + is used. + + Metric events will use the tag named `service`. + + If no service is available a `-` is emitted for this tag. 
+ """ + required: false + type: bool: { + default: false + } + } + } + } + } + } + } + } + log_schema: { common: false description: """ From 23a3e0ebf44fd8efa46a6861aa91404806be3831 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B4=AA=E9=98=BF=E5=8D=97?= Date: Wed, 28 Jun 2023 22:28:55 +0800 Subject: [PATCH 193/236] fix(file source): Fix tailing problem when source number greater than 512 (#17717) fix: #17360 --- src/app.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/app.rs b/src/app.rs index 12658a01115d3..49d57c0db9523 100644 --- a/src/app.rs +++ b/src/app.rs @@ -421,6 +421,7 @@ fn get_log_levels(default: &str) -> String { pub fn build_runtime(threads: Option, thread_name: &str) -> Result { let mut rt_builder = runtime::Builder::new_multi_thread(); + rt_builder.max_blocking_threads(20_000); rt_builder.enable_all().thread_name(thread_name); if let Some(threads) = threads { From ec4785a9f2ce948a2d44f777f13e69d1e8b7400c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 15:16:38 +0000 Subject: [PATCH 194/236] chore(deps): Bump opendal from 0.37.0 to 0.38.0 (#17777) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [opendal](https://github.com/apache/incubator-opendal) from 0.37.0 to 0.38.0.
Release notes

Sourced from opendal's releases.

v0.38.0

Upgrade to v0.38

There are no public API changes.

Raw API

OpenDAL adds the Write::sink API to enable streaming writes. This is a breaking change for users who depend on the raw API.

For a quick fix, users who have implemented opendal::raw::oio::Write can return an Unsupported error for Write::sink().

More details can be found in the RFC: Writer sink API.

What's Changed

Added

Changed

Fixed

... (truncated)

Changelog

Sourced from opendal's changelog.

[v0.38.0] - 2023-06-27

Added

Changed

Fixed

Docs

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=opendal&package-manager=cargo&previous-version=0.37.0&new-version=0.38.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d79012d62cc26..090d5156c0ddc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5523,9 +5523,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "opendal" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37de9fe637d53550bf3f76d5c731f69cb6f9685ada6afd390ada98994a3f91" +checksum = "717f47be1760a6a651f81eeba8239444a077d0d229409a016298d2b2483c408c" dependencies = [ "anyhow", "async-compat", diff --git a/Cargo.toml b/Cargo.toml index ff34795c30e96..dc9de46473c74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -183,7 +183,7 @@ azure_storage = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = azure_storage_blobs = { git = "https://github.com/Azure/azure-sdk-for-rust.git", rev = "b4544d4920fa3064eb921340054cd9cc130b7664", default-features = false, optional = true } # OpenDAL -opendal = {version = "0.37", default-features = false, features = ["native-tls", "services-webhdfs"], optional = true} +opendal = {version = "0.38", default-features = false, features = ["native-tls", "services-webhdfs"], optional = true} # Tower tower = { version = "0.4.13", default-features = false, features = ["buffer", "limit", "retry", "timeout", "util", "balance", "discover"] } From 935babf1ab6edcc345960af77a387712ffe36304 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 15:17:41 +0000 Subject: [PATCH 195/236] chore(deps): Bump uuid from 1.3.4 to 1.4.0 (#17775) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.3.4 to 1.4.0.
Release notes

Sourced from uuid's releases.

1.4.0

What's Changed

New Contributors

Full Changelog: https://github.com/uuid-rs/uuid/compare/1.3.4...1.4.0

Commits
  • 0fc3101 Merge pull request #692 from uuid-rs/cargo/1.4.0
  • d9f72db prepare for 1.4.0 release
  • cb2aac0 Merge pull request #691 from uuid-rs/fix/timestamp-gen
  • 0cb9232 add missing wasm import
  • cb80ba2 run fmt
  • 8babf97 add missing wasm test attr
  • 759c971 fix a warning in arbitrary support
  • 646bd98 wrap rather than overflow timestamps
  • 7da3f69 remove dbg call from wasm-based timestamp
  • 952f75f Merge pull request #686 from pod2co/borsh
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=uuid&package-manager=cargo&previous-version=1.3.4&new-version=1.4.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 090d5156c0ddc..cf61255776d29 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9064,9 +9064,9 @@ checksum = "936e4b492acfd135421d8dca4b1aa80a7bfc26e702ef3af710e0752684df5372" [[package]] name = "uuid" -version = "1.3.4" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa2982af2eec27de306107c027578ff7f423d65f7250e40ce0fea8f45248b81" +checksum = "d023da39d1fde5a8a3fe1f3e01ca9632ada0a63e9797de55a879d6e2236277be" dependencies = [ "getrandom 0.2.10", "rand 0.8.5", From a2a3609c58287240631c409172c4b1944bc7864f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 15:31:58 +0000 Subject: [PATCH 196/236] chore(deps): Bump hyper from 0.14.26 to 0.14.27 (#17766) Bumps [hyper](https://github.com/hyperium/hyper) from 0.14.26 to 0.14.27.
Release notes

Sourced from hyper's releases.

v0.14.27

Bug Fixes

  • http1:
    • send error on Incoming body when connection errors (#3256)
    • properly end chunked bodies when it was known to be empty (#3254)

Features

  • client: include connection info in Client::send_request errors (#2749)
Changelog

Sourced from hyper's changelog.

v0.14.27 (2023-06-26)

Bug Fixes

  • http1:

Features

  • client: include connection info in Client::send_request errors (#2749)
Commits
  • d77c259 v0.14.27
  • a7b2c82 chore(lib): disable log feature of tower dependency
  • b107655 fix(http1): send error on Incoming body when connection errors (#3256)
  • 32422c4 fix(http1): properly end chunked bodies when it was known to be empty (#3254)
  • 297dc4c feat(client): include connection info in Client::send_request errors (#2749)
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=hyper&package-manager=cargo&previous-version=0.14.26&new-version=0.14.27)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf61255776d29..a44d9c034ca52 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3883,9 +3883,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.26" +version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" +checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes 1.4.0", "futures-channel", diff --git a/Cargo.toml b/Cargo.toml index dc9de46473c74..2aea2037e2c3e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -263,7 +263,7 @@ headers = { version = "0.3.8", default-features = false } hostname = { version = "0.3.1", default-features = false } http = { version = "0.2.9", default-features = false } http-body = { version = "0.4.5", default-features = false } -hyper = { version = "0.14.26", default-features = false, features = ["client", "runtime", "http1", "http2", "server", "stream"] } +hyper = { version = "0.14.27", default-features = false, features = ["client", "runtime", "http1", "http2", "server", "stream"] } hyper-openssl = { version = "0.9.2", default-features = false } hyper-proxy = { version = "0.9.1", default-features = false, features = ["openssl-tls"] } indexmap = { version = "~1.9.3", default-features = false, features = ["serde"] } From 3b67a80f44c8abf9ba0e0a9bd77ee19d4a51d91a Mon Sep 17 00:00:00 2001 From: Dominic Burkart Date: Wed, 28 Jun 2023 18:03:12 +0200 Subject: [PATCH 197/236] docs: explain how to run tests locally (#17783) This PR explains that we use`vdev` for testing and gives instructions for using vdev to run tests locally. --- CONTRIBUTING.md | 12 ++++++++++++ vdev/README.md | 8 ++++++++ 2 files changed, 20 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7f4a52ed229f9..c993c8de4dc1a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -24,6 +24,7 @@ Vector team member will find this document useful. - [Daily tests](#daily-tests) - [Flakey tests](#flakey-tests) - [Test harness](#test-harness) + - [Running Tests Locally](#running-tests-locally) - [Deprecations](#deprecations) - [Dependencies](#dependencies) - [Next steps](#next-steps) @@ -210,6 +211,17 @@ any pull request with: /test -t ``` +### Running Tests Locally + +To run tests locally, use [cargo vdev](https://github.com/vectordotdev/vector/blob/master/vdev/README.md). + +Unit tests can be run by calling `cargo vdev test`. + +Integration tests are not run by default when running +`cargo vdev test`. Instead, they are accessible via the integration subcommand (example: +`cargo vdev int test aws` runs aws-related integration tests). You can find the list of available integration tests using `cargo vdev int show`. Integration tests require docker or podman to run. + + ### Deprecations When deprecating functionality in Vector, see [DEPRECATION.md](DEPRECATION.md). 
diff --git a/vdev/README.md b/vdev/README.md index 14e599e100cb7..88dc6c9bed54c 100644 --- a/vdev/README.md +++ b/vdev/README.md @@ -11,6 +11,7 @@ Table of Contents: - [Repository](#repository) - [Starship](#starship) - [CLI](#cli) +- [Running Tests](#running-tests) ## Installation @@ -65,3 +66,10 @@ when = true The CLI uses [Clap](https://github.com/clap-rs/clap) with the `derive` construction mechanism and is stored in the [commands](src/commands) directory. Every command group/namespace has its own directory with a `cli` module, including the root `vdev` command group. All commands have an `exec` method that provides the actual implementation, which in the case of command groups will be calling sub-commands. + + +## Running Tests + +Unit tests can be run by calling `cargo vdev test`. + +Integration tests are not run by default when running`cargo vdev test`. Instead, they are accessible via the integration subcommand (example: `cargo vdev int test aws` runs aws-related integration tests). You can find the list of available integration tests using `cargo vdev int show`. Integration tests require docker or podman to run. From e26e8b804c7fed0affad156160b65ba5e0df5a6e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 16:07:29 +0000 Subject: [PATCH 198/236] chore(deps): Bump tokio from 1.28.2 to 1.29.0 (#17776) Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.28.2 to 1.29.0.
Release notes

Sourced from tokio's releases.

Tokio v1.29.0

Technically a breaking change, the Send implementation is removed from runtime::EnterGuard. This change fixes a bug and should not impact most users.

Breaking

  • rt: EnterGuard should not be Send (#5766)

Fixed

  • fs: reduce blocking ops in fs::read_dir (#5653)
  • rt: fix possible starvation (#5686, #5712)
  • rt: fix stacked borrows issue in JoinSet (#5693)
  • rt: panic if EnterGuard dropped incorrect order (#5772)
  • time: do not overflow to signal value (#5710)
  • fs: wait for in-flight ops before cloning File (#5803)

Changed

  • rt: reduce time to poll tasks scheduled from outside the runtime (#5705, #5720)

Added

  • net: add uds doc alias for unix sockets (#5659)
  • rt: add metric for number of tasks (#5628)
  • sync: implement more traits for channel errors (#5666)
  • net: add nodelay methods on TcpSocket (#5672)
  • sync: add broadcast::Receiver::blocking_recv (#5690)
  • process: add raw_arg method to Command (#5704)
  • io: support PRIORITY epoll events (#5566)
  • task: add JoinSet::poll_join_next (#5721)
  • net: add support for Redox OS (#5790)

Unstable

  • rt: add the ability to dump task backtraces (#5608, #5676, #5708, #5717)
  • rt: instrument task poll times with a histogram (#5685)

#5766: tokio-rs/tokio#5766 #5653: tokio-rs/tokio#5653 #5686: tokio-rs/tokio#5686 #5712: tokio-rs/tokio#5712 #5693: tokio-rs/tokio#5693 #5772: tokio-rs/tokio#5772 #5710: tokio-rs/tokio#5710 #5803: tokio-rs/tokio#5803 #5705: tokio-rs/tokio#5705 #5720: tokio-rs/tokio#5720 #5659: tokio-rs/tokio#5659

... (truncated)
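
As a hedged illustration of the broadcast::Receiver::blocking_recv addition listed above, the sketch below is not code from this repository; it assumes tokio 1.29 with the sync feature enabled, and blocking_recv is meant to be called from outside an async context.

```rust
// Hypothetical sketch: blocking_recv lets a plain OS thread wait on a
// broadcast channel without an async runtime.
use tokio::sync::broadcast;

fn main() {
    let (tx, mut rx) = broadcast::channel::<u32>(16);

    // blocking_recv would panic inside an async task; a std thread is fine.
    let handle = std::thread::spawn(move || rx.blocking_recv());

    tx.send(42).expect("a receiver is still alive");
    assert_eq!(handle.join().unwrap().unwrap(), 42);
}
```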

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tokio&package-manager=cargo&previous-version=1.28.2&new-version=1.29.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Doug Smith --- Cargo.lock | 67 ++++++++++++++++++++++++++++--- Cargo.toml | 4 +- LICENSE-3rdparty.csv | 5 +++ lib/file-source/Cargo.toml | 2 +- lib/k8s-e2e-tests/Cargo.toml | 2 +- lib/k8s-test-framework/Cargo.toml | 2 +- lib/vector-api-client/Cargo.toml | 2 +- lib/vector-buffers/Cargo.toml | 2 +- lib/vector-common/Cargo.toml | 4 +- lib/vector-core/Cargo.toml | 2 +- 10 files changed, 76 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a44d9c034ca52..b6b3c424c35a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,6 +18,15 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b5ace29ee3216de37c0546865ad08edef58b0f9e76838ed8959a84a990e58c5" +[[package]] +name = "addr2line" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" +dependencies = [ + "gimli", +] + [[package]] name = "adler" version = "1.0.2" @@ -1306,6 +1315,21 @@ dependencies = [ "tokio", ] +[[package]] +name = "backtrace" +version = "0.3.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide 0.6.2", + "object", + "rustc-demangle", +] + [[package]] name = "base16" version = "0.2.1" @@ -3169,7 +3193,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.7.1", ] [[package]] @@ -3414,6 +3438,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "gimli" +version = "0.27.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" + [[package]] name = "glob" version = "0.3.1" @@ -4956,6 +4986,15 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +[[package]] +name = "miniz_oxide" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + [[package]] name = "miniz_oxide" version = "0.7.1" @@ -4967,14 +5006,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.42.0", + "windows-sys 0.48.0", ] [[package]] @@ -5472,6 +5511,15 @@ dependencies = [ "malloc_buf", ] +[[package]] +name = "object" +version = "0.30.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" +dependencies = [ + "memchr", +] + [[package]] name = "ofb" version = "0.6.1" @@ -6951,6 +6999,12 @@ dependencies = [ "serde_json", ] +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + [[package]] name = "rustc-hash" version = "1.1.0" @@ -8238,11 +8292,12 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.28.2" +version = "1.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2" +checksum = "374442f06ee49c3a28a8fc9f01a2596fed7559c6b99b31279c3261778e77d84f" dependencies = [ "autocfg", + "backtrace", "bytes 1.4.0", "libc", "mio", diff --git a/Cargo.toml b/Cargo.toml index 2aea2037e2c3e..5309c4242852a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -140,7 +140,7 @@ loki-logproto = { path = "lib/loki-logproto", optional = true } async-stream = { version = "0.3.5", default-features = false } async-trait = { version = "0.1.68", default-features = false } futures = { version = "0.3.28", default-features = false, features = ["compat", "io-compat"], package = "futures" } -tokio = { version = "1.28.2", default-features = false, features = ["full"] } +tokio = { version = "1.29.0", default-features = false, features = ["full"] } tokio-openssl = { version = "0.6.3", default-features = false } tokio-stream = { version = "0.1.14", default-features = false, features = ["net", "sync", "time"] } tokio-util = { version = "0.7", default-features = false, features = ["io", "time"] } @@ -356,7 +356,7 @@ reqwest = { version = "0.11", features = ["json"] } tempfile = "3.6.0" test-generator = "0.3.1" tokio-test = "0.4.2" -tokio = { version = "1.28.2", features = ["test-util"] } +tokio = { version = "1.29.0", features = ["test-util"] } tower-test = "0.4.0" vector-core = { path = "lib/vector-core", default-features = false, features = ["vrl", "test"] } wiremock = "0.5.19" diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index c137f340d73e9..418c470b2563a 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -1,6 +1,7 @@ Component,Origin,License,Copyright Inflector,https://github.com/whatisinternet/inflector,BSD-2-Clause,Josh Teeter RustyXML,https://github.com/Florob/RustyXML,MIT OR Apache-2.0,Florian Zeitz +addr2line,https://github.com/gimli-rs/addr2line,Apache-2.0 OR MIT,The addr2line Authors adler,https://github.com/jonas-schievink/adler,0BSD OR MIT OR Apache-2.0,Jonas Schievink adler32,https://github.com/remram44/adler32-rs,Zlib,Remi Rampin aes,https://github.com/RustCrypto/block-ciphers,MIT OR Apache-2.0,RustCrypto Developers @@ -72,6 +73,7 @@ azure_storage,https://github.com/azure/azure-sdk-for-rust,MIT,Microsoft Corp. azure_storage_blobs,https://github.com/azure/azure-sdk-for-rust,MIT,Microsoft Corp. 
backoff,https://github.com/ihrwein/backoff,MIT OR Apache-2.0,Tibor Benke backon,https://github.com/Xuanwo/backon,Apache-2.0,Xuanwo +backtrace,https://github.com/rust-lang/backtrace-rs,MIT OR Apache-2.0,The Rust Project Developers base16,https://github.com/thomcc/rust-base16,CC0-1.0,Thom Chiovoloni base64,https://github.com/marshallpierce/rust-base64,MIT OR Apache-2.0,"Alice Maz , Marshall Pierce " base64-simd,https://github.com/Nugine/simd,MIT,The base64-simd Authors @@ -214,6 +216,7 @@ futures-util,https://github.com/rust-lang/futures-rs,MIT OR Apache-2.0,The futur generic-array,https://github.com/fizyk20/generic-array,MIT,"Bartłomiej Kamiński , Aaron Trent " getrandom,https://github.com/rust-random/getrandom,MIT OR Apache-2.0,The Rand Project Developers ghost,https://github.com/dtolnay/ghost,MIT OR Apache-2.0,David Tolnay +gimli,https://github.com/gimli-rs/gimli,MIT OR Apache-2.0,The gimli Authors glob,https://github.com/rust-lang/glob,MIT OR Apache-2.0,The Rust Project Developers goauth,https://github.com/durch/rust-goauth,MIT,Drazen Urch governor,https://github.com/antifuchs/governor,MIT,Andreas Fuchs @@ -347,6 +350,7 @@ num_threads,https://github.com/jhpratt/num_threads,MIT OR Apache-2.0,Jacob Pratt number_prefix,https://github.com/ogham/rust-number-prefix,MIT,Benjamin Sago oauth2,https://github.com/ramosbugs/oauth2-rs,MIT OR Apache-2.0,"Alex Crichton , Florin Lipan , David A. Ramos " objc,http://github.com/SSheldon/rust-objc,MIT,Steven Sheldon +object,https://github.com/gimli-rs/object,Apache-2.0 OR MIT,The object Authors ofb,https://github.com/RustCrypto/block-modes,MIT OR Apache-2.0,RustCrypto Developers once_cell,https://github.com/matklad/once_cell,MIT OR Apache-2.0,Aleksey Kladov onig,http://github.com/iwillspeak/rust-onig,MIT,"Will Speak , Ivan Ivashchenko " @@ -432,6 +436,7 @@ roaring,https://github.com/RoaringBitmap/roaring-rs,MIT OR Apache-2.0,"Wim Looma roxmltree,https://github.com/RazrFalcon/roxmltree,MIT OR Apache-2.0,Evgeniy Reizner roxmltree,https://github.com/RazrFalcon/roxmltree,MIT OR Apache-2.0,Yevhenii Reizner rust_decimal,https://github.com/paupino/rust-decimal,MIT,Paul Mason +rustc-demangle,https://github.com/alexcrichton/rustc-demangle,MIT OR Apache-2.0,Alex Crichton rustc-hash,https://github.com/rust-lang-nursery/rustc-hash,Apache-2.0 OR MIT,The Rust Project Developers rustc_version,https://github.com/Kimundi/rustc-version-rs,MIT OR Apache-2.0,Marvin Löbel rustc_version_runtime,https://github.com/seppo0010/rustc-version-runtime-rs,MIT,Sebastian Waisbrot diff --git a/lib/file-source/Cargo.toml b/lib/file-source/Cargo.toml index a9203b9f21acb..1b131a369ca7f 100644 --- a/lib/file-source/Cargo.toml +++ b/lib/file-source/Cargo.toml @@ -69,7 +69,7 @@ default-features = false features = [] [dependencies.tokio] -version = "1.28.2" +version = "1.29.0" default-features = false features = ["full"] diff --git a/lib/k8s-e2e-tests/Cargo.toml b/lib/k8s-e2e-tests/Cargo.toml index 8e28fb6c89f7f..fd1a5aa7ab672 100644 --- a/lib/k8s-e2e-tests/Cargo.toml +++ b/lib/k8s-e2e-tests/Cargo.toml @@ -14,7 +14,7 @@ k8s-test-framework = { version = "0.1", path = "../k8s-test-framework" } regex = "1" reqwest = { version = "0.11.18", features = ["json"] } serde_json = "1" -tokio = { version = "1.28.2", features = ["full"] } +tokio = { version = "1.29.0", features = ["full"] } indoc = "2.0.1" env_logger = "0.10" tracing = { version = "0.1", features = ["log"] } diff --git a/lib/k8s-test-framework/Cargo.toml b/lib/k8s-test-framework/Cargo.toml index 534a97f81262d..7ab378add13ff 100644 --- 
a/lib/k8s-test-framework/Cargo.toml +++ b/lib/k8s-test-framework/Cargo.toml @@ -11,5 +11,5 @@ license = "MPL-2.0" k8s-openapi = { version = "0.16.0", default-features = false, features = ["v1_19"] } serde_json = "1" tempfile = "3" -tokio = { version = "1.28.2", features = ["full"] } +tokio = { version = "1.29.0", features = ["full"] } log = "0.4" diff --git a/lib/vector-api-client/Cargo.toml b/lib/vector-api-client/Cargo.toml index 16100f5efb28d..ed243d3e368ec 100644 --- a/lib/vector-api-client/Cargo.toml +++ b/lib/vector-api-client/Cargo.toml @@ -18,7 +18,7 @@ anyhow = { version = "1.0.71", default-features = false, features = ["std"] } # Tokio / Futures async-trait = { version = "0.1", default-features = false } futures = { version = "0.3", default-features = false, features = ["compat", "io-compat"] } -tokio = { version = "1.28.2", default-features = false, features = ["macros", "rt", "sync"] } +tokio = { version = "1.29.0", default-features = false, features = ["macros", "rt", "sync"] } tokio-stream = { version = "0.1.14", default-features = false, features = ["sync"] } # GraphQL diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index 1936760090a09..3f774b21ae5b6 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -24,7 +24,7 @@ rkyv = { version = "0.7.40", default-features = false, features = ["size_32", "s serde = { version = "1.0.164", default-features = false, features = ["derive"] } snafu = { version = "0.7.4", default-features = false, features = ["std"] } tokio-util = { version = "0.7.0", default-features = false } -tokio = { version = "1.28.2", default-features = false, features = ["rt", "macros", "rt-multi-thread", "sync", "fs", "io-util", "time"] } +tokio = { version = "1.29.0", default-features = false, features = ["rt", "macros", "rt-multi-thread", "sync", "fs", "io-util", "time"] } tracing = { version = "0.1.34", default-features = false, features = ["attributes"] } vector-config = { path = "../vector-config", default-features = false } vector-config-common = { path = "../vector-config-common", default-features = false } diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 6cc524d2f9ceb..7215b61c13482 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -60,7 +60,7 @@ serde = { version = "1.0.164", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false } snafu = { version = "0.7", optional = true } stream-cancel = { version = "0.8.1", default-features = false } -tokio = { version = "1.28.2", default-features = false, features = ["macros", "time"] } +tokio = { version = "1.29.0", default-features = false, features = ["macros", "time"] } tracing = { version = "0.1.34", default-features = false } vrl = { version = "0.4.0", default-features = false, features = ["value", "core", "compiler"] } vector-config = { path = "../vector-config" } @@ -69,6 +69,6 @@ vector-config-macros = { path = "../vector-config-macros" } [dev-dependencies] futures = { version = "0.3.28", default-features = false, features = ["async-await", "std"] } -tokio = { version = "1.28.2", default-features = false, features = ["rt", "time"] } +tokio = { version = "1.29.0", default-features = false, features = ["rt", "time"] } quickcheck = "1" quickcheck_macros = "1" diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index c14da07f9f27b..a1ab1e4981201 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -46,7 +46,7 @@ serde_with 
= { version = "2.3.2", default-features = false, features = ["std", " smallvec = { version = "1", default-features = false, features = ["serde", "const_generics"] } snafu = { version = "0.7.4", default-features = false } socket2 = { version = "0.5.3", default-features = false } -tokio = { version = "1.28.2", default-features = false, features = ["net"] } +tokio = { version = "1.29.0", default-features = false, features = ["net"] } tokio-openssl = { version = "0.6.3", default-features = false } tokio-stream = { version = "0.1", default-features = false, features = ["time"], optional = true } tokio-util = { version = "0.7.0", default-features = false, features = ["time"] } From 248ccb8d8252fff386d1b67c17424cd263361cb3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 16:30:14 +0000 Subject: [PATCH 199/236] chore(deps): Bump indexmap from 1.9.3 to 2.0.0 (#17755) Bumps [indexmap](https://github.com/bluss/indexmap) from 1.9.3 to 2.0.0.
Changelog

Sourced from indexmap's changelog.

  • 2.0.0

    • MSRV: Rust 1.64.0 or later is now required.

    • The "std" feature is no longer auto-detected. It is included in the default feature set, or else can be enabled like any other Cargo feature.

    • The "serde-1" feature has been removed, leaving just the optional "serde" dependency to be enabled like a feature itself.

    • IndexMap::get_index_mut now returns Option<(&K, &mut V)>, changing the key part from &mut K to &K. There is also a new alternative MutableKeys::get_index_mut2 to access the former behavior.

    • The new map::Slice<K, V> and set::Slice<T> offer a linear view of maps and sets, behaving a lot like normal [(K, V)] and [T] slices. Notably, comparison traits like Eq only consider items in order, rather than hash lookups, and slices even implement Hash.

    • IndexMap and IndexSet now have sort_by_cached_key and par_sort_by_cached_key methods which perform stable sorts in place using a key extraction function.

    • IndexMap and IndexSet now have reserve_exact, try_reserve, and try_reserve_exact methods that correspond to the same methods on Vec. However, exactness only applies to the direct capacity for items, while the raw hash table still follows its own rules for capacity and load factor.

    • The Equivalent trait is now re-exported from the equivalent crate, intended as a common base to allow types to work with multiple map types.

    • The hashbrown dependency has been updated to version 0.14.

    • The serde_seq module has been moved from the crate root to below the map module.

Commits
  • 8e47be8 Merge pull request #267 from cuviper/release-2.0.0
  • ad694fb Release 2.0.0
  • b5b2814 Merge pull request #266 from cuviper/doc-capacity
  • d3ea289 Document the lower-bound semantics of capacity
  • 74e14da Merge pull request #264 from cuviper/equivalent
  • 677c605 Add a relnote for Equivalent
  • 6d83bc1 pub use equivalent::Equivalent;
  • bb48357 Merge pull request #263 from cuviper/insert_in_slot
  • c37dae6 Use hashbrown's new single-lookup insertion
  • ee71507 Merge pull request #262 from daxpedda/hashbrown-v0.14
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=indexmap&package-manager=cargo&previous-version=1.9.3&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Doug Smith --- Cargo.lock | 15 ++++++++------- Cargo.toml | 2 +- lib/file-source/Cargo.toml | 2 +- lib/prometheus-parser/Cargo.toml | 2 +- lib/vector-common/Cargo.toml | 2 +- lib/vector-config/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- vdev/Cargo.toml | 2 +- 8 files changed, 15 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b6b3c424c35a7..1b0b771572fec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3126,7 +3126,7 @@ dependencies = [ "flate2", "futures 0.3.28", "glob", - "indexmap 1.9.3", + "indexmap 2.0.0", "libc", "quickcheck", "scan_fmt", @@ -4106,6 +4106,7 @@ checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", "hashbrown 0.14.0", + "serde", ] [[package]] @@ -6294,7 +6295,7 @@ dependencies = [ name = "prometheus-parser" version = "0.1.0" dependencies = [ - "indexmap 1.9.3", + "indexmap 2.0.0", "nom", "num_enum 0.6.1", "prost", @@ -9156,7 +9157,7 @@ dependencies = [ "dunce", "glob", "hex", - "indexmap 1.9.3", + "indexmap 2.0.0", "indicatif", "itertools 0.11.0", "log", @@ -9259,7 +9260,7 @@ dependencies = [ "hyper", "hyper-openssl", "hyper-proxy", - "indexmap 1.9.3", + "indexmap 2.0.0", "indoc", "infer 0.14.0", "inventory", @@ -9450,7 +9451,7 @@ dependencies = [ "crossbeam-utils", "derivative", "futures 0.3.28", - "indexmap 1.9.3", + "indexmap 2.0.0", "metrics", "nom", "ordered-float 3.7.0", @@ -9480,7 +9481,7 @@ dependencies = [ "chrono", "chrono-tz", "encoding_rs", - "indexmap 1.9.3", + "indexmap 2.0.0", "inventory", "no-proxy", "num-traits", @@ -9550,7 +9551,7 @@ dependencies = [ "headers", "http", "hyper-proxy", - "indexmap 1.9.3", + "indexmap 2.0.0", "metrics", "metrics-tracing-context", "metrics-util", diff --git a/Cargo.toml b/Cargo.toml index 5309c4242852a..63014558b28f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -266,7 +266,7 @@ http-body = { version = "0.4.5", default-features = false } hyper = { version = "0.14.27", default-features = false, features = ["client", "runtime", "http1", "http2", "server", "stream"] } hyper-openssl = { version = "0.9.2", default-features = false } hyper-proxy = { version = "0.9.1", default-features = false, features = ["openssl-tls"] } -indexmap = { version = "~1.9.3", default-features = false, features = ["serde"] } +indexmap = { version = "~2.0.0", default-features = false, features = ["serde", "std"] } infer = { version = "0.14.0", default-features = false, optional = true} indoc = { version = "2.0.1", default-features = false } inventory = { version = "0.3.6", default-features = false } diff --git a/lib/file-source/Cargo.toml b/lib/file-source/Cargo.toml index 1b131a369ca7f..50631a6765175 100644 --- a/lib/file-source/Cargo.toml +++ b/lib/file-source/Cargo.toml @@ -39,7 +39,7 @@ default-features = false features = [] [dependencies.indexmap] -version = "~1.9.3" +version = "~2.0.0" default-features = false features = ["serde"] diff --git a/lib/prometheus-parser/Cargo.toml b/lib/prometheus-parser/Cargo.toml index c05961e957842..497fa3ef52eaf 100644 --- a/lib/prometheus-parser/Cargo.toml +++ b/lib/prometheus-parser/Cargo.toml @@ -9,7 +9,7 @@ license = "MPL-2.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -indexmap = "~1.9.3" +indexmap = "~2.0.0" nom = "7.1.3" num_enum = "0.6.1" prost = "0.11" diff --git a/lib/vector-common/Cargo.toml 
b/lib/vector-common/Cargo.toml index 7215b61c13482..7ea21e0baac9d 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -48,7 +48,7 @@ chrono = { version = "0.4", default-features = false, optional = true, features crossbeam-utils = { version = "0.8.16", default-features = false } derivative = { version = "2.2.0", default-features = false } futures = { version = "0.3.28", default-features = false, features = ["std"] } -indexmap = { version = "~1.9.3", default-features = false } +indexmap = { version = "~2.0.0", default-features = false, features = ["std"] } metrics = "0.21.0" nom = { version = "7", optional = true } ordered-float = { version = "3.7.0", default-features = false } diff --git a/lib/vector-config/Cargo.toml b/lib/vector-config/Cargo.toml index b94fa49ca54b0..cc3684bc6951e 100644 --- a/lib/vector-config/Cargo.toml +++ b/lib/vector-config/Cargo.toml @@ -14,7 +14,7 @@ path = "tests/integration/lib.rs" chrono = { version = "0.4.19", default-features = false } chrono-tz = { version = "0.8.2", default-features = false } encoding_rs = { version = "0.8", default-features = false, features = ["alloc", "serde"] } -indexmap = { version = "1.9", default-features = false } +indexmap = { version = "2.0", default-features = false, features = ["std"] } inventory = { version = "0.3" } no-proxy = { version = "0.3.1", default-features = false, features = ["serialize"] } num-traits = { version = "0.2.15", default-features = false } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index a1ab1e4981201..5326901b16bc0 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -22,7 +22,7 @@ futures-util = { version = "0.3.28", default-features = false, features = ["std" headers = { version = "0.3.8", default-features = false } http = { version = "0.2.9", default-features = false } hyper-proxy = { version = "0.9.1", default-features = false, features = ["openssl-tls"] } -indexmap = { version = "~1.9.3", default-features = false, features = ["serde"] } +indexmap = { version = "~2.0.0", default-features = false, features = ["serde", "std"] } lookup = { package = "vector-lookup", path = "../vector-lookup" } metrics = "0.21.0" metrics-tracing-context = { version = "0.14.0", default-features = false } diff --git a/vdev/Cargo.toml b/vdev/Cargo.toml index 97b4f68f9f4b4..968c89eccb169 100644 --- a/vdev/Cargo.toml +++ b/vdev/Cargo.toml @@ -21,7 +21,7 @@ directories = "5.0.1" dunce = "1.0.4" glob = { version = "0.3.1", default-features = false } hex = "0.4.3" -indexmap = { version = "1.9", default-features = false, features = ["serde"] } +indexmap = { version = "2.0", default-features = false, features = ["serde", "std"] } indicatif = { version = "0.17.5", features = ["improved_unicode"] } itertools = "0.11.0" log = "0.4.19" From 53a575f21d65bc188324ad3bcd2e89d03bbf548c Mon Sep 17 00:00:00 2001 From: neuronull Date: Wed, 28 Jun 2023 11:08:26 -0600 Subject: [PATCH 200/236] chore(ci): reduce runner sizing to 4 core and free tier (#17785) - In order to reduce CI costs, experiments were run on each of the altered workflows to understand the duration (and thus impact to DX) and changes in cost. - The two cases of concern are what checks are run on each PR commit and in the merge queue: - in the merge queue- there was a trivial change in runtime. This is because the bottleneck workflow is the mac unit test, which takes about 1 hour to run. All runner-size-reduced workflows in this PR that are in the merge queue, still run within that time frame. 
- on each PR commit- this PR does not affect this case. However, a sister PR (#17724) does, because that PR fundamentally changes the integration test workflow to introduce the runner there, and so in that PR, we reduced to 4 core. That change adds 3 minutes of duration to the integration tests, which is trivial (and integration tests only run on some PR pushes, if that PR touches integrations). - Thus the empirically anticipated outcome is a ~20% reduction in cost with a none-to-trivial increase in duration. --------- Co-authored-by: Spencer Gilbert --- .github/workflows/cli.yml | 2 +- .github/workflows/cross.yml | 2 +- .github/workflows/integration-test.yml | 4 ++-- .github/workflows/k8s_e2e.yml | 6 +++--- .github/workflows/misc.yml | 2 +- .github/workflows/msrv.yml | 2 +- .github/workflows/regression.yml | 4 ++-- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 7b03d7fef1876..2801cc78bb945 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -5,7 +5,7 @@ on: jobs: test-cli: - runs-on: [linux, ubuntu-20.04-8core] + runs-on: ubuntu-latest env: CARGO_INCREMENTAL: 0 steps: diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml index a3afadb8bddac..19b53a9fae27f 100644 --- a/.github/workflows/cross.yml +++ b/.github/workflows/cross.yml @@ -6,7 +6,7 @@ on: jobs: cross-linux: name: Cross - ${{ matrix.target }} - runs-on: [linux, ubuntu-20.04-8core] + runs-on: ubuntu-latest env: CARGO_INCREMENTAL: 0 strategy: diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index a5f02316cdaa4..ffb741158c87e 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -40,8 +40,8 @@ env: jobs: test-integration: - runs-on: [linux, ubuntu-20.04-8core] - timeout-minutes: 30 + runs-on: [linux, ubuntu-20.04-4core] + timeout-minutes: 40 if: inputs.if || github.event_name == 'workflow_dispatch' steps: - name: (PR comment) Get PR branch diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index bbdea21a59588..61196de8befa0 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -53,7 +53,7 @@ jobs: build-x86_64-unknown-linux-gnu: name: Build - x86_64-unknown-linux-gnu - runs-on: [linux, ubuntu-20.04-8core] + runs-on: [linux, ubuntu-20.04-4core] needs: changes if: github.event_name != 'pull_request' || needs.changes.outputs.k8s == 'true' # cargo-deb requires a release build, but we don't need optimizations for tests @@ -129,7 +129,7 @@ jobs: # See https://github.community/t/feature-request-and-use-case-example-to-allow-matrix-in-if-s/126067 compute-k8s-test-plan: name: Compute K8s test plan - runs-on: [linux, ubuntu-20.04-8core] + runs-on: ubuntu-latest needs: changes if: github.event_name != 'pull_request' || needs.changes.outputs.k8s == 'true' outputs: @@ -190,7 +190,7 @@ jobs: test-e2e-kubernetes: name: K8s ${{ matrix.kubernetes_version.version }} / ${{ matrix.container_runtime }} (${{ matrix.kubernetes_version.role }}) - runs-on: [linux, ubuntu-20.04-8core] + runs-on: [linux, ubuntu-20.04-4core] needs: - build-x86_64-unknown-linux-gnu - compute-k8s-test-plan diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index b106c65ee2655..035bbbc92a66e 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -5,7 +5,7 @@ on: jobs: test-misc: - runs-on: [linux, ubuntu-20.04-8core] + runs-on: [linux, ubuntu-20.04-4core] env: CARGO_INCREMENTAL: 0 steps: 
diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 00ced7b7504b5..c910d2394053d 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -13,7 +13,7 @@ env: jobs: check-msrv: - runs-on: [ubuntu-20.04] + runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 with: diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index ea6882a2447aa..e27d867a02b33 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -283,7 +283,7 @@ jobs: build-baseline: name: Build baseline Vector container - runs-on: [linux, ubuntu-20.04-8core] + runs-on: [linux, ubuntu-20.04-4core] needs: - compute-metadata steps: @@ -323,7 +323,7 @@ jobs: build-comparison: name: Build comparison Vector container - runs-on: [linux, soak-builder] + runs-on: [linux, ubuntu-20.04-4core] needs: - compute-metadata steps: From dbdff9e8b1df36dd45fb8ef2181926224b5dd294 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 17:20:01 +0000 Subject: [PATCH 201/236] chore(ci): Bump docker/setup-buildx-action from 2.7.0 to 2.8.0 (#17786) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 2.7.0 to 2.8.0.
Release notes

Sourced from docker/setup-buildx-action's releases.

v2.8.0

Full Changelog: https://github.com/docker/setup-buildx-action/compare/v2.7.0...v2.8.0

Commits
  • 16c0bc4 Merge pull request #242 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • ebcacb9 update generated content
  • 496a823 Bump @docker/actions-toolkit from 0.5.0 to 0.6.0
  • a56031a Merge pull request #241 from nicks/nicks/driver
  • 922550f context: only append flags if we know the driver supports them
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/setup-buildx-action&package-manager=github_actions&previous-version=2.7.0&new-version=2.8.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
  • `@dependabot rebase` will rebase this PR
  • `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
  • `@dependabot merge` will merge this PR after your CI passes on it
  • `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
  • `@dependabot cancel merge` will cancel a previously requested merge and block automerging
  • `@dependabot reopen` will reopen this PR if it is closed
  • `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
  • `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
  • `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
  • `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/environment.yml | 2 +- .github/workflows/publish.yml | 2 +- .github/workflows/regression.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 29c1901f31b67..2c17383d169ac 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -45,7 +45,7 @@ jobs: - name: Set up QEMU uses: docker/setup-qemu-action@v2.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2.7.0 + uses: docker/setup-buildx-action@v2.8.0 - name: Login to DockerHub uses: docker/login-action@v2.1.0 if: github.ref == 'refs/heads/master' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 381be90c0806f..bf2efe25dba7f 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -448,7 +448,7 @@ jobs: platforms: all - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2.7.0 + uses: docker/setup-buildx-action@v2.8.0 with: version: latest install: true diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index e27d867a02b33..f3370293198ef 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -301,7 +301,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2.7.0 + uses: docker/setup-buildx-action@v2.8.0 - name: Build 'vector' target image uses: docker/build-push-action@v4.1.1 @@ -341,7 +341,7 @@ jobs: - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2.7.0 + uses: docker/setup-buildx-action@v2.8.0 - name: Build 'vector' target image uses: docker/build-push-action@v4.1.1 From 48ec2e8bc51f3f4f68566a64e6fe7d7327a73591 Mon Sep 17 00:00:00 2001 From: neuronull Date: Wed, 28 Jun 2023 11:38:40 -0600 Subject: [PATCH 202/236] fix(datadog_agent source): remove duplicate internal metrics emission (#17720) --- src/sources/datadog_agent/metrics.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/sources/datadog_agent/metrics.rs b/src/sources/datadog_agent/metrics.rs index 0a02e2a11dc95..60be1e7cd2791 100644 --- a/src/sources/datadog_agent/metrics.rs +++ b/src/sources/datadog_agent/metrics.rs @@ -212,7 +212,7 @@ fn decode_datadog_series_v2( return Ok(Vec::new()); } - let metrics = decode_ddseries_v2(body, &api_key, events_received).map_err(|error| { + let metrics = decode_ddseries_v2(body, &api_key).map_err(|error| { ErrorMessage::new( StatusCode::UNPROCESSABLE_ENTITY, format!("Error decoding Datadog sketch: {:?}", error), @@ -230,7 +230,6 @@ fn decode_datadog_series_v2( pub(crate) fn decode_ddseries_v2( frame: Bytes, api_key: &Option>, - events_received: &Registered, ) -> crate::Result> { let payload = MetricPayload::decode(frame)?; let decoded_metrics: Vec = payload @@ -336,11 +335,6 @@ pub(crate) fn decode_ddseries_v2( }) .collect(); - events_received.emit(CountByteSize( - decoded_metrics.len(), - decoded_metrics.estimated_json_encoded_size_of(), - )); - Ok(decoded_metrics) } From ab39c6ac6816c8499cc87050a21945f984638dab Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Wed, 28 Jun 2023 13:55:21 -0400 Subject: [PATCH 203/236] chore: Fix publish workflow for older OS images (#17787) Resolves git submodule issue on older OS images. 
Based on https://github.com/actions/checkout/issues/758#issuecomment-1097027361 --- .github/workflows/publish.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index bf2efe25dba7f..e4ea253a558e8 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -320,7 +320,11 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" + # Workaround for older OS images + # https://github.com/actions/checkout/issues/758 + - name: Checkout submodules + run: | + git submodule update --init --recursive - name: Download staged package artifacts (x86_64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: From d7bc531ee29f563822ed152c97adfc7d7bb0ef81 Mon Sep 17 00:00:00 2001 From: neuronull Date: Wed, 28 Jun 2023 12:21:45 -0600 Subject: [PATCH 204/236] chore(docs): add note about const strings (#17774) --- STYLE.md | 12 ++++++++++++ docs/DEVELOPING.md | 1 + 2 files changed, 13 insertions(+) diff --git a/STYLE.md b/STYLE.md index a1403ecb7fe4b..4e3e70818f708 100644 --- a/STYLE.md +++ b/STYLE.md @@ -24,6 +24,18 @@ As an additional note, `rustfmt` sometimes can fail to format code within macros to see such code that doesn't look like it's formatted correctly, you may need to manually tweak it if `rustfmt` cannot be persuaded to format it correctly for you. :) +### Const strings + +When re-typing the same raw string literal more than once, this can lead to typo +errors, especially when names ares similar. In general, when reasonable, it is +preferred to use [Compile-time constants](https://doc.rust-lang.org/std/keyword.const.html) +when dealing with non-dynamic strings. For example, when working with field names +for event metadata. + +As this has not always been a consistently enforced code style for the project, +please take the opportunity to update existing raw strings to use constants +when modifying existing code + ## Code Organization Code is primarily split into two main directories: `lib/` and `src/`. 
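
To make the new STYLE.md guideline above concrete, a minimal sketch of the pattern it recommends (the `SOURCE_TYPE_KEY` name and the plain `BTreeMap` standing in for event metadata are invented for illustration, not taken from the Vector codebase):

```rust
use std::collections::BTreeMap;

// One definition of the field name; a typo at any use site becomes a compile
// error instead of a silently mismatched raw string.
const SOURCE_TYPE_KEY: &str = "source_type";

fn annotate(event: &mut BTreeMap<String, String>, source_type: &str) {
    // Every reader and writer of the field goes through the same constant.
    event.insert(SOURCE_TYPE_KEY.to_owned(), source_type.to_owned());
}

fn main() {
    let mut event = BTreeMap::new();
    annotate(&mut event, "file");
    assert_eq!(event.get(SOURCE_TYPE_KEY).map(String::as_str), Some("file"));
}
```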
diff --git a/docs/DEVELOPING.md b/docs/DEVELOPING.md index 31a91795b7b75..0fa32c68d30c6 100644 --- a/docs/DEVELOPING.md +++ b/docs/DEVELOPING.md @@ -8,6 +8,7 @@ - [Makefile](#makefile) - [Code style](#code-style) - [Logging style](#logging-style) + - [Panics](#panics) - [Feature flags](#feature-flags) - [Dependencies](#dependencies) - [Guidelines](#guidelines) From 13c3c788e2225ba25ca49500ebde270915c2e7bc Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Thu, 29 Jun 2023 10:00:18 -0400 Subject: [PATCH 205/236] chore(ci): fix team membership action (#17791) `GH_PAT_ORG` has the correct permissions for `read:org` --- .github/workflows/gardener_open_pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gardener_open_pr.yml b/.github/workflows/gardener_open_pr.yml index 701bfbacae7f4..e9b0fef67ba05 100644 --- a/.github/workflows/gardener_open_pr.yml +++ b/.github/workflows/gardener_open_pr.yml @@ -18,7 +18,7 @@ jobs: with: username: ${{ github.actor }} team: vector - GITHUB_TOKEN: ${{ secrets.GH_PROJECT_PAT }} + GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} - uses: actions/add-to-project@v0.5.0 if: ${{ steps.checkVectorMember.outputs.isTeamMember == 'false' }} with: From cba983e381af933ac360812aec82d013e7e84fa4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Jun 2023 14:02:16 +0000 Subject: [PATCH 206/236] chore(deps): Bump quote from 1.0.28 to 1.0.29 (#17798) Bumps [quote](https://github.com/dtolnay/quote) from 1.0.28 to 1.0.29.
Release notes

Sourced from quote's releases.

1.0.29

  • Fix proc_macro_span_shrink-related build error when built with -Zminimal-versions
Commits
  • e99862e Release 1.0.29
  • 0c68465 Fix -Zminimal-versions build
  • 200f56a Remove .clippy.toml in favor of respecting rust-version from Cargo.toml
  • 47e1066 Ignore uninlined_format_args pedantic clippy lint
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=quote&package-manager=cargo&previous-version=1.0.28&new-version=1.0.29)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
  • `@dependabot rebase` will rebase this PR
  • `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
  • `@dependabot merge` will merge this PR after your CI passes on it
  • `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
  • `@dependabot cancel merge` will cancel a previously requested merge and block automerging
  • `@dependabot reopen` will reopen this PR if it is closed
  • `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
  • `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
  • `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
  • `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 138 ++++++++++++++++++++++++++--------------------------- 1 file changed, 69 insertions(+), 69 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1b0b771572fec..e96f6160f8dd2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -260,7 +260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c6368f9ae5c6ec403ca910327ae0c9437b0a85255b6950c90d497e6177f6e5e" dependencies = [ "proc-macro-hack", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -473,7 +473,7 @@ dependencies = [ "darling 0.14.2", "proc-macro-crate 1.2.1", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "thiserror", ] @@ -593,7 +593,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -615,7 +615,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -632,7 +632,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -1417,7 +1417,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd9e32d7420c85055e8107e5b2463c4eeefeaac18b52359fe9f9c08a18f342b2" dependencies = [ - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1561,7 +1561,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61820b4c5693eafb998b1e67485423c923db4a75f72585c247bdee32bad81e7b" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1572,7 +1572,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c76cdbfa13def20d1f8af3ae7b3c6771f06352a74221d8851262ac384c122b8e" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1643,7 +1643,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1726,7 +1726,7 @@ dependencies = [ "cached_proc_macro_types", "darling 0.14.2", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1951,7 +1951,7 @@ checksum = "81d7dc0031c3a59a04fc2ba395c8e2dd463cba1859275f065d225f6122221b45" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -2405,7 +2405,7 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2459,7 +2459,7 @@ dependencies = [ "codespan-reporting", "once_cell", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "scratch", "syn 1.0.109", ] @@ -2477,7 +2477,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470" dependencies = [ "proc-macro2 1.0.63", - "quote 
1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2510,7 +2510,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "strsim 0.10.0", "syn 1.0.109", ] @@ -2524,7 +2524,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "strsim 0.10.0", "syn 1.0.109", ] @@ -2536,7 +2536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2547,7 +2547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" dependencies = [ "darling_core 0.14.2", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2623,7 +2623,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2634,7 +2634,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2646,7 +2646,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -2902,7 +2902,7 @@ checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2914,7 +2914,7 @@ checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2926,7 +2926,7 @@ checksum = "11f36e95862220b211a6e2aa5eca09b4fa391b13cd52ceb8035a24bf65a79de2" dependencies = [ "once_cell", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2946,7 +2946,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -3074,7 +3074,7 @@ checksum = "f47da3a72ec598d9c8937a7ebca8962a5c7a1f28444e38c2b33c771ba3f55f05" dependencies = [ "proc-macro-error", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -3170,7 +3170,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4c81935e123ab0741c4c4f0d9b8377e5fb21d3de7e062fa4b1263b1fbcba1ea" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -3351,7 +3351,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -3434,7 +3434,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb19fe8de3ea0920d282f7b77dd4227aea6b8b999b42cdf0ca41b2472b14443a" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -3540,7 +3540,7 @@ dependencies = [ "heck 0.4.0", "lazy_static", "proc-macro2 
1.0.63", - "quote 1.0.28", + "quote 1.0.29", "serde", "serde_json", "syn 1.0.109", @@ -4926,7 +4926,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -5452,7 +5452,7 @@ checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.2.1", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -5464,7 +5464,7 @@ checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.2.1", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -5647,7 +5647,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -5891,7 +5891,7 @@ dependencies = [ "pest", "pest_meta", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -5979,7 +5979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -6245,7 +6245,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "version_check", ] @@ -6257,7 +6257,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "version_check", ] @@ -6366,7 +6366,7 @@ dependencies = [ "anyhow", "itertools 0.10.5", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -6395,7 +6395,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -6494,7 +6494,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -6509,9 +6509,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.28" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" +checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" dependencies = [ "proc-macro2 1.0.63", ] @@ -6909,7 +6909,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7409,7 +7409,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -7420,7 +7420,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7472,7 +7472,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7531,7 +7531,7 @@ checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7543,7 +7543,7 @@ checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" dependencies = [ "darling 0.14.2", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7814,7 +7814,7 @@ checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7955,7 +7955,7 @@ dependencies = [ "heck 0.3.3", "proc-macro-error", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7973,7 +7973,7 @@ checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "rustversion", "syn 1.0.109", ] @@ -8012,7 +8012,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "unicode-ident", ] @@ -8023,7 +8023,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "unicode-ident", ] @@ -8040,7 +8040,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -8194,7 +8194,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -8340,7 +8340,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -8561,7 +8561,7 @@ dependencies = [ "prettyplease", "proc-macro2 1.0.63", "prost-build", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -8665,7 +8665,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -8936,7 +8936,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -8966,7 +8966,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3e1c30cedd24fc597f7d37a721efdbdc2b1acae012c1ef1218f4c7c2c0f3e7" 
dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -9506,7 +9506,7 @@ dependencies = [ "darling 0.13.4", "once_cell", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "serde", "serde_json", "syn 1.0.109", @@ -9519,7 +9519,7 @@ version = "0.1.0" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "serde", "serde_derive_internals", "syn 1.0.109", @@ -9779,7 +9779,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", ] [[package]] @@ -9880,7 +9880,7 @@ dependencies = [ "log", "once_cell", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", "wasm-bindgen-shared", ] @@ -9903,7 +9903,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.28", + "quote 1.0.29", "wasm-bindgen-macro-support", ] @@ -9914,7 +9914,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -10325,7 +10325,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6505e6815af7de1746a08f69c69606bb45695a17149517680f3b2149713b19a3" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -10345,7 +10345,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "synstructure", ] From 062224b485f27193288443cede0ae1c2f5c66196 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Thu, 29 Jun 2023 08:08:59 -0600 Subject: [PATCH 207/236] chore(deps): Export more common bits for components (#17788) --- src/sinks/azure_common/mod.rs | 6 +++--- src/sinks/azure_common/service.rs | 2 +- src/sinks/s3_common/mod.rs | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/sinks/azure_common/mod.rs b/src/sinks/azure_common/mod.rs index f3e4ff73ebe40..4d1c931977f03 100644 --- a/src/sinks/azure_common/mod.rs +++ b/src/sinks/azure_common/mod.rs @@ -1,3 +1,3 @@ -pub(crate) mod config; -pub(crate) mod service; -pub(crate) mod sink; +pub mod config; +pub mod service; +pub mod sink; diff --git a/src/sinks/azure_common/service.rs b/src/sinks/azure_common/service.rs index 122bd66525b18..eed5a068c48fa 100644 --- a/src/sinks/azure_common/service.rs +++ b/src/sinks/azure_common/service.rs @@ -12,7 +12,7 @@ use tracing::Instrument; use crate::sinks::azure_common::config::{AzureBlobRequest, AzureBlobResponse}; #[derive(Clone)] -pub(crate) struct AzureBlobService { +pub struct AzureBlobService { client: Arc, } diff --git a/src/sinks/s3_common/mod.rs b/src/sinks/s3_common/mod.rs index 193857740a297..37a3669c195ca 100644 --- a/src/sinks/s3_common/mod.rs +++ b/src/sinks/s3_common/mod.rs @@ -1,4 +1,4 @@ -pub(crate) mod config; -pub(crate) mod partitioner; -pub(crate) mod service; -pub(crate) mod sink; +pub mod config; +pub mod partitioner; +pub mod service; +pub mod sink; From 75ae967ebed2231a93b62e2c7a5a08685fa7d654 Mon Sep 17 00:00:00 2001 From: neuronull 
Date: Thu, 29 Jun 2023 12:20:19 -0600 Subject: [PATCH 208/236] chore(ci): fix comment author validation (#17794) The PR comment author membership was being detected but that wasn't being used to validate if the workflow(s) should proceed. --- .github/workflows/comment-trigger.yml | 6 +++++- .github/workflows/cross.yml | 7 ------- .github/workflows/integration-comment.yml | 8 ++++++-- .github/workflows/unit_windows.yml | 8 -------- 4 files changed, 11 insertions(+), 18 deletions(-) diff --git a/.github/workflows/comment-trigger.yml b/.github/workflows/comment-trigger.yml index 8d3128ba14300..7be887247ce73 100644 --- a/.github/workflows/comment-trigger.yml +++ b/.github/workflows/comment-trigger.yml @@ -59,7 +59,7 @@ jobs: || contains(github.event.comment.body, '/ci-run-regression') ) steps: - - name: Validate issue comment + - name: Get PR comment author id: comment uses: tspascoal/get-user-teams-membership@v2 with: @@ -67,6 +67,10 @@ jobs: team: 'Vector' GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + - name: Validate author membership + if: steps.comment.outputs.isTeamMember == 'false' + run: exit 1 + cli: needs: validate if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-cli') diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml index 19b53a9fae27f..869405b5b9076 100644 --- a/.github/workflows/cross.yml +++ b/.github/workflows/cross.yml @@ -85,13 +85,6 @@ jobs: needs: cross-linux if: needs.cross-linux.result == 'success' && github.event_name == 'issue_comment' steps: - - name: Validate issue comment - uses: tspascoal/get-user-teams-membership@v2 - with: - username: ${{ github.actor }} - team: 'Vector' - GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} - - name: (PR comment) Get PR branch uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch diff --git a/.github/workflows/integration-comment.yml b/.github/workflows/integration-comment.yml index 629278ff95c20..a35f994f82f25 100644 --- a/.github/workflows/integration-comment.yml +++ b/.github/workflows/integration-comment.yml @@ -46,14 +46,18 @@ jobs: runs-on: ubuntu-latest if: contains(github.event.comment.body, '/ci-run-integration') || contains(github.event.comment.body, '/ci-run-all') steps: - - name: Validate issue comment - if: github.event_name == 'issue_comment' + - name: Get PR comment author + id: comment uses: tspascoal/get-user-teams-membership@v2 with: username: ${{ github.actor }} team: 'Vector' GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + - name: Validate author membership + if: steps.comment.outputs.isTeamMember == 'false' + run: exit 1 + - name: (PR comment) Get PR branch uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch diff --git a/.github/workflows/unit_windows.yml b/.github/workflows/unit_windows.yml index 61128cf5a801a..4d73e8f829a99 100644 --- a/.github/workflows/unit_windows.yml +++ b/.github/workflows/unit_windows.yml @@ -8,14 +8,6 @@ jobs: test-windows: runs-on: [windows, windows-2019-8core] steps: - - name: Validate issue comment - if: github.event_name == 'issue_comment' - uses: tspascoal/get-user-teams-membership@v2 - with: - username: ${{ github.actor }} - team: 'Vector' - GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} - - name: (PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} uses: xt0rted/pull-request-comment-branch@v2 From 6eecda55020214364fda844cf8ed16a9b6cc2a5c Mon Sep 17 00:00:00 2001 From: Nathan Fox Date: Thu, 29 Jun 2023 14:17:40 -0400 Subject: [PATCH 209/236] feat: track runtime schema definitions for log 
events (#17692) closes https://github.com/vectordotdev/vector/issues/16732 In order for sinks to use semantic meaning, they need a mapping of meanings to fields. This is included in the schema definition of events, but the exact definition that needs to be used depends on the path the event took to get to the sink. The schema definition of an event is tracked at runtime so this can be determined. A `parent_id` was added to event metadata to track the previous component that an event came from, which lets the topology select the correct schema definition to attach to events. For sources, there is only one definition that can be attached (for each port). This is automatically attached in the topology layer (after an event is emitted by a source), so there is no additional work in each source to support this. For transforms, it's slightly more complicated. The schema definition depends on both the output port _and_ the component the event came from. A map is generated at Vector startup, and the correct definition is obtained from that at runtime. This also happens in the topology layer so transforms don't need to worry about this. Previously the `remap` transform had custom code to support runtime schema definitions (for the VRL meaning functions). This was removed since it's now handled automatically. The `reduce` and `lua` transforms are special cases since there is no clear "path" that an event takes through the topology, since multiple events can be merged (from different inputs) in `reduce`. For `lua`, output events may not be related to input events at all. In these cases the schema definition map will have the same value for all inputs (they are all merged). The topology will then arbitrarily pick one (since they are all the same). --------- Signed-off-by: Stephen Wakely Co-authored-by: Stephen Wakely --- lib/vector-core/Cargo.toml | 2 +- lib/vector-core/src/config/mod.rs | 28 +- lib/vector-core/src/event/metadata.rs | 27 +- lib/vector-core/src/event/mod.rs | 14 +- lib/vector-core/src/transform/mod.rs | 155 +++++----- src/config/transform.rs | 3 + src/source_sender/mod.rs | 90 ++++-- src/sources/kafka.rs | 2 +- src/sources/opentelemetry/tests.rs | 8 +- src/sources/socket/mod.rs | 2 +- src/sources/statsd/mod.rs | 4 +- src/test_util/mock/mod.rs | 12 +- src/topology/builder.rs | 29 +- src/topology/test/compliance.rs | 14 + src/topology/test/mod.rs | 54 ++++ src/transforms/aggregate.rs | 9 +- src/transforms/dedupe.rs | 69 +++++ src/transforms/filter.rs | 5 + src/transforms/log_to_metric.rs | 52 +++- src/transforms/lua/v2/mod.rs | 5 +- src/transforms/metric_to_log.rs | 274 +++++++++--------- src/transforms/reduce/mod.rs | 242 ++++++++++------ src/transforms/remap.rs | 113 ++------ src/transforms/route.rs | 4 +- src/transforms/tag_cardinality_limit/tests.rs | 58 ++++ 25 files changed, 847 insertions(+), 428 deletions(-) diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 5326901b16bc0..7317a47e0ea70 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -94,7 +94,7 @@ rand = "0.8.5" rand_distr = "0.4.3" tracing-subscriber = { version = "0.3.17", default-features = false, features = ["env-filter", "fmt", "ansi", "registry"] } vector-common = { path = "../vector-common", default-features = false, features = ["test"] } -vrl = { version = "0.4.0", default-features = false, features = ["value", "arbitrary", "lua"] } +vrl = { version = "0.4.0", default-features = false, features = ["value", "arbitrary", "lua", "test"] } [features] api = 
["dep:async-graphql"] diff --git a/lib/vector-core/src/config/mod.rs b/lib/vector-core/src/config/mod.rs index 3ff5152a293a7..71786155d1d8f 100644 --- a/lib/vector-core/src/config/mod.rs +++ b/lib/vector-core/src/config/mod.rs @@ -1,3 +1,4 @@ +use std::sync::Arc; use std::{collections::HashMap, fmt, num::NonZeroUsize}; use bitmask_enum::bitmask; @@ -111,7 +112,7 @@ pub struct SourceOutput { // NOTE: schema definitions are only implemented/supported for log-type events. There is no // inherent blocker to support other types as well, but it'll require additional work to add // the relevant schemas, and store them separately in this type. - pub schema_definition: Option, + pub schema_definition: Option>, } impl SourceOutput { @@ -129,7 +130,7 @@ impl SourceOutput { Self { port: None, ty, - schema_definition: Some(schema_definition), + schema_definition: Some(Arc::new(schema_definition)), } } @@ -168,17 +169,15 @@ impl SourceOutput { /// Schema enabled is set in the users configuration. #[must_use] pub fn schema_definition(&self, schema_enabled: bool) -> Option { + use std::ops::Deref; + self.schema_definition.as_ref().map(|definition| { if schema_enabled { - definition.clone() + definition.deref().clone() } else { let mut new_definition = schema::Definition::default_for_namespace(definition.log_namespaces()); - - if definition.log_namespaces().contains(&LogNamespace::Vector) { - new_definition.add_meanings(definition.meanings()); - } - + new_definition.add_meanings(definition.meanings()); new_definition } }) @@ -203,7 +202,7 @@ pub struct TransformOutput { /// enabled, at least one definition should be output. If the transform /// has multiple connected sources, it is possible to have multiple output /// definitions - one for each input. - log_schema_definitions: HashMap, + pub log_schema_definitions: HashMap, } impl TransformOutput { @@ -245,11 +244,7 @@ impl TransformOutput { .map(|(output, definition)| { let mut new_definition = schema::Definition::default_for_namespace(definition.log_namespaces()); - - if definition.log_namespaces().contains(&LogNamespace::Vector) { - new_definition.add_meanings(definition.meanings()); - } - + new_definition.add_meanings(definition.meanings()); (output.clone(), new_definition) }) .collect() @@ -606,7 +601,10 @@ mod test { // There should be the default legacy definition without schemas enabled. assert_eq!( - Some(schema::Definition::default_legacy_namespace()), + Some( + schema::Definition::default_legacy_namespace() + .with_meaning(OwnedTargetPath::event(owned_value_path!("zork")), "zork") + ), output.schema_definition(false) ); } diff --git a/lib/vector-core/src/event/metadata.rs b/lib/vector-core/src/event/metadata.rs index f13bee6a5e009..d86884be7582c 100644 --- a/lib/vector-core/src/event/metadata.rs +++ b/lib/vector-core/src/event/metadata.rs @@ -7,7 +7,10 @@ use vector_common::{config::ComponentKey, EventDataEq}; use vrl::value::{Kind, Secrets, Value}; use super::{BatchNotifier, EventFinalizer, EventFinalizers, EventStatus}; -use crate::{config::LogNamespace, schema, ByteSizeOf}; +use crate::{ + config::{LogNamespace, OutputId}, + schema, ByteSizeOf, +}; const DATADOG_API_KEY: &str = "datadog_api_key"; const SPLUNK_HEC_TOKEN: &str = "splunk_hec_token"; @@ -30,8 +33,15 @@ pub struct EventMetadata { /// The id of the source source_id: Option>, + /// The id of the component this event originated from. This is used to + /// determine which schema definition to attach to an event in transforms. 
+ /// This should always have a value set for events in transforms. It will always be `None` + /// in a source, and there is currently no use-case for reading the value in a sink. + upstream_id: Option>, + /// An identifier for a globally registered schema definition which provides information about /// the event shape (type information, and semantic meaning of fields). + /// This definition is only currently valid for logs, and shouldn't be used for other event types. /// /// TODO(Jean): must not skip serialization to track schemas across restarts. #[serde(default = "default_schema_definition", skip)] @@ -71,17 +81,29 @@ impl EventMetadata { &mut self.secrets } - /// Returns a reference to the metadata source. + /// Returns a reference to the metadata source id. #[must_use] pub fn source_id(&self) -> Option<&Arc> { self.source_id.as_ref() } + /// Returns a reference to the metadata parent id. This is the `OutputId` + /// of the previous component the event was sent through (if any). + #[must_use] + pub fn upstream_id(&self) -> Option<&OutputId> { + self.upstream_id.as_deref() + } + /// Sets the `source_id` in the metadata to the provided value. pub fn set_source_id(&mut self, source_id: Arc) { self.source_id = Some(source_id); } + /// Sets the `upstream_id` in the metadata to the provided value. + pub fn set_upstream_id(&mut self, upstream_id: Arc) { + self.upstream_id = Some(upstream_id); + } + /// Return the datadog API key, if it exists pub fn datadog_api_key(&self) -> Option> { self.secrets.get(DATADOG_API_KEY).cloned() @@ -111,6 +133,7 @@ impl Default for EventMetadata { finalizers: Default::default(), schema_definition: default_schema_definition(), source_id: None, + upstream_id: None, } } } diff --git a/lib/vector-core/src/event/mod.rs b/lib/vector-core/src/event/mod.rs index ae2e51e8a23a8..9547f58dc5ed3 100644 --- a/lib/vector-core/src/event/mod.rs +++ b/lib/vector-core/src/event/mod.rs @@ -5,7 +5,7 @@ use std::{ sync::Arc, }; -use crate::ByteSizeOf; +use crate::{config::OutputId, ByteSizeOf}; pub use array::{into_event_stream, EventArray, EventContainer, LogArray, MetricArray, TraceArray}; pub use estimated_json_encoded_size_of::EstimatedJsonEncodedSizeOf; pub use finalization::{ @@ -309,12 +309,24 @@ impl Event { self.metadata_mut().set_source_id(source_id); } + /// Sets the `upstream_id` in the event metadata to the provided value. + pub fn set_upstream_id(&mut self, upstream_id: Arc) { + self.metadata_mut().set_upstream_id(upstream_id); + } + /// Sets the `source_id` in the event metadata to the provided value. #[must_use] pub fn with_source_id(mut self, source_id: Arc) -> Self { self.metadata_mut().set_source_id(source_id); self } + + /// Sets the `upstream_id` in the event metadata to the provided value. 
+ #[must_use] + pub fn with_upstream_id(mut self, upstream_id: Arc) -> Self { + self.metadata_mut().set_upstream_id(upstream_id); + self + } } impl EventDataEq for Event { diff --git a/lib/vector-core/src/transform/mod.rs b/lib/vector-core/src/transform/mod.rs index a60cd85c8200a..af81c51aa69a1 100644 --- a/lib/vector-core/src/transform/mod.rs +++ b/lib/vector-core/src/transform/mod.rs @@ -1,3 +1,4 @@ +use std::sync::Arc; use std::{collections::HashMap, error, pin::Pin}; use futures::{Stream, StreamExt}; @@ -7,13 +8,16 @@ use vector_common::internal_event::{ use vector_common::json_size::JsonSize; use vector_common::EventDataEq; +use crate::config::{ComponentKey, OutputId}; +use crate::event::EventMutRef; +use crate::schema::Definition; use crate::{ config, event::{ into_event_stream, EstimatedJsonEncodedSizeOf, Event, EventArray, EventContainer, EventRef, }, fanout::{self, Fanout}, - ByteSizeOf, + schema, ByteSizeOf, }; #[cfg(any(feature = "lua"))] @@ -178,6 +182,8 @@ impl SyncTransform for Box { struct TransformOutput { fanout: Fanout, events_sent: Registered, + log_schema_definitions: HashMap>, + output_id: Arc, } pub struct TransformOutputs { @@ -189,6 +195,7 @@ pub struct TransformOutputs { impl TransformOutputs { pub fn new( outputs_in: Vec, + component_key: &ComponentKey, ) -> (Self, HashMap, fanout::ControlChannel>) { let outputs_spec = outputs_in.clone(); let mut primary_output = None; @@ -197,6 +204,13 @@ impl TransformOutputs { for output in outputs_in { let (fanout, control) = Fanout::new(); + + let log_schema_definitions = output + .log_schema_definitions + .into_iter() + .map(|(id, definition)| (id, Arc::new(definition))) + .collect(); + match output.port { None => { primary_output = Some(TransformOutput { @@ -204,6 +218,11 @@ impl TransformOutputs { events_sent: register(EventsSent::from(internal_event::Output(Some( DEFAULT_OUTPUT.into(), )))), + log_schema_definitions, + output_id: Arc::new(OutputId { + component: component_key.clone(), + port: None, + }), }); controls.insert(None, control); } @@ -215,6 +234,11 @@ impl TransformOutputs { events_sent: register(EventsSent::from(internal_event::Output(Some( name.clone().into(), )))), + log_schema_definitions, + output_id: Arc::new(OutputId { + component: component_key.clone(), + port: Some(name.clone()), + }), }, ); controls.insert(Some(name.clone()), control); @@ -246,31 +270,61 @@ impl TransformOutputs { buf: &mut TransformOutputsBuf, ) -> Result<(), Box> { if let Some(primary) = self.primary_output.as_mut() { - let count = buf.primary_buffer.as_ref().map_or(0, OutputBuffer::len); - let byte_size = buf.primary_buffer.as_ref().map_or( - JsonSize::new(0), - EstimatedJsonEncodedSizeOf::estimated_json_encoded_size_of, - ); - buf.primary_buffer - .as_mut() - .expect("mismatched outputs") - .send(&mut primary.fanout) - .await?; - primary.events_sent.emit(CountByteSize(count, byte_size)); + let buf = buf.primary_buffer.as_mut().expect("mismatched outputs"); + Self::send_single_buffer(buf, primary).await?; } - for (key, buf) in &mut buf.named_buffers { - let count = buf.len(); - let byte_size = buf.estimated_json_encoded_size_of(); let output = self.named_outputs.get_mut(key).expect("unknown output"); - buf.send(&mut output.fanout).await?; - output.events_sent.emit(CountByteSize(count, byte_size)); + Self::send_single_buffer(buf, output).await?; } + Ok(()) + } + async fn send_single_buffer( + buf: &mut OutputBuffer, + output: &mut TransformOutput, + ) -> Result<(), Box> { + for event in buf.events_mut() { + 
update_runtime_schema_definition( + event, + &output.output_id, + &output.log_schema_definitions, + ); + } + let count = buf.len(); + let byte_size = buf.estimated_json_encoded_size_of(); + buf.send(&mut output.fanout).await?; + output.events_sent.emit(CountByteSize(count, byte_size)); Ok(()) } } +#[allow(clippy::implicit_hasher)] +/// `event`: The event that will be updated +/// `output_id`: The `output_id` that the current even is being sent to (will be used as the new `parent_id`) +/// `log_schema_definitions`: A mapping of parent `OutputId` to definitions, that will be used to lookup the new runtime definition of the event +pub fn update_runtime_schema_definition( + mut event: EventMutRef, + output_id: &Arc, + log_schema_definitions: &HashMap>, +) { + if let EventMutRef::Log(log) = &mut event { + if let Some(parent_component_id) = log.metadata().upstream_id() { + if let Some(definition) = log_schema_definitions.get(parent_component_id) { + log.metadata_mut().set_schema_definition(definition); + } + } else { + // there is no parent defined. That means this event originated from a component that + // isn't able to track the source, such as `reduce` or `lua`. In these cases, all of the + // schema definitions _must_ be the same, so the first one is picked + if let Some(definition) = log_schema_definitions.values().next() { + log.metadata_mut().set_schema_definition(definition); + } + } + } + event.metadata_mut().set_upstream_id(Arc::clone(output_id)); +} + #[derive(Debug, Clone)] pub struct TransformOutputsBuf { primary_buffer: Option, @@ -299,34 +353,17 @@ impl TransformOutputsBuf { } } - pub fn push(&mut self, event: Event) { - self.primary_buffer - .as_mut() - .expect("no default output") - .push(event); - } - - pub fn push_named(&mut self, name: &str, event: Event) { - self.named_buffers - .get_mut(name) - .expect("unknown output") - .push(event); - } - - pub fn append(&mut self, slice: &mut Vec) { - self.primary_buffer - .as_mut() - .expect("no default output") - .append(slice); - } - - pub fn append_named(&mut self, name: &str, slice: &mut Vec) { - self.named_buffers - .get_mut(name) - .expect("unknown output") - .append(slice); + /// Adds a new event to the transform output buffer + pub fn push(&mut self, name: Option<&str>, event: Event) { + match name { + Some(name) => self.named_buffers.get_mut(name), + None => self.primary_buffer.as_mut(), + } + .expect("unknown output") + .push(event); } + #[cfg(any(feature = "test", test))] pub fn drain(&mut self) -> impl Iterator + '_ { self.primary_buffer .as_mut() @@ -334,6 +371,7 @@ impl TransformOutputsBuf { .drain() } + #[cfg(any(feature = "test", test))] pub fn drain_named(&mut self, name: &str) -> impl Iterator + '_ { self.named_buffers .get_mut(name) @@ -341,33 +379,15 @@ impl TransformOutputsBuf { .drain() } - pub fn extend(&mut self, events: impl Iterator) { - self.primary_buffer - .as_mut() - .expect("no default output") - .extend(events); - } - + #[cfg(any(feature = "test", test))] pub fn take_primary(&mut self) -> OutputBuffer { std::mem::take(self.primary_buffer.as_mut().expect("no default output")) } + #[cfg(any(feature = "test", test))] pub fn take_all_named(&mut self) -> HashMap { std::mem::take(&mut self.named_buffers) } - - pub fn len(&self) -> usize { - self.primary_buffer.as_ref().map_or(0, OutputBuffer::len) - + self - .named_buffers - .values() - .map(OutputBuffer::len) - .sum::() - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } } impl ByteSizeOf for TransformOutputsBuf { @@ -439,6 +459,7 @@ impl 
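The lookup rule in `update_runtime_schema_definition` above is small enough to restate as a self-contained analogue, using plain strings in place of `OutputId` and the schema `Definition` so it runs on its own: prefer the definition registered for the event's parent output; if the event carries no parent (components such as `reduce` or `lua` cannot track one), fall back to the first definition, which is only sound because all definitions must then be identical.

use std::collections::HashMap;
use std::sync::Arc;

/// Analogue of the definition lookup performed in `update_runtime_schema_definition`.
fn pick_definition<'a>(
    parent: Option<&str>,
    definitions: &'a HashMap<String, Arc<String>>,
) -> Option<&'a Arc<String>> {
    match parent {
        // The event knows which output it came from: use that output's definition.
        Some(parent_id) => definitions.get(parent_id),
        // No parent tracked: any definition will do, since they must all agree.
        None => definitions.values().next(),
    }
}

fn main() {
    let mut definitions = HashMap::new();
    definitions.insert("in".to_string(), Arc::new("schema for `in`".to_string()));

    assert!(pick_definition(Some("in"), &definitions).is_some());
    assert!(pick_definition(None, &definitions).is_some());
    assert!(pick_definition(Some("unknown"), &definitions).is_none());
}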
OutputBuffer { }) } + #[cfg(any(feature = "test", test))] pub fn drain(&mut self) -> impl Iterator + '_ { self.0.drain(..).flat_map(EventArray::into_events) } @@ -458,12 +479,12 @@ impl OutputBuffer { self.0.iter().flat_map(EventArray::iter_events) } - pub fn into_events(self) -> impl Iterator { - self.0.into_iter().flat_map(EventArray::into_events) + fn events_mut(&mut self) -> impl Iterator { + self.0.iter_mut().flat_map(EventArray::iter_events_mut) } - pub fn take_events(&mut self) -> Vec { - std::mem::take(&mut self.0) + pub fn into_events(self) -> impl Iterator { + self.0.into_iter().flat_map(EventArray::into_events) } } diff --git a/src/config/transform.rs b/src/config/transform.rs index c2be848d53361..1b9f442ef0786 100644 --- a/src/config/transform.rs +++ b/src/config/transform.rs @@ -195,6 +195,9 @@ pub trait TransformConfig: DynClone + NamedComponent + core::fmt::Debug + Send + &self, enrichment_tables: enrichment::TableRegistry, input_definitions: &[(OutputId, schema::Definition)], + + // This only exists for transforms that create logs from non-logs, to know which namespace + // to use, such as `metric_to_log` global_log_namespace: LogNamespace, ) -> Vec; diff --git a/src/source_sender/mod.rs b/src/source_sender/mod.rs index a4f4eaae3b751..fea4a3980b64d 100644 --- a/src/source_sender/mod.rs +++ b/src/source_sender/mod.rs @@ -1,4 +1,5 @@ #![allow(missing_docs)] +use std::sync::Arc; use std::{collections::HashMap, fmt}; use chrono::Utc; @@ -19,6 +20,8 @@ use vrl::value::Value; mod errors; +use crate::config::{ComponentKey, OutputId}; +use crate::schema::Definition; pub use errors::{ClosedError, StreamSendError}; use lookup::PathPrefix; @@ -48,17 +51,37 @@ impl Builder { } } - pub fn add_source_output(&mut self, output: SourceOutput) -> LimitedReceiver { + pub fn add_source_output( + &mut self, + output: SourceOutput, + component_key: ComponentKey, + ) -> LimitedReceiver { let lag_time = self.lag_time.clone(); + let log_definition = output.schema_definition.clone(); + let output_id = OutputId { + component: component_key, + port: output.port.clone(), + }; match output.port { None => { - let (inner, rx) = - Inner::new_with_buffer(self.buf_size, DEFAULT_OUTPUT.to_owned(), lag_time); + let (inner, rx) = Inner::new_with_buffer( + self.buf_size, + DEFAULT_OUTPUT.to_owned(), + lag_time, + log_definition, + output_id, + ); self.inner = Some(inner); rx } Some(name) => { - let (inner, rx) = Inner::new_with_buffer(self.buf_size, name.clone(), lag_time); + let (inner, rx) = Inner::new_with_buffer( + self.buf_size, + name.clone(), + lag_time, + log_definition, + output_id, + ); self.named_inners.insert(name, inner); rx } @@ -91,9 +114,15 @@ impl SourceSender { } } - pub fn new_with_buffer(n: usize) -> (Self, LimitedReceiver) { + #[cfg(test)] + pub fn new_test_sender_with_buffer(n: usize) -> (Self, LimitedReceiver) { let lag_time = Some(register_histogram!(LAG_TIME_NAME)); - let (inner, rx) = Inner::new_with_buffer(n, DEFAULT_OUTPUT.to_owned(), lag_time); + let output_id = OutputId { + component: "test".to_string().into(), + port: None, + }; + let (inner, rx) = + Inner::new_with_buffer(n, DEFAULT_OUTPUT.to_owned(), lag_time, None, output_id); ( Self { inner: Some(inner), @@ -105,14 +134,14 @@ impl SourceSender { #[cfg(test)] pub fn new_test() -> (Self, impl Stream + Unpin) { - let (pipe, recv) = Self::new_with_buffer(TEST_BUFFER_SIZE); + let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); let recv = recv.into_stream().flat_map(into_event_stream); (pipe, recv) } #[cfg(test)] 
pub fn new_test_finalize(status: EventStatus) -> (Self, impl Stream + Unpin) { - let (pipe, recv) = Self::new_with_buffer(TEST_BUFFER_SIZE); + let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); // In a source test pipeline, there is no sink to acknowledge // events, so we have to add a map to the receiver to handle the // finalization. @@ -131,7 +160,7 @@ impl SourceSender { pub fn new_test_errors( error_at: impl Fn(usize) -> bool, ) -> (Self, impl Stream + Unpin) { - let (pipe, recv) = Self::new_with_buffer(TEST_BUFFER_SIZE); + let (pipe, recv) = Self::new_test_sender_with_buffer(TEST_BUFFER_SIZE); // In a source test pipeline, there is no sink to acknowledge // events, so we have to add a map to the receiver to handle the // finalization. @@ -161,7 +190,11 @@ impl SourceSender { ) -> impl Stream + Unpin { // The lag_time parameter here will need to be filled in if this function is ever used for // non-test situations. - let (inner, recv) = Inner::new_with_buffer(100, name.clone(), None); + let output_id = OutputId { + component: "test".to_string().into(), + port: Some(name.clone()), + }; + let (inner, recv) = Inner::new_with_buffer(100, name.clone(), None, None, output_id); let recv = recv.into_stream().map(move |mut events| { events.iter_events_mut().for_each(|mut event| { let metadata = event.metadata_mut(); @@ -225,6 +258,11 @@ struct Inner { output: String, lag_time: Option, events_sent: Registered, + /// The schema definition that will be attached to Log events sent through here + log_definition: Option>, + /// The OutputId related to this source sender. This is set as the `upstream_id` in + /// `EventMetadata` for all event sent through here. + output_id: Arc, } impl fmt::Debug for Inner { @@ -242,6 +280,8 @@ impl Inner { n: usize, output: String, lag_time: Option, + log_definition: Option>, + output_id: OutputId, ) -> (Self, LimitedReceiver) { let (tx, rx) = channel::limited(n); ( @@ -252,16 +292,29 @@ impl Inner { events_sent: register!(EventsSent::from(internal_event::Output(Some( output.into() )))), + log_definition, + output_id: Arc::new(output_id), }, rx, ) } - async fn send(&mut self, events: EventArray) -> Result<(), ClosedError> { + async fn send(&mut self, mut events: EventArray) -> Result<(), ClosedError> { let reference = Utc::now().timestamp_millis(); events .iter_events() .for_each(|event| self.emit_lag_time(event, reference)); + + events.iter_events_mut().for_each(|mut event| { + // attach runtime schema definitions from the source + if let Some(log_definition) = &self.log_definition { + event.metadata_mut().set_schema_definition(log_definition); + } + event + .metadata_mut() + .set_upstream_id(Arc::clone(&self.output_id)); + }); + let byte_size = events.estimated_json_encoded_size_of(); let count = events.len(); self.inner.send(events).await.map_err(|_| ClosedError)?; @@ -290,23 +343,10 @@ impl Inner { E: Into + ByteSizeOf, I: IntoIterator, { - let reference = Utc::now().timestamp_millis(); let events = events.into_iter().map(Into::into); for events in array::events_into_arrays(events, Some(CHUNK_SIZE)) { - events - .iter_events() - .for_each(|event| self.emit_lag_time(event, reference)); - let cbs = CountByteSize(events.len(), events.estimated_json_encoded_size_of()); - match self.inner.send(events).await { - Ok(()) => { - self.events_sent.emit(cbs); - } - Err(error) => { - return Err(error.into()); - } - } + self.send(events).await?; } - Ok(()) } diff --git a/src/sources/kafka.rs b/src/sources/kafka.rs index ca904de314a62..8ba6511c53557 100644 
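As a rough guide to the test updates that follow, a sketch (written as it would sit inside the vector test modules, with an illustrative helper name) of the metadata an expected event now needs once it has passed through the renamed test sender: the hard-coded test `OutputId` above supplies the "test" component, and `Definition::default_legacy_namespace()` matches what the unit-test sources in this patch declare.

use std::sync::Arc;

use vector_core::config::OutputId;

use crate::config::schema::Definition;
use crate::event::{Event, LogEvent};

fn expected_event(message: &str) -> Event {
    let mut expected = Event::from(LogEvent::from(message));
    // The test SourceSender stamps every event with the "test" output id...
    expected.set_upstream_id(Arc::new(OutputId::from("test")));
    // ...and with the source's log schema definition.
    expected
        .metadata_mut()
        .set_schema_definition(&Arc::new(Definition::default_legacy_namespace()));
    expected
}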
--- a/src/sources/kafka.rs +++ b/src/sources/kafka.rs @@ -1121,7 +1121,7 @@ mod integration_test { delay: Duration, status: EventStatus, ) -> (SourceSender, impl Stream + Unpin) { - let (pipe, recv) = SourceSender::new_with_buffer(100); + let (pipe, recv) = SourceSender::new_test_sender_with_buffer(100); let recv = BufferReceiver::new(recv.into()).into_stream(); let recv = recv.then(move |mut events| async move { events.iter_logs_mut().for_each(|log| { diff --git a/src/sources/opentelemetry/tests.rs b/src/sources/opentelemetry/tests.rs index 798759fa1138d..fc538efad199d 100644 --- a/src/sources/opentelemetry/tests.rs +++ b/src/sources/opentelemetry/tests.rs @@ -10,10 +10,12 @@ use opentelemetry_proto::proto::{ }; use similar_asserts::assert_eq; use std::collections::BTreeMap; +use std::sync::Arc; use tonic::Request; use vector_core::config::LogNamespace; use vrl::value; +use crate::config::OutputId; use crate::{ config::{SourceConfig, SourceContext}, event::{into_event_stream, Event, EventStatus, LogEvent, Value}, @@ -269,7 +271,11 @@ async fn receive_grpc_logs_legacy_namespace() { ("observed_timestamp", Utc.timestamp_nanos(2).into()), ("source_type", "opentelemetry".into()), ]); - let expect_event = Event::from(LogEvent::from(expect_vec)); + let mut expect_event = Event::from(LogEvent::from(expect_vec)); + expect_event.set_upstream_id(Arc::new(OutputId { + component: "test".into(), + port: Some("logs".into()), + })); assert_eq!(actual_event, expect_event); }) .await; diff --git a/src/sources/socket/mod.rs b/src/sources/socket/mod.rs index 93366629f3420..58ef30c3fcf99 100644 --- a/src/sources/socket/mod.rs +++ b/src/sources/socket/mod.rs @@ -727,7 +727,7 @@ mod test { // shutdown. let addr = next_addr(); - let (source_tx, source_rx) = SourceSender::new_with_buffer(10_000); + let (source_tx, source_rx) = SourceSender::new_test_sender_with_buffer(10_000); let source_key = ComponentKey::from("tcp_shutdown_infinite_stream"); let (source_cx, mut shutdown) = SourceContext::new_shutdown(&source_key, source_tx); diff --git a/src/sources/statsd/mod.rs b/src/sources/statsd/mod.rs index 467dae41fec30..a1ae446c56d36 100644 --- a/src/sources/statsd/mod.rs +++ b/src/sources/statsd/mod.rs @@ -453,7 +453,7 @@ mod test { // packet we send has a lot of metrics per packet. We could technically count them all up // and have a more accurate number here, but honestly, who cares? This is big enough. let component_key = ComponentKey::from("statsd"); - let (tx, rx) = SourceSender::new_with_buffer(4096); + let (tx, rx) = SourceSender::new_test_sender_with_buffer(4096); let (source_ctx, shutdown) = SourceContext::new_shutdown(&component_key, tx); let sink = statsd_config .build(source_ctx) @@ -547,7 +547,7 @@ mod test { // packet we send has a lot of metrics per packet. We could technically count them all up // and have a more accurate number here, but honestly, who cares? This is big enough. 
let component_key = ComponentKey::from("statsd"); - let (tx, _rx) = SourceSender::new_with_buffer(4096); + let (tx, _rx) = SourceSender::new_test_sender_with_buffer(4096); let (source_ctx, shutdown) = SourceContext::new_shutdown(&component_key, tx); let sink = statsd_config .build(source_ctx) diff --git a/src/test_util/mock/mod.rs b/src/test_util/mock/mod.rs index 62b0d96d76f10..3fb594b677e7a 100644 --- a/src/test_util/mock/mod.rs +++ b/src/test_util/mock/mod.rs @@ -30,12 +30,12 @@ pub fn backpressure_source(counter: &Arc) -> BackpressureSourceConf } pub fn basic_source() -> (SourceSender, BasicSourceConfig) { - let (tx, rx) = SourceSender::new_with_buffer(1); + let (tx, rx) = SourceSender::new_test_sender_with_buffer(1); (tx, BasicSourceConfig::new(rx)) } pub fn basic_source_with_data(data: &str) -> (SourceSender, BasicSourceConfig) { - let (tx, rx) = SourceSender::new_with_buffer(1); + let (tx, rx) = SourceSender::new_test_sender_with_buffer(1); (tx, BasicSourceConfig::new_with_data(rx, data)) } @@ -43,7 +43,7 @@ pub fn basic_source_with_event_counter( force_shutdown: bool, ) -> (SourceSender, BasicSourceConfig, Arc) { let event_counter = Arc::new(AtomicUsize::new(0)); - let (tx, rx) = SourceSender::new_with_buffer(1); + let (tx, rx) = SourceSender::new_test_sender_with_buffer(1); let mut source = BasicSourceConfig::new_with_event_counter(rx, Arc::clone(&event_counter)); source.set_force_shutdown(force_shutdown); @@ -75,7 +75,7 @@ pub const fn backpressure_sink(num_to_consume: usize) -> BackpressureSinkConfig } pub fn basic_sink(channel_size: usize) -> (impl Stream, BasicSinkConfig) { - let (tx, rx) = SourceSender::new_with_buffer(channel_size); + let (tx, rx) = SourceSender::new_test_sender_with_buffer(channel_size); let sink = BasicSinkConfig::new(tx, true); (rx.into_stream(), sink) } @@ -84,7 +84,7 @@ pub fn basic_sink_with_data( channel_size: usize, data: &str, ) -> (impl Stream, BasicSinkConfig) { - let (tx, rx) = SourceSender::new_with_buffer(channel_size); + let (tx, rx) = SourceSender::new_test_sender_with_buffer(channel_size); let sink = BasicSinkConfig::new_with_data(tx, true, data); (rx.into_stream(), sink) } @@ -92,7 +92,7 @@ pub fn basic_sink_with_data( pub fn basic_sink_failing_healthcheck( channel_size: usize, ) -> (impl Stream, BasicSinkConfig) { - let (tx, rx) = SourceSender::new_with_buffer(channel_size); + let (tx, rx) = SourceSender::new_test_sender_with_buffer(channel_size); let sink = BasicSinkConfig::new(tx, false); (rx.into_stream(), sink) } diff --git a/src/topology/builder.rs b/src/topology/builder.rs index 4a858acb7d113..b7ace14acd57b 100644 --- a/src/topology/builder.rs +++ b/src/topology/builder.rs @@ -20,6 +20,7 @@ use vector_common::internal_event::{ self, CountByteSize, EventsSent, InternalEventHandle as _, Registered, }; use vector_core::config::LogNamespace; +use vector_core::transform::update_runtime_schema_definition; use vector_core::{ buffers::{ topology::{ @@ -242,7 +243,7 @@ impl<'a> Builder<'a> { let mut schema_definitions = HashMap::with_capacity(source_outputs.len()); for output in source_outputs.into_iter() { - let mut rx = builder.add_source_output(output.clone()); + let mut rx = builder.add_source_output(output.clone(), key.clone()); let (mut fanout, control) = Fanout::new(); let source = Arc::new(key.clone()); @@ -735,6 +736,7 @@ fn build_transform( node.input_details.data_type(), node.typetag, &node.key, + &node.outputs, ), } } @@ -744,7 +746,7 @@ fn build_sync_transform( node: TransformNode, input_rx: BufferReceiver, ) -> (Task, HashMap) { 
- let (outputs, controls) = TransformOutputs::new(node.outputs); + let (outputs, controls) = TransformOutputs::new(node.outputs, &node.key); let runner = Runner::new(t, input_rx, node.input_details.data_type(), outputs); let transform = if node.enable_concurrency { @@ -926,6 +928,7 @@ fn build_task_transform( input_type: DataType, typetag: &str, key: &ComponentKey, + outputs: &[TransformOutput], ) -> (Task, HashMap) { let (mut fanout, control) = Fanout::new(); @@ -941,8 +944,30 @@ fn build_task_transform( )) }); let events_sent = register!(EventsSent::from(internal_event::Output(None))); + let output_id = Arc::new(OutputId { + component: key.clone(), + port: None, + }); + + // Task transforms can only write to the default output, so only a single schema def map is needed + let schema_definition_map = outputs + .iter() + .find(|x| x.port.is_none()) + .expect("output for default port required for task transforms") + .log_schema_definitions + .clone() + .into_iter() + .map(|(key, value)| (key, Arc::new(value))) + .collect(); + let stream = t .transform(Box::pin(filtered)) + .map(move |mut events| { + for event in events.iter_events_mut() { + update_runtime_schema_definition(event, &output_id, &schema_definition_map); + } + events + }) .inspect(move |events: &EventArray| { events_sent.emit(CountByteSize( events.len(), diff --git a/src/topology/test/compliance.rs b/src/topology/test/compliance.rs index a716d29593998..8f4602aa1bba3 100644 --- a/src/topology/test/compliance.rs +++ b/src/topology/test/compliance.rs @@ -2,8 +2,10 @@ use std::sync::Arc; use tokio::sync::oneshot::{channel, Receiver}; use vector_common::config::ComponentKey; +use vector_core::config::OutputId; use vector_core::event::{Event, EventArray, EventContainer, LogEvent}; +use crate::config::schema::Definition; use crate::{ config::{unit_test::UnitTestSourceConfig, ConfigBuilder}, test_util::{ @@ -57,6 +59,10 @@ async fn test_function_transform_single_event() { assert_eq!(events.len(), 1); original_event.set_source_id(Arc::new(ComponentKey::from("in"))); + original_event.set_upstream_id(Arc::new(OutputId::from("transform"))); + original_event + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); let event = events.remove(0); assert_eq!(original_event, event); @@ -78,6 +84,10 @@ async fn test_sync_transform_single_event() { assert_eq!(events.len(), 1); original_event.set_source_id(Arc::new(ComponentKey::from("in"))); + original_event.set_upstream_id(Arc::new(OutputId::from("transform"))); + original_event + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); let event = events.remove(0); assert_eq!(original_event, event); @@ -98,6 +108,10 @@ async fn test_task_transform_single_event() { assert_eq!(events.len(), 1); original_event.set_source_id(Arc::new(ComponentKey::from("in"))); + original_event.set_upstream_id(Arc::new(OutputId::from("transform"))); + original_event + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); let event = events.remove(0); assert_eq!(original_event, event); diff --git a/src/topology/test/mod.rs b/src/topology/test/mod.rs index aa5720382e96c..b8b9c3a0fd5d0 100644 --- a/src/topology/test/mod.rs +++ b/src/topology/test/mod.rs @@ -7,6 +7,7 @@ use std::{ }, }; +use crate::schema::Definition; use crate::{ config::{Config, ConfigDiff, SinkOuter}, event::{into_event_stream, Event, EventArray, EventContainer, LogEvent}, @@ -27,6 +28,7 @@ use tokio::{ }; use vector_buffers::{BufferConfig, 
BufferType, WhenFull}; use vector_common::config::ComponentKey; +use vector_core::config::OutputId; mod backpressure; mod compliance; @@ -149,6 +151,10 @@ async fn topology_source_and_sink() { let res = out1.flat_map(into_event_stream).collect::>().await; event.set_source_id(Arc::new(ComponentKey::from("in1"))); + event.set_upstream_id(Arc::new(OutputId::from("test"))); + event + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(vec![event], res); } @@ -184,6 +190,16 @@ async fn topology_multiple_sources() { event1.set_source_id(Arc::new(ComponentKey::from("in1"))); event2.set_source_id(Arc::new(ComponentKey::from("in2"))); + event1.set_upstream_id(Arc::new(OutputId::from("test"))); + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); + + event2.set_upstream_id(Arc::new(OutputId::from("test"))); + event2 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); + assert_eq!(out_event1, Some(event1.into())); assert_eq!(out_event2, Some(event2.into())); } @@ -218,6 +234,12 @@ async fn topology_multiple_sinks() { // We should see that both sinks got the exact same event: event.set_source_id(Arc::new(ComponentKey::from("in1"))); + + event.set_upstream_id(Arc::new(OutputId::from("test"))); + event + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); + let expected = vec![event]; assert_eq!(expected, res1); assert_eq!(expected, res2); @@ -293,6 +315,11 @@ async fn topology_remove_one_source() { event1.set_source_id(Arc::new(ComponentKey::from("in1"))); + event1.set_upstream_id(Arc::new(OutputId::from("test"))); + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); + let res = h_out1.await.unwrap(); assert_eq!(vec![event1], res); } @@ -332,6 +359,11 @@ async fn topology_remove_one_sink() { event.set_source_id(Arc::new(ComponentKey::from("in1"))); + event.set_upstream_id(Arc::new(OutputId::from("test"))); + event + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); + assert_eq!(vec![event], res1); assert_eq!(Vec::::new(), res2); } @@ -442,6 +474,11 @@ async fn topology_swap_source() { assert_eq!(Vec::::new(), res1); event2.set_source_id(Arc::new(ComponentKey::from("in2"))); + event2.set_upstream_id(Arc::new(OutputId::from("test"))); + event2 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); + assert_eq!(vec![event2], res2); } @@ -554,6 +591,10 @@ async fn topology_swap_sink() { assert_eq!(Vec::::new(), res1); event1.set_source_id(Arc::new(ComponentKey::from("in1"))); + event1.set_upstream_id(Arc::new(OutputId::from("test"))); + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(vec![event1], res2); } @@ -663,6 +704,15 @@ async fn topology_rebuild_connected() { event1.set_source_id(Arc::new(ComponentKey::from("in1"))); event2.set_source_id(Arc::new(ComponentKey::from("in1"))); + event1.set_upstream_id(Arc::new(OutputId::from("test"))); + event2.set_upstream_id(Arc::new(OutputId::from("test"))); + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); + event2 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); + assert_eq!(vec![event1, event2], res); } @@ -715,6 +765,10 @@ async fn topology_rebuild_connected_transform() { 
assert_eq!(Vec::::new(), res1); event.set_source_id(Arc::new(ComponentKey::from("in1"))); + event.set_upstream_id(Arc::new(OutputId::from("test"))); + event + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(vec![event], res2); } diff --git a/src/transforms/aggregate.rs b/src/transforms/aggregate.rs index a591305764df1..ca5a7ae8679cb 100644 --- a/src/transforms/aggregate.rs +++ b/src/transforms/aggregate.rs @@ -156,8 +156,10 @@ mod tests { use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use vector_common::config::ComponentKey; + use vrl::value::Kind; use super::*; + use crate::schema::Definition; use crate::{ event::{metric, Event, Metric}, test_util::components::assert_transform_compliance, @@ -174,8 +176,13 @@ mod tests { kind: metric::MetricKind, value: metric::MetricValue, ) -> Event { - Event::Metric(Metric::new(name, kind, value)) + let mut event = Event::Metric(Metric::new(name, kind, value)) .with_source_id(Arc::new(ComponentKey::from("in"))) + .with_upstream_id(Arc::new(OutputId::from("transform"))); + event.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + event } #[test] diff --git a/src/transforms/dedupe.rs b/src/transforms/dedupe.rs index 4a6497628d78a..513a91ce9115e 100644 --- a/src/transforms/dedupe.rs +++ b/src/transforms/dedupe.rs @@ -289,7 +289,9 @@ mod tests { use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use vector_common::config::ComponentKey; + use vector_core::config::OutputId; + use crate::config::schema::Definition; use crate::{ event::{Event, LogEvent, Value}, test_util::components::assert_transform_compliance, @@ -363,6 +365,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event1); // Second event differs in matched field so should be output even though it @@ -371,6 +378,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event2.set_source_id(Arc::new(ComponentKey::from("in"))); + event2.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event2 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event2); // Third event has the same value for "matched" as first event, so it should be dropped. 
@@ -413,6 +425,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event1); // Second event has a different matched field name with the same value, @@ -421,6 +438,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event2.set_source_id(Arc::new(ComponentKey::from("in"))); + event2.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event2 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event2); drop(tx); @@ -466,6 +488,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event1); // Second event is the same just with different field order, so it @@ -511,6 +538,12 @@ mod tests { let new_event = out.recv().await.unwrap(); event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + + // the schema definition is copied from the source for dedupe + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event1); // Second event gets output because it's not a dupe. 
This causes the first @@ -519,6 +552,12 @@ mod tests { let new_event = out.recv().await.unwrap(); event2.set_source_id(Arc::new(ComponentKey::from("in"))); + event2.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event2 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); + assert_eq!(new_event, event2); // Third event is a dupe but gets output anyway because the first @@ -568,6 +607,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event1); // Second event should also get passed through even though the string @@ -576,6 +620,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event2.set_source_id(Arc::new(ComponentKey::from("in"))); + event2.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event2 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event2); drop(tx); @@ -621,6 +670,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event1); // Second event should also get passed through even though the string @@ -629,6 +683,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event2.set_source_id(Arc::new(ComponentKey::from("in"))); + event2.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event2 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event2); drop(tx); @@ -667,6 +726,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event1.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event1 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event1); // Second event should also get passed through as null is different than @@ -675,6 +739,11 @@ mod tests { let new_event = out.recv().await.unwrap(); event2.set_source_id(Arc::new(ComponentKey::from("in"))); + event2.set_upstream_id(Arc::new(OutputId::from("transform"))); + // the schema definition is copied from the source for dedupe + event2 + .metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); assert_eq!(new_event, event2); drop(tx); diff --git a/src/transforms/filter.rs b/src/transforms/filter.rs index 95e8877bee255..e14f0c7347ab7 100644 --- a/src/transforms/filter.rs +++ b/src/transforms/filter.rs @@ -104,6 +104,7 @@ mod test { use vector_core::event::{Metric, MetricKind, MetricValue}; use super::*; + use crate::config::schema::Definition; use crate::{ conditions::ConditionConfig, event::{Event, LogEvent}, @@ -129,6 +130,10 @@ mod test { 
tx.send(log.clone()).await.unwrap(); log.set_source_id(Arc::new(ComponentKey::from("in"))); + log.set_upstream_id(Arc::new(OutputId::from("transform"))); + log.metadata_mut() + .set_schema_definition(&Arc::new(Definition::default_legacy_namespace())); + assert_eq!(out.recv().await.unwrap(), log); let metric = Event::from(Metric::new( diff --git a/src/transforms/log_to_metric.rs b/src/transforms/log_to_metric.rs index ad44b0a9e6d55..cb99cb186de8b 100644 --- a/src/transforms/log_to_metric.rs +++ b/src/transforms/log_to_metric.rs @@ -1,3 +1,4 @@ +use std::sync::Arc; use std::{collections::HashMap, num::ParseFloatError}; use chrono::Utc; @@ -5,6 +6,7 @@ use indexmap::IndexMap; use vector_config::configurable_component; use vector_core::config::LogNamespace; +use crate::config::schema::Definition; use crate::{ config::{ DataType, GenerateConfig, Input, OutputId, TransformConfig, TransformContext, @@ -256,7 +258,10 @@ fn to_metric(config: &MetricConfig, event: &Event) -> Result Vec { let log_namespace = global_log_namespace.merge(self.log_namespace); - let mut schema_definition = - Definition::default_for_namespace(&BTreeSet::from([log_namespace])) - .with_event_field(&owned_value_path!("name"), Kind::bytes(), None) - .with_event_field( - &owned_value_path!("namespace"), - Kind::bytes().or_undefined(), - None, - ) - .with_event_field( - &owned_value_path!("tags"), - Kind::object(Collection::empty().with_unknown(Kind::bytes())).or_undefined(), - None, - ) - .with_event_field(&owned_value_path!("kind"), Kind::bytes(), None) - .with_event_field( - &owned_value_path!("counter"), - Kind::object(Collection::empty().with_known("value", Kind::float())) - .or_undefined(), - None, - ) - .with_event_field( - &owned_value_path!("gauge"), - Kind::object(Collection::empty().with_known("value", Kind::float())) - .or_undefined(), - None, - ) - .with_event_field( - &owned_value_path!("set"), - Kind::object(Collection::empty().with_known( - "values", - Kind::array(Collection::empty().with_unknown(Kind::bytes())), - )) - .or_undefined(), - None, - ) - .with_event_field( - &owned_value_path!("distribution"), - Kind::object( - Collection::empty() - .with_known( - "samples", - Kind::array( - Collection::empty().with_unknown(Kind::object( - Collection::empty() - .with_known("value", Kind::float()) - .with_known("rate", Kind::integer()), - )), - ), - ) - .with_known("statistic", Kind::bytes()), - ) - .or_undefined(), - None, - ) - .with_event_field( - &owned_value_path!("aggregated_histogram"), - Kind::object( - Collection::empty() - .with_known( - "buckets", - Kind::array( - Collection::empty().with_unknown(Kind::object( - Collection::empty() - .with_known("upper_limit", Kind::float()) - .with_known("count", Kind::integer()), - )), - ), - ) - .with_known("count", Kind::integer()) - .with_known("sum", Kind::float()), - ) - .or_undefined(), - None, - ) - .with_event_field( - &owned_value_path!("aggregated_summary"), - Kind::object( - Collection::empty() - .with_known( - "quantiles", - Kind::array( - Collection::empty().with_unknown(Kind::object( - Collection::empty() - .with_known("quantile", Kind::float()) - .with_known("value", Kind::float()), - )), - ), - ) - .with_known("count", Kind::integer()) - .with_known("sum", Kind::float()), - ) - .or_undefined(), - None, - ) - .with_event_field( - &owned_value_path!("sketch"), - Kind::any().or_undefined(), - None, - ); - - match log_namespace { - LogNamespace::Vector => { - // from serializing the Metric (Legacy moves it to another field) - schema_definition = 
schema_definition.with_event_field( - &owned_value_path!("timestamp"), - Kind::bytes().or_undefined(), - None, - ); - - // This is added as a "marker" field to determine which namespace is being used at runtime. - // This is normally handled automatically by sources, but this is a special case. - schema_definition = schema_definition.with_metadata_field( - &owned_value_path!("vector"), - Kind::object(Collection::empty()), - None, - ); - } - LogNamespace::Legacy => { - if let Some(timestamp_key) = log_schema().timestamp_key() { - schema_definition = - schema_definition.with_event_field(timestamp_key, Kind::timestamp(), None); - } - - schema_definition = schema_definition.with_event_field( - &parse_value_path(log_schema().host_key()).expect("valid host key"), - Kind::bytes().or_undefined(), - None, - ); - } - } + let schema_definition = schema_definition(log_namespace); vec![TransformOutput::new( DataType::Log, @@ -249,6 +120,137 @@ impl TransformConfig for MetricToLogConfig { } } +fn schema_definition(log_namespace: LogNamespace) -> Definition { + let mut schema_definition = Definition::default_for_namespace(&BTreeSet::from([log_namespace])) + .with_event_field(&owned_value_path!("name"), Kind::bytes(), None) + .with_event_field( + &owned_value_path!("namespace"), + Kind::bytes().or_undefined(), + None, + ) + .with_event_field( + &owned_value_path!("tags"), + Kind::object(Collection::empty().with_unknown(Kind::bytes())).or_undefined(), + None, + ) + .with_event_field(&owned_value_path!("kind"), Kind::bytes(), None) + .with_event_field( + &owned_value_path!("counter"), + Kind::object(Collection::empty().with_known("value", Kind::float())).or_undefined(), + None, + ) + .with_event_field( + &owned_value_path!("gauge"), + Kind::object(Collection::empty().with_known("value", Kind::float())).or_undefined(), + None, + ) + .with_event_field( + &owned_value_path!("set"), + Kind::object(Collection::empty().with_known( + "values", + Kind::array(Collection::empty().with_unknown(Kind::bytes())), + )) + .or_undefined(), + None, + ) + .with_event_field( + &owned_value_path!("distribution"), + Kind::object( + Collection::empty() + .with_known( + "samples", + Kind::array( + Collection::empty().with_unknown(Kind::object( + Collection::empty() + .with_known("value", Kind::float()) + .with_known("rate", Kind::integer()), + )), + ), + ) + .with_known("statistic", Kind::bytes()), + ) + .or_undefined(), + None, + ) + .with_event_field( + &owned_value_path!("aggregated_histogram"), + Kind::object( + Collection::empty() + .with_known( + "buckets", + Kind::array( + Collection::empty().with_unknown(Kind::object( + Collection::empty() + .with_known("upper_limit", Kind::float()) + .with_known("count", Kind::integer()), + )), + ), + ) + .with_known("count", Kind::integer()) + .with_known("sum", Kind::float()), + ) + .or_undefined(), + None, + ) + .with_event_field( + &owned_value_path!("aggregated_summary"), + Kind::object( + Collection::empty() + .with_known( + "quantiles", + Kind::array( + Collection::empty().with_unknown(Kind::object( + Collection::empty() + .with_known("quantile", Kind::float()) + .with_known("value", Kind::float()), + )), + ), + ) + .with_known("count", Kind::integer()) + .with_known("sum", Kind::float()), + ) + .or_undefined(), + None, + ) + .with_event_field( + &owned_value_path!("sketch"), + Kind::any().or_undefined(), + None, + ); + + match log_namespace { + LogNamespace::Vector => { + // from serializing the Metric (Legacy moves it to another field) + schema_definition = 
schema_definition.with_event_field( + &owned_value_path!("timestamp"), + Kind::bytes().or_undefined(), + None, + ); + + // This is added as a "marker" field to determine which namespace is being used at runtime. + // This is normally handled automatically by sources, but this is a special case. + schema_definition = schema_definition.with_metadata_field( + &owned_value_path!("vector"), + Kind::object(Collection::empty()), + None, + ); + } + LogNamespace::Legacy => { + if let Some(timestamp_key) = log_schema().timestamp_key() { + schema_definition = + schema_definition.with_event_field(timestamp_key, Kind::timestamp(), None); + } + + schema_definition = schema_definition.with_event_field( + &parse_value_path(log_schema().host_key()).expect("valid host key"), + Kind::bytes().or_undefined(), + None, + ); + } + } + schema_definition +} + #[derive(Clone, Debug)] pub struct MetricToLog { host_tag: String, @@ -412,6 +414,8 @@ mod tests { .with_timestamp(Some(ts())); let mut metadata = counter.metadata().clone(); metadata.set_source_id(Arc::new(ComponentKey::from("in"))); + metadata.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata.set_schema_definition(&Arc::new(schema_definition(LogNamespace::Legacy))); let log = do_transform(counter).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); @@ -440,6 +444,8 @@ mod tests { .with_timestamp(Some(ts())); let mut metadata = gauge.metadata().clone(); metadata.set_source_id(Arc::new(ComponentKey::from("in"))); + metadata.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata.set_schema_definition(&Arc::new(schema_definition(LogNamespace::Legacy))); let log = do_transform(gauge).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); @@ -468,6 +474,8 @@ mod tests { .with_timestamp(Some(ts())); let mut metadata = set.metadata().clone(); metadata.set_source_id(Arc::new(ComponentKey::from("in"))); + metadata.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata.set_schema_definition(&Arc::new(schema_definition(LogNamespace::Legacy))); let log = do_transform(set).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); @@ -498,6 +506,8 @@ mod tests { .with_timestamp(Some(ts())); let mut metadata = distro.metadata().clone(); metadata.set_source_id(Arc::new(ComponentKey::from("in"))); + metadata.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata.set_schema_definition(&Arc::new(schema_definition(LogNamespace::Legacy))); let log = do_transform(distro).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); @@ -547,6 +557,8 @@ mod tests { .with_timestamp(Some(ts())); let mut metadata = histo.metadata().clone(); metadata.set_source_id(Arc::new(ComponentKey::from("in"))); + metadata.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata.set_schema_definition(&Arc::new(schema_definition(LogNamespace::Legacy))); let log = do_transform(histo).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); @@ -594,6 +606,8 @@ mod tests { .with_timestamp(Some(ts())); let mut metadata = summary.metadata().clone(); metadata.set_source_id(Arc::new(ComponentKey::from("in"))); + metadata.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata.set_schema_definition(&Arc::new(schema_definition(LogNamespace::Legacy))); let log = do_transform(summary).await.unwrap(); let collected: Vec<_> = log.all_fields().unwrap().collect(); diff --git a/src/transforms/reduce/mod.rs b/src/transforms/reduce/mod.rs index 
455a4b142e4d6..90c9294b0cb63 100644 --- a/src/transforms/reduce/mod.rs +++ b/src/transforms/reduce/mod.rs @@ -26,6 +26,7 @@ use crate::{ mod merge_strategy; +use crate::config::schema::Definition; use crate::event::Value; pub use merge_strategy::*; use vector_core::config::LogNamespace; @@ -133,94 +134,101 @@ impl TransformConfig for ReduceConfig { input_definitions: &[(OutputId, schema::Definition)], _: LogNamespace, ) -> Vec { - let mut output_definitions = HashMap::new(); - - for (output, input) in input_definitions { - let mut schema_definition = input.clone(); - - for (key, merge_strategy) in self.merge_strategies.iter() { - let key = if let Ok(key) = parse_target_path(key) { - key - } else { - continue; - }; - - let input_kind = match key.prefix { - PathPrefix::Event => schema_definition.event_kind().at_path(&key.path), - PathPrefix::Metadata => schema_definition.metadata_kind().at_path(&key.path), - }; - - let new_kind = match merge_strategy { - MergeStrategy::Discard | MergeStrategy::Retain => { - /* does not change the type */ - input_kind.clone() + // Events may be combined, so there isn't a true single "source" for events. + // All of the definitions must be merged. + let merged_definition: Definition = input_definitions + .iter() + .map(|(_output, definition)| definition.clone()) + .reduce(Definition::merge) + .unwrap_or_else(Definition::any); + + let mut schema_definition = merged_definition; + + for (key, merge_strategy) in self.merge_strategies.iter() { + let key = if let Ok(key) = parse_target_path(key) { + key + } else { + continue; + }; + + let input_kind = match key.prefix { + PathPrefix::Event => schema_definition.event_kind().at_path(&key.path), + PathPrefix::Metadata => schema_definition.metadata_kind().at_path(&key.path), + }; + + let new_kind = match merge_strategy { + MergeStrategy::Discard | MergeStrategy::Retain => { + /* does not change the type */ + input_kind.clone() + } + MergeStrategy::Sum | MergeStrategy::Max | MergeStrategy::Min => { + // only keeps integer / float values + match (input_kind.contains_integer(), input_kind.contains_float()) { + (true, true) => Kind::float().or_integer(), + (true, false) => Kind::integer(), + (false, true) => Kind::float(), + (false, false) => Kind::undefined(), } - MergeStrategy::Sum | MergeStrategy::Max | MergeStrategy::Min => { - // only keeps integer / float values - match (input_kind.contains_integer(), input_kind.contains_float()) { - (true, true) => Kind::float().or_integer(), - (true, false) => Kind::integer(), - (false, true) => Kind::float(), - (false, false) => Kind::undefined(), - } + } + MergeStrategy::Array => { + let unknown_kind = input_kind.clone(); + Kind::array(Collection::empty().with_unknown(unknown_kind)) + } + MergeStrategy::Concat => { + let mut new_kind = Kind::never(); + + if input_kind.contains_bytes() { + new_kind.add_bytes(); } - MergeStrategy::Array => { - let unknown_kind = input_kind.clone(); - Kind::array(Collection::empty().with_unknown(unknown_kind)) + if let Some(array) = input_kind.as_array() { + // array elements can be either any type that the field can be, or any + // element of the array + let array_elements = array.reduced_kind().union(input_kind.without_array()); + new_kind.add_array(Collection::empty().with_unknown(array_elements)); } - MergeStrategy::Concat => { - let mut new_kind = Kind::never(); - - if input_kind.contains_bytes() { - new_kind.add_bytes(); - } - if let Some(array) = input_kind.as_array() { - // array elements can be either any type that the field can be, or any - 
// element of the array - let array_elements = - array.reduced_kind().union(input_kind.without_array()); - new_kind.add_array(Collection::empty().with_unknown(array_elements)); - } - new_kind + new_kind + } + MergeStrategy::ConcatNewline | MergeStrategy::ConcatRaw => { + // can only produce bytes (or undefined) + if input_kind.contains_bytes() { + Kind::bytes() + } else { + Kind::undefined() } - MergeStrategy::ConcatNewline | MergeStrategy::ConcatRaw => { - // can only produce bytes (or undefined) - if input_kind.contains_bytes() { - Kind::bytes() - } else { - Kind::undefined() - } + } + MergeStrategy::ShortestArray | MergeStrategy::LongestArray => { + if let Some(array) = input_kind.as_array() { + Kind::array(array.clone()) + } else { + Kind::undefined() } - MergeStrategy::ShortestArray | MergeStrategy::LongestArray => { - if let Some(array) = input_kind.as_array() { - Kind::array(array.clone()) - } else { - Kind::undefined() - } + } + MergeStrategy::FlatUnique => { + let mut array_elements = input_kind.without_array().without_object(); + if let Some(array) = input_kind.as_array() { + array_elements = array_elements.union(array.reduced_kind()); } - MergeStrategy::FlatUnique => { - let mut array_elements = input_kind.without_array().without_object(); - if let Some(array) = input_kind.as_array() { - array_elements = array_elements.union(array.reduced_kind()); - } - if let Some(object) = input_kind.as_object() { - array_elements = array_elements.union(object.reduced_kind()); - } - Kind::array(Collection::empty().with_unknown(array_elements)) + if let Some(object) = input_kind.as_object() { + array_elements = array_elements.union(object.reduced_kind()); } - }; + Kind::array(Collection::empty().with_unknown(array_elements)) + } + }; - // all of the merge strategies are optional. They won't produce a value unless a value actually exists - let new_kind = if input_kind.contains_undefined() { - new_kind.or_undefined() - } else { - new_kind - }; + // all of the merge strategies are optional. 
They won't produce a value unless a value actually exists + let new_kind = if input_kind.contains_undefined() { + new_kind.or_undefined() + } else { + new_kind + }; - schema_definition = schema_definition.with_field(&key, new_kind, None); - } + schema_definition = schema_definition.with_field(&key, new_kind, None); + } - output_definitions.insert(output.clone(), schema_definition); + // the same schema definition is used for all inputs + let mut output_definitions = HashMap::new(); + for (output, _input) in input_definitions { + output_definitions.insert(output.clone(), schema_definition.clone()); } vec![TransformOutput::new(DataType::Log, output_definitions)] @@ -474,12 +482,15 @@ impl TaskTransform for Reduce { #[cfg(test)] mod test { + use enrichment::TableRegistry; use serde_json::json; + use std::sync::Arc; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use vrl::value::Kind; use super::*; + use crate::config::schema::Definition; use crate::event::{LogEvent, Value}; use crate::test_util::components::assert_transform_compliance; use crate::transforms::test::create_topology; @@ -528,18 +539,33 @@ group_by = [ "request_id" ] .schema_definitions(true) .clone(); + let new_schema_definition = reduce_config.outputs( + TableRegistry::default(), + &[(OutputId::from("in"), Definition::default_legacy_namespace())], + LogNamespace::Legacy, + )[0] + .clone() + .log_schema_definitions + .get(&OutputId::from("in")) + .unwrap() + .clone(); + let (tx, rx) = mpsc::channel(1); let (topology, mut out) = create_topology(ReceiverStream::new(rx), reduce_config).await; let mut e_1 = LogEvent::from("test message 1"); e_1.insert("counter", 1); e_1.insert("request_id", "1"); - let metadata_1 = e_1.metadata().clone(); + let mut metadata_1 = e_1.metadata().clone(); + metadata_1.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata_1.set_schema_definition(&Arc::new(new_schema_definition.clone())); let mut e_2 = LogEvent::from("test message 2"); e_2.insert("counter", 2); e_2.insert("request_id", "2"); - let metadata_2 = e_2.metadata().clone(); + let mut metadata_2 = e_2.metadata().clone(); + metadata_2.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata_2.set_schema_definition(&Arc::new(new_schema_definition.clone())); let mut e_3 = LogEvent::from("test message 3"); e_3.insert("counter", 3); @@ -603,6 +629,18 @@ merge_strategies.baz = "max" assert_transform_compliance(async move { let (tx, rx) = mpsc::channel(1); + + let new_schema_definition = reduce_config.outputs( + TableRegistry::default(), + &[(OutputId::from("in"), Definition::default_legacy_namespace())], + LogNamespace::Legacy, + )[0] + .clone() + .log_schema_definitions + .get(&OutputId::from("in")) + .unwrap() + .clone(); + let (topology, mut out) = create_topology(ReceiverStream::new(rx), reduce_config).await; let mut e_1 = LogEvent::from("test message 1"); @@ -610,7 +648,9 @@ merge_strategies.baz = "max" e_1.insert("bar", "first bar"); e_1.insert("baz", 2); e_1.insert("request_id", "1"); - let metadata = e_1.metadata().clone(); + let mut metadata = e_1.metadata().clone(); + metadata.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata.set_schema_definition(&Arc::new(new_schema_definition.clone())); tx.send(e_1.into()).await.unwrap(); let mut e_2 = LogEvent::from("test message 2"); @@ -660,17 +700,32 @@ group_by = [ "request_id" ] assert_transform_compliance(async move { let (tx, rx) = mpsc::channel(1); + let new_schema_definition = reduce_config.outputs( + TableRegistry::default(), + 
&[(OutputId::from("in"), Definition::default_legacy_namespace())], + LogNamespace::Legacy, + )[0] + .clone() + .log_schema_definitions + .get(&OutputId::from("in")) + .unwrap() + .clone(); + let (topology, mut out) = create_topology(ReceiverStream::new(rx), reduce_config).await; let mut e_1 = LogEvent::from("test message 1"); e_1.insert("counter", 1); e_1.insert("request_id", "1"); - let metadata_1 = e_1.metadata().clone(); + let mut metadata_1 = e_1.metadata().clone(); + metadata_1.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata_1.set_schema_definition(&Arc::new(new_schema_definition.clone())); tx.send(e_1.into()).await.unwrap(); let mut e_2 = LogEvent::from("test message 2"); e_2.insert("counter", 2); - let metadata_2 = e_2.metadata().clone(); + let mut metadata_2 = e_2.metadata().clone(); + metadata_2.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata_2.set_schema_definition(&Arc::new(new_schema_definition)); tx.send(e_2.into()).await.unwrap(); let mut e_3 = LogEvent::from("test message 3"); @@ -852,20 +907,37 @@ merge_strategies.bar = "concat" assert_transform_compliance(async move { let (tx, rx) = mpsc::channel(1); + + let new_schema_definition = reduce_config.outputs( + TableRegistry::default(), + &[(OutputId::from("in"), Definition::default_legacy_namespace())], + LogNamespace::Legacy, + )[0] + .clone() + .log_schema_definitions + .get(&OutputId::from("in")) + .unwrap() + .clone(); + let (topology, mut out) = create_topology(ReceiverStream::new(rx), reduce_config).await; let mut e_1 = LogEvent::from("test message 1"); e_1.insert("foo", json!([1, 3])); e_1.insert("bar", json!([1, 3])); e_1.insert("request_id", "1"); - let metadata_1 = e_1.metadata().clone(); + let mut metadata_1 = e_1.metadata().clone(); + metadata_1.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata_1.set_schema_definition(&Arc::new(new_schema_definition.clone())); + tx.send(e_1.into()).await.unwrap(); let mut e_2 = LogEvent::from("test message 2"); e_2.insert("foo", json!([2, 4])); e_2.insert("bar", json!([2, 4])); e_2.insert("request_id", "2"); - let metadata_2 = e_2.metadata().clone(); + let mut metadata_2 = e_2.metadata().clone(); + metadata_2.set_upstream_id(Arc::new(OutputId::from("transform"))); + metadata_2.set_schema_definition(&Arc::new(new_schema_definition)); tx.send(e_2.into()).await.unwrap(); let mut e_3 = LogEvent::from("test message 3"); diff --git a/src/transforms/remap.rs b/src/transforms/remap.rs index a6b01dbc8844d..0cbd2af1c119e 100644 --- a/src/transforms/remap.rs +++ b/src/transforms/remap.rs @@ -1,5 +1,4 @@ use std::collections::HashMap; -use std::sync::Arc; use std::{ collections::BTreeMap, fs::File, @@ -376,8 +375,6 @@ where drop_on_error: bool, drop_on_abort: bool, reroute_dropped: bool, - default_schema_definition: Arc, - dropped_schema_definition: Arc, runner: Runner, metric_tag_values: MetricTagValues, } @@ -444,28 +441,6 @@ where program: Program, runner: Runner, ) -> crate::Result { - let default_schema_definition = context - .schema_definitions - .get(&None) - .expect("default schema required") - // TODO we can now have multiple possible definitions. - // This is going to need to be updated to store these possible definitions and then - // choose the correct one based on the input the event has come from. 
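Since events from several inputs may now carry different definitions, the choice can no longer be made up front the way the removed code above did: `reduce` merges every input definition into one, and `remap` leaves the choice to the topology layer via `update_runtime_schema_definition`. A condensed sketch of the merge pattern, assuming the `Definition::merge` and `Definition::any` helpers used earlier in this patch; the function name is illustrative.

use vector_core::config::OutputId;
use vector_core::schema::Definition;

// Events may be combined across inputs, so the output definition is the merge of
// all input definitions rather than whichever input happened to come first.
fn merged_input_definition(input_definitions: &[(OutputId, Definition)]) -> Definition {
    input_definitions
        .iter()
        .map(|(_output, definition)| definition.clone())
        .reduce(Definition::merge)
        .unwrap_or_else(Definition::any)
}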
- .iter() - .map(|(_output, definition)| definition.clone()) - .next() - .unwrap_or_else(Definition::any); - - let dropped_schema_definition = context - .schema_definitions - .get(&Some(DROPPED.to_owned())) - .or_else(|| context.schema_definitions.get(&None)) - .expect("dropped schema required") - .iter() - .map(|(_output, definition)| definition.clone()) - .next() - .unwrap_or_else(Definition::any); - Ok(Remap { component_key: context.key.clone(), program, @@ -475,8 +450,6 @@ where drop_on_error: config.drop_on_error, drop_on_abort: config.drop_on_abort, reroute_dropped: config.reroute_dropped, - default_schema_definition: Arc::new(default_schema_definition), - dropped_schema_definition: Arc::new(dropped_schema_definition), runner, metric_tag_values: config.metric_tag_values, }) @@ -587,13 +560,11 @@ where match result { Ok(_) => match target.into_events() { - TargetEvents::One(event) => { - push_default(event, output, &self.default_schema_definition) + TargetEvents::One(event) => push_default(event, output), + TargetEvents::Logs(events) => events.for_each(|event| push_default(event, output)), + TargetEvents::Traces(events) => { + events.for_each(|event| push_default(event, output)) } - TargetEvents::Logs(events) => events - .for_each(|event| push_default(event, output, &self.default_schema_definition)), - TargetEvents::Traces(events) => events - .for_each(|event| push_default(event, output, &self.default_schema_definition)), }, Err(reason) => { let (reason, error, drop) = match reason { @@ -617,12 +588,12 @@ where if !drop { let event = original_event.expect("event will be set"); - push_default(event, output, &self.default_schema_definition); + push_default(event, output); } else if self.reroute_dropped { let mut event = original_event.expect("event will be set"); self.annotate_dropped(&mut event, reason, error); - push_dropped(event, output, &self.dropped_schema_definition); + push_dropped(event, output); } } } @@ -630,29 +601,13 @@ where } #[inline] -fn push_default( - mut event: Event, - output: &mut TransformOutputsBuf, - schema_definition: &Arc, -) { - event - .metadata_mut() - .set_schema_definition(schema_definition); - - output.push(event) +fn push_default(event: Event, output: &mut TransformOutputsBuf) { + output.push(None, event) } #[inline] -fn push_dropped( - mut event: Event, - output: &mut TransformOutputsBuf, - schema_definition: &Arc, -) { - event - .metadata_mut() - .set_schema_definition(schema_definition); - - output.push_named(DROPPED, event) +fn push_dropped(event: Event, output: &mut TransformOutputsBuf) { + output.push(Some(DROPPED), event); } /// If the VRL returns a value that is not an array (see [`merge_array_definitions`]), @@ -721,6 +676,7 @@ pub enum BuildError { #[cfg(test)] mod tests { use std::collections::{HashMap, HashSet}; + use std::sync::Arc; use indoc::{formatdoc, indoc}; use vector_core::{config::GlobalOptions, event::EventMetadata, metric_tags}; @@ -841,10 +797,6 @@ mod tests { let result1 = transform_one(&mut tform, event1).unwrap(); assert_eq!(get_field_string(&result1, "message"), "event1"); assert_eq!(get_field_string(&result1, "foo"), "bar"); - assert_eq!( - result1.metadata().schema_definition(), - &test_default_schema_definition() - ); assert!(tform.runner().runtime.is_empty()); let event2 = { @@ -854,10 +806,6 @@ mod tests { let result2 = transform_one(&mut tform, event2).unwrap(); assert_eq!(get_field_string(&result2, "message"), "event2"); assert_eq!(result2.as_log().get("foo"), Some(&Value::Null)); - assert_eq!( - 
result2.metadata().schema_definition(), - &test_default_schema_definition() - ); assert!(tform.runner().runtime.is_empty()); } @@ -889,11 +837,6 @@ mod tests { assert_eq!(get_field_string(&result, "foo"), "bar"); assert_eq!(get_field_string(&result, "bar"), "baz"); assert_eq!(get_field_string(&result, "copy"), "buz"); - - assert_eq!( - result.metadata().schema_definition(), - &test_default_schema_definition() - ); } #[test] @@ -927,17 +870,8 @@ mod tests { let r = result.next().unwrap(); assert_eq!(get_field_string(&r, "message"), "foo"); - assert_eq!( - r.metadata().schema_definition(), - &test_default_schema_definition() - ); let r = result.next().unwrap(); assert_eq!(get_field_string(&r, "message"), "bar"); - - assert_eq!( - r.metadata().schema_definition(), - &test_default_schema_definition() - ); } #[test] @@ -1103,7 +1037,9 @@ mod tests { "zork", MetricKind::Incremental, MetricValue::Counter { value: 1.0 }, - metadata.with_schema_definition(&Arc::new(test_default_schema_definition())), + // The schema definition is set in the topology, which isn't used in this test. Setting the definition + // to the actual value to skip the assertion here + metadata ) .with_namespace(Some("zerk")) .with_tags(Some(metric_tags! { @@ -1313,8 +1249,11 @@ mod tests { "counter", MetricKind::Absolute, MetricValue::Counter { value: 1.0 }, - EventMetadata::default() - .with_schema_definition(&Arc::new(test_default_schema_definition())), + // The schema definition is set in the topology, which isn't used in this test. Setting the definition + // to the actual value to skip the assertion here + EventMetadata::default().with_schema_definition(&Arc::new( + output.metadata().schema_definition().clone() + )), ) .with_tags(Some(metric_tags! { "hello" => "world", @@ -1331,8 +1270,11 @@ mod tests { "counter", MetricKind::Absolute, MetricValue::Counter { value: 1.0 }, - EventMetadata::default() - .with_schema_definition(&Arc::new(test_dropped_schema_definition())), + // The schema definition is set in the topology, which isn't used in this test. Setting the definition + // to the actual value to skip the assertion here + EventMetadata::default().with_schema_definition(&Arc::new( + output.metadata().schema_definition().clone() + )), ) .with_tags(Some(metric_tags! { "hello" => "goodbye", @@ -1352,8 +1294,11 @@ mod tests { "counter", MetricKind::Absolute, MetricValue::Counter { value: 1.0 }, - EventMetadata::default() - .with_schema_definition(&Arc::new(test_dropped_schema_definition())), + // The schema definition is set in the topology, which isn't used in this test. Setting the definition + // to the actual value to skip the assertion here + EventMetadata::default().with_schema_definition(&Arc::new( + output.metadata().schema_definition().clone() + )), ) .with_tags(Some(metric_tags! 
{ "not_hello" => "oops", diff --git a/src/transforms/route.rs b/src/transforms/route.rs index adcac43ff504c..e410277914a8f 100644 --- a/src/transforms/route.rs +++ b/src/transforms/route.rs @@ -42,13 +42,13 @@ impl SyncTransform for Route { for (output_name, condition) in &self.conditions { let (result, event) = condition.check(event.clone()); if result { - output.push_named(output_name, event); + output.push(Some(output_name), event); } else { check_failed += 1; } } if check_failed == self.conditions.len() { - output.push_named(UNMATCHED_ROUTE, event); + output.push(Some(UNMATCHED_ROUTE), event); } } } diff --git a/src/transforms/tag_cardinality_limit/tests.rs b/src/transforms/tag_cardinality_limit/tests.rs index 8488658e8ea55..5753d0176dd3b 100644 --- a/src/transforms/tag_cardinality_limit/tests.rs +++ b/src/transforms/tag_cardinality_limit/tests.rs @@ -1,9 +1,12 @@ use std::sync::Arc; use vector_common::config::ComponentKey; +use vector_core::config::OutputId; use vector_core::metric_tags; use super::*; +use crate::config::schema::Definition; +use crate::config::LogNamespace; use crate::event::metric::TagValue; use crate::event::{metric, Event, Metric, MetricTags}; use crate::test_util::components::assert_transform_compliance; @@ -13,6 +16,7 @@ use crate::transforms::tag_cardinality_limit::config::{ use crate::transforms::test::create_topology; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; +use vrl::compiler::prelude::Kind; #[test] fn generate_config() { @@ -88,6 +92,16 @@ async fn drop_event(config: TagCardinalityLimitConfig) { event1.set_source_id(Arc::new(ComponentKey::from("in"))); event2.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + event2.set_upstream_id(Arc::new(OutputId::from("transform"))); + + event1.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + event2.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + assert_eq!(new_event1, Some(event1)); assert_eq!(new_event2, Some(event2)); // Third value rejected since value_limit is 2. 
@@ -135,6 +149,20 @@ async fn drop_tag(config: TagCardinalityLimitConfig) { event2.set_source_id(Arc::new(ComponentKey::from("in"))); event3.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + event2.set_upstream_id(Arc::new(OutputId::from("transform"))); + event3.set_upstream_id(Arc::new(OutputId::from("transform"))); + + event1.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + event2.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + event3.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + assert_eq!(new_event1, Some(event1)); assert_eq!(new_event2, Some(event2)); // The third event should have been modified to remove "tag1" @@ -207,6 +235,21 @@ async fn drop_tag_multi_value(config: TagCardinalityLimitConfig) { event2.set_source_id(Arc::new(ComponentKey::from("in"))); event3.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + event2.set_upstream_id(Arc::new(OutputId::from("transform"))); + event3.set_upstream_id(Arc::new(OutputId::from("transform"))); + + // definitions aren't valid for metrics yet, it's just set to the default (anything). + event1.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + event2.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + event3.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + drop(tx); topology.stop().await; @@ -257,6 +300,21 @@ async fn separate_value_limit_per_tag(config: TagCardinalityLimitConfig) { event2.set_source_id(Arc::new(ComponentKey::from("in"))); event3.set_source_id(Arc::new(ComponentKey::from("in"))); + event1.set_upstream_id(Arc::new(OutputId::from("transform"))); + event2.set_upstream_id(Arc::new(OutputId::from("transform"))); + event3.set_upstream_id(Arc::new(OutputId::from("transform"))); + + // definitions aren't valid for metrics yet, it's just set to the default (anything). 
+ event1.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + event2.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + event3.metadata_mut().set_schema_definition(&Arc::new( + Definition::new_with_default_metadata(Kind::any_object(), [LogNamespace::Legacy]), + )); + assert_eq!(new_event1, Some(event1)); assert_eq!(new_event2, Some(event2)); assert_eq!(new_event3, Some(event3)); From d8d57e55c0c51c6fdb8c41f2fa48b0876ef4d356 Mon Sep 17 00:00:00 2001 From: Spencer Gilbert Date: Thu, 29 Jun 2023 14:17:44 -0400 Subject: [PATCH 210/236] chore: Revert all submodule introductions to fix CI (#17800) - Revert "chore: Fix publish workflow for older OS images (#17787)" - Revert "chore: Add submodules to all checkouts (#17770)" - Revert "chore: Download submodules in the CI checkouts (#17760)" --- .github/audit.yml | 4 +--- .github/workflows/changes.yml | 4 ---- .github/workflows/cli.yml | 3 --- .github/workflows/compilation-timings.yml | 10 --------- .github/workflows/component_features.yml | 3 --- .github/workflows/cross.yml | 3 --- .github/workflows/environment.yml | 3 --- .../gardener_remove_waiting_author.yml | 2 -- .github/workflows/install-sh.yml | 3 --- .github/workflows/integration-test.yml | 3 --- .github/workflows/k8s_e2e.yml | 6 ------ .github/workflows/misc.yml | 3 --- .github/workflows/msrv.yml | 2 -- .github/workflows/publish.yml | 21 ------------------- .github/workflows/regression.yml | 13 ------------ .github/workflows/test.yml | 1 - .github/workflows/unit_mac.yml | 3 --- .github/workflows/unit_windows.yml | 3 --- 18 files changed, 1 insertion(+), 89 deletions(-) diff --git a/.github/audit.yml b/.github/audit.yml index 6a3007248db03..83db5d60151bd 100644 --- a/.github/audit.yml +++ b/.github/audit.yml @@ -11,9 +11,7 @@ jobs: security_audit: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v3 - with: - submodules: "recursive" + - uses: actions/checkout@v1 - uses: actions-rs/audit-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/changes.yml b/.github/workflows/changes.yml index 4b6697fed8726..db6c4225073e0 100644 --- a/.github/workflows/changes.yml +++ b/.github/workflows/changes.yml @@ -119,8 +119,6 @@ jobs: k8s: ${{ steps.filter.outputs.k8s }} steps: - uses: actions/checkout@v3 - with: - submodules: "recursive" - uses: dorny/paths-filter@v2 id: filter @@ -214,8 +212,6 @@ jobs: webhdfs: ${{ steps.filter.outputs.webhdfs }} steps: - uses: actions/checkout@v3 - with: - submodules: "recursive" # creates a yaml file that contains the filters for each integration, # extracted from the output of the `vdev int ci-paths` command, which diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 2801cc78bb945..9d22fda8527bb 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -28,13 +28,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - name: Cache Cargo registry + index uses: actions/cache@v3 diff --git a/.github/workflows/compilation-timings.yml b/.github/workflows/compilation-timings.yml index 94616e04af086..e96bea65ea946 100644 --- a/.github/workflows/compilation-timings.yml +++ b/.github/workflows/compilation-timings.yml @@ -17,8 +17,6 @@ jobs: 
steps: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 - with: - submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: cargo clean @@ -35,8 +33,6 @@ jobs: steps: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 - with: - submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: cargo clean @@ -48,8 +44,6 @@ jobs: steps: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 - with: - submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: cargo clean @@ -61,8 +55,6 @@ jobs: steps: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 - with: - submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: cargo clean @@ -76,8 +68,6 @@ jobs: steps: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 - with: - submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh - run: cargo clean diff --git a/.github/workflows/component_features.yml b/.github/workflows/component_features.yml index a888fe6c83d05..2705483f598c4 100644 --- a/.github/workflows/component_features.yml +++ b/.github/workflows/component_features.yml @@ -26,13 +26,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: bash scripts/environment/prepare.sh diff --git a/.github/workflows/cross.yml b/.github/workflows/cross.yml index 869405b5b9076..9d36cb4cb3240 100644 --- a/.github/workflows/cross.yml +++ b/.github/workflows/cross.yml @@ -39,13 +39,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - uses: actions/cache@v3 name: Cache Cargo registry + index diff --git a/.github/workflows/environment.yml b/.github/workflows/environment.yml index 2c17383d169ac..cdddb0a980db8 100644 --- a/.github/workflows/environment.yml +++ b/.github/workflows/environment.yml @@ -34,13 +34,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - name: Set up QEMU uses: docker/setup-qemu-action@v2.2.0 diff --git a/.github/workflows/gardener_remove_waiting_author.yml b/.github/workflows/gardener_remove_waiting_author.yml index 37f6665034a14..9fe063e50b40d 100644 --- a/.github/workflows/gardener_remove_waiting_author.yml +++ b/.github/workflows/gardener_remove_waiting_author.yml @@ -9,8 +9,6 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - with: - submodules: "recursive" - uses: actions-ecosystem/action-remove-labels@v1 with: labels: "meta: awaiting author" diff --git a/.github/workflows/install-sh.yml b/.github/workflows/install-sh.yml index 83d7f9517035e..045319a191642 100644 --- a/.github/workflows/install-sh.yml +++ 
b/.github/workflows/install-sh.yml @@ -28,13 +28,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - run: pip3 install awscli --upgrade --user - env: diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index ffb741158c87e..4d8d635fcb477 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -54,13 +54,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - run: sudo npm -g install @datadog/datadog-ci diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index 61196de8befa0..50de3f5db9a2c 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -88,13 +88,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - uses: actions/cache@v3 with: @@ -208,13 +205,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/misc.yml b/.github/workflows/misc.yml index 035bbbc92a66e..4af7ab44a3bef 100644 --- a/.github/workflows/misc.yml +++ b/.github/workflows/misc.yml @@ -28,13 +28,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - uses: actions/cache@v3 name: Cache Cargo registry + index diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index c910d2394053d..9a82d1ddecbd0 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -16,8 +16,6 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - with: - submodules: "recursive" - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - run: cargo install cargo-msrv --version 0.15.1 - run: cargo msrv verify diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index e4ea253a558e8..e8cdfee808f6b 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -39,7 +39,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Generate publish metadata id: generate-publish-metadata run: make ci-generate-publish-metadata @@ -57,7 +56,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -83,7 +81,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner 
environment (generic) @@ -109,7 +106,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -137,7 +133,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -165,7 +160,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -193,7 +187,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Bootstrap runner environment (Ubuntu-specific) run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh - name: Bootstrap runner environment (generic) @@ -221,7 +214,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Bootstrap runner environment (macOS-specific) run: bash scripts/environment/bootstrap-macos-10.sh - name: Build Vector @@ -252,7 +244,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Bootstrap runner environment (Windows-specific) run: .\scripts\environment\bootstrap-windows-2019.ps1 - name: Install Wix @@ -320,11 +311,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - # Workaround for older OS images - # https://github.com/actions/checkout/issues/758 - - name: Checkout submodules - run: | - git submodule update --init --recursive - name: Download staged package artifacts (x86_64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: @@ -381,7 +367,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Download staged package artifacts (x86_64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: @@ -409,7 +394,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Download staged package artifacts (x86_64-apple-darwin) uses: actions/download-artifact@v3 with: @@ -440,7 +424,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Login to DockerHub uses: docker/login-action@v2.1.0 with: @@ -516,7 +499,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Download staged package artifacts (aarch64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: @@ -588,7 +570,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Download staged package artifacts (aarch64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: @@ -649,7 +630,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Publish update to Homebrew tap env: GITHUB_TOKEN: ${{ secrets.GH_PACKAGE_PUBLISHER_TOKEN }} @@ -675,7 +655,6 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ inputs.git_ref }} - submodules: "recursive" - name: Download staged package artifacts (aarch64-unknown-linux-gnu) uses: actions/download-artifact@v3 with: diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 
f3370293198ef..4d770d496486f 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -48,8 +48,6 @@ jobs: comment_valid: ${{ steps.comment.outputs.isTeamMember }} steps: - uses: actions/checkout@v3 - with: - submodules: "recursive" - name: Collect file changes id: changes @@ -131,7 +129,6 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 1000 - submodules: "recursive" # If triggered by issue comment, the event payload doesn't directly contain the head and base sha from the PR. # But, we can retrieve this info from some commands. @@ -290,14 +287,11 @@ jobs: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 - with: - submodules: "recursive" - uses: actions/checkout@v3 with: ref: ${{ needs.compute-metadata.outputs.baseline-sha }} path: baseline-vector - submodules: "recursive" - name: Set up Docker Buildx id: buildx @@ -330,14 +324,11 @@ jobs: - uses: colpal/actions-clean@v1 - uses: actions/checkout@v3 - with: - submodules: "recursive" - uses: actions/checkout@v3 with: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} path: comparison-vector - submodules: "recursive" - name: Set up Docker Buildx id: buildx @@ -482,7 +473,6 @@ jobs: - uses: actions/checkout@v3 with: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} - submodules: "recursive" - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2.2.0 @@ -602,8 +592,6 @@ jobs: - compute-metadata steps: - uses: actions/checkout@v3 - with: - submodules: "recursive" - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2.2.0 @@ -695,7 +683,6 @@ jobs: - uses: actions/checkout@v3 with: ref: ${{ needs.compute-metadata.outputs.comparison-sha }} - submodules: "recursive" - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v2.2.0 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7947529457c90..ca5184b5041c2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -44,7 +44,6 @@ jobs: with: # check-version needs tags fetch-depth: 0 # fetch everything - submodules: "recursive" - uses: actions/cache@v3 name: Cache Cargo registry + index diff --git a/.github/workflows/unit_mac.yml b/.github/workflows/unit_mac.yml index 22476d63efada..abda6ae1e177f 100644 --- a/.github/workflows/unit_mac.yml +++ b/.github/workflows/unit_mac.yml @@ -32,13 +32,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - uses: actions/cache@v3 name: Cache Cargo registry + index diff --git a/.github/workflows/unit_windows.yml b/.github/workflows/unit_windows.yml index 4d73e8f829a99..479d18938e4ec 100644 --- a/.github/workflows/unit_windows.yml +++ b/.github/workflows/unit_windows.yml @@ -27,13 +27,10 @@ jobs: uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - submodules: "recursive" - name: Checkout branch if: ${{ github.event_name != 'issue_comment' }} uses: actions/checkout@v3 - with: - submodules: "recursive" - run: .\scripts\environment\bootstrap-windows-2019.ps1 - run: make test From a3770d872708d7f28a1f834d31baa142f1f11ea4 Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Thu, 29 Jun 2023 14:25:11 -0400 Subject: [PATCH 211/236] chore(ci): remove /ci-run-install comment trigger (#17803) --- .github/workflows/comment-trigger.yml | 8 -------- 1 file changed, 8 deletions(-) 
diff --git a/.github/workflows/comment-trigger.yml b/.github/workflows/comment-trigger.yml index 7be887247ce73..84cabd825ccf3 100644 --- a/.github/workflows/comment-trigger.yml +++ b/.github/workflows/comment-trigger.yml @@ -14,7 +14,6 @@ # /ci-run-unit-mac : runs Unit - Mac # /ci-run-unit-windows : runs Unit - Windows # /ci-run-environment : runs Environment Suite -# /ci-run-install : runs Update install.sh Suite # /ci-run-regression : runs Regression Detection Suite name: Comment Trigger @@ -55,7 +54,6 @@ jobs: || contains(github.event.comment.body, '/ci-run-unit-mac') || contains(github.event.comment.body, '/ci-run-unit-windows') || contains(github.event.comment.body, '/ci-run-environment') - || contains(github.event.comment.body, '/ci-run-install') || contains(github.event.comment.body, '/ci-run-regression') ) steps: @@ -113,12 +111,6 @@ jobs: uses: ./.github/workflows/environment.yml secrets: inherit - install: - needs: validate - if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-install') - uses: ./.github/workflows/install-sh.yml - secrets: inherit - regression: needs: validate if: contains(github.event.comment.body, '/ci-run-all') || contains(github.event.comment.body, '/ci-run-regression') From f79947cf0125468b141ff8bc09d1c2bc6366780e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Jun 2023 18:26:22 +0000 Subject: [PATCH 212/236] chore(deps): Bump chrono-tz from 0.8.2 to 0.8.3 (#17789) Bumps [chrono-tz](https://github.com/chronotope/chrono-tz) from 0.8.2 to 0.8.3.
Changelog: sourced from chrono-tz's changelog ("Chrono-tz Changelog").

Commits
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=chrono-tz&package-manager=cargo&previous-version=0.8.2&new-version=0.8.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Dependabot commands and options

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- lib/vector-common/Cargo.toml | 2 +- lib/vector-config/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e96f6160f8dd2..41580f90deaa1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1813,9 +1813,9 @@ dependencies = [ [[package]] name = "chrono-tz" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9cc2b23599e6d7479755f3594285efb3f74a1bdca7a7374948bc831e23a552" +checksum = "f1369bc6b9e9a7dfdae2055f6ec151fe9c554a9d23d357c0237cee2e25eaabb7" dependencies = [ "chrono", "chrono-tz-build", @@ -1825,9 +1825,9 @@ dependencies = [ [[package]] name = "chrono-tz-build" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9998fb9f7e9b2111641485bf8beb32f92945f97f92a3d061f744cfef335f751" +checksum = "e2f5ebdc942f57ed96d560a6d1a459bae5851102a25d5bf89dc04ae453e31ecf" dependencies = [ "parse-zoneinfo", "phf", diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 7ea21e0baac9d..31c6c40a2ac5e 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -43,7 +43,7 @@ tokenize = [ [dependencies] async-stream = "0.3.5" bytes = { version = "1.4.0", default-features = false, optional = true } -chrono-tz = { version = "0.8.2", default-features = false, features = ["serde"] } +chrono-tz = { version = "0.8.3", default-features = false, features = ["serde"] } chrono = { version = "0.4", default-features = false, optional = true, features = ["clock"] } crossbeam-utils = { version = "0.8.16", default-features = false } derivative = { version = "2.2.0", default-features = false } diff --git a/lib/vector-config/Cargo.toml b/lib/vector-config/Cargo.toml index cc3684bc6951e..d843f0edb0a17 100644 --- a/lib/vector-config/Cargo.toml +++ b/lib/vector-config/Cargo.toml @@ -12,7 +12,7 @@ path = "tests/integration/lib.rs" [dependencies] chrono = { version = "0.4.19", default-features = false } -chrono-tz = { version = "0.8.2", default-features = false } +chrono-tz = { version = "0.8.3", default-features = false } encoding_rs = { version = "0.8", default-features = false, features = ["alloc", "serde"] } indexmap = { version = "2.0", default-features = false, features = ["std"] } inventory = { version = "0.3" } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 7317a47e0ea70..7e2b0ff88c137 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -78,7 +78,7 @@ prost-build = "0.11" [dev-dependencies] base64 = "0.21.2" -chrono-tz = { version = "0.8.2", default-features = false } +chrono-tz = { version = "0.8.3", default-features = false } criterion = { version = "0.5.1", features = ["html_reports"] } env-test-util = "1.0.1" quickcheck = "1" From e6e776ddb7c93db243f989413581d9116275939e Mon Sep 17 00:00:00 2001 From: neuronull Date: Thu, 29 Jun 2023 12:30:50 -0600 Subject: [PATCH 213/236] chore(ci): move component features check out of merge queue (#17773) Moves the most costly (both $ and time) status check out of the merge queue into a fixed schedule. The workflow can still be run on demand by a PR comment, or from the GHA UI. 
--- .github/workflows/component_features.yml | 27 ++++++++++++++++++------ .github/workflows/master_merge_queue.yml | 7 ------ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/.github/workflows/component_features.yml b/.github/workflows/component_features.yml index 2705483f598c4..eec8309e3a0d1 100644 --- a/.github/workflows/component_features.yml +++ b/.github/workflows/component_features.yml @@ -1,20 +1,35 @@ +# Component Features - Linux +# +# Validates that each component feature compiles +# +# Runs on: +# - scheduled UTC midnight Tues-Sat +# - on PR comment (see comment-trigger.yml) +# - on demand from github actions UI + name: Component Features - Linux on: workflow_call: + workflow_dispatch: + schedule: + # At midnight UTC Tue-Sat + - cron: '0 0 * * 2-6' jobs: check-component-features: - runs-on: [linux, ubuntu-20.04-8core] + # use free tier on schedule and 8 core to expedite results on demand invocation + runs-on: ${{ github.event_name == 'schedule' && 'ubuntu-latest' || fromJSON('["linux", "ubuntu-20.04-8core"]') }} + if: github.event_name == 'issue_comment' || github.event_name == 'workflow_dispatch' steps: - name: (PR comment) Get PR branch - if: ${{ github.event_name == 'issue_comment' }} + if: github.event_name == 'issue_comment' uses: xt0rted/pull-request-comment-branch@v2 id: comment-branch - name: (PR comment) Set latest commit status as pending + if: github.event_name == 'issue_comment' uses: myrotvorets/set-commit-status-action@v1.1.7 - if: ${{ github.event_name == 'issue_comment' }} with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} @@ -22,13 +37,13 @@ jobs: status: pending - name: (PR comment) Checkout PR branch - if: ${{ github.event_name == 'issue_comment' }} + if: github.event_name == 'issue_comment' uses: actions/checkout@v3 with: ref: ${{ steps.comment-branch.outputs.head_ref }} - name: Checkout branch - if: ${{ github.event_name != 'issue_comment' }} + if: github.event_name != 'issue_comment' uses: actions/checkout@v3 - run: sudo -E bash scripts/environment/bootstrap-ubuntu-20.04.sh @@ -37,8 +52,8 @@ jobs: - run: make check-component-features - name: (PR comment) Set latest commit status as ${{ job.status }} - uses: myrotvorets/set-commit-status-action@v1.1.7 if: always() && github.event_name == 'issue_comment' + uses: myrotvorets/set-commit-status-action@v1.1.7 with: sha: ${{ steps.comment-branch.outputs.head_sha }} token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/master_merge_queue.yml b/.github/workflows/master_merge_queue.yml index eaf5b7bf08295..63b0a8805a2dd 100644 --- a/.github/workflows/master_merge_queue.yml +++ b/.github/workflows/master_merge_queue.yml @@ -74,12 +74,6 @@ jobs: needs: changes secrets: inherit - check-component-features: - if: needs.changes.outputs.source == 'true' - uses: ./.github/workflows/component_features.yml - needs: changes - secrets: inherit - cross-linux: # We run cross checks when dependencies change to ensure they still build. # This helps us avoid adopting dependencies that aren't compatible with other architectures. @@ -117,7 +111,6 @@ jobs: - test-misc - test-environment - check-msrv - - check-component-features - cross-linux - unit-mac - unit-windows From ed59f37e006d63130413e7a4ed21042f4b90dd0e Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Thu, 29 Jun 2023 12:11:20 -0700 Subject: [PATCH 214/236] chore(ci): Remove remaining Discord notification (#17805) This goes to Slack now. 
Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- .github/workflows/publish.yml | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index e8cdfee808f6b..ae7055dd7cbc4 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -748,32 +748,3 @@ jobs: release: "any-version" republish: "true" file: "target/artifacts/vector-${{ env.VECTOR_VERSION }}-1.armv7.rpm" - - publish-failure: - name: Send Publish Failure Notification - if: failure() && inputs.channel != 'custom' - runs-on: ubuntu-20.04 - needs: - - generate-publish-metadata - - build-x86_64-unknown-linux-gnu-packages - - build-x86_64-unknown-linux-musl-packages - - build-aarch64-unknown-linux-musl-packages - - build-aarch64-unknown-linux-gnu-packages - - build-x86_64-apple-darwin-packages - - build-x86_64-pc-windows-msvc-packages - - build-armv7-unknown-linux-gnueabihf-packages - - build-armv7-unknown-linux-musleabihf-packages - - deb-verify - - rpm-verify - - macos-verify - - publish-docker - - publish-s3 - - publish-cloudsmith - - publish-github - steps: - - name: Send notification to Discord - env: - DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }} - uses: Ilshidur/action-discord@0.3.2 - with: - args: "${{ inputs.channel }} failed: " From 659e1e69f56d32939871bc097c6eeb0b950012db Mon Sep 17 00:00:00 2001 From: Artur Malchanau Date: Thu, 29 Jun 2023 22:47:05 +0300 Subject: [PATCH 215/236] fix(config): Fix preloading log_schema (#17759) Preload global parameter log_schema before compiling sources config. It fixes an issue with globally configured log_schema and sources in separate files. Revert 3e971fb9d9503ddac3a238cd6dc643703e725081 --------- Signed-off-by: Artur Malchanau Co-authored-by: Bruce Guenter --- src/app.rs | 6 ++- src/config/mod.rs | 12 +++++- src/config/unit_test/mod.rs | 15 +------- src/validate.rs | 4 +- tests/integration/shutdown.rs | 72 +++++++++++++++++++++++++++++++++++ 5 files changed, 91 insertions(+), 18 deletions(-) diff --git a/src/app.rs b/src/app.rs index 49d57c0db9523..f306504d211c1 100644 --- a/src/app.rs +++ b/src/app.rs @@ -466,12 +466,14 @@ pub async fn load_configs( paths = ?config_paths.iter().map(<&PathBuf>::from).collect::>() ); + // config::init_log_schema should be called before initializing sources. + #[cfg(not(feature = "enterprise-tests"))] + config::init_log_schema(&config_paths, true).map_err(handle_config_errors)?; + let mut config = config::load_from_paths_with_provider_and_secrets(&config_paths, signal_handler) .await .map_err(handle_config_errors)?; - #[cfg(not(feature = "enterprise-tests"))] - config::init_log_schema(config.global.log_schema.clone(), true); config::init_telemetry(config.global.telemetry.clone(), true); diff --git a/src/config/mod.rs b/src/config/mod.rs index b4591199ef886..40d0733f15cb0 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -63,9 +63,19 @@ pub use unit_test::{build_unit_tests, build_unit_tests_main, UnitTestResult}; pub use validation::warnings; pub use vars::{interpolate, ENVIRONMENT_VARIABLE_INTERPOLATION_REGEX}; pub use vector_core::config::{ - init_log_schema, init_telemetry, log_schema, proxy::ProxyConfig, telemetry, LogSchema, OutputId, + init_telemetry, log_schema, proxy::ProxyConfig, telemetry, LogSchema, OutputId, }; +/// Loads Log Schema from configurations and sets global schema. +/// Once this is done, configurations can be correctly loaded using +/// configured log schema defaults. 
+/// If deny is set, will panic if schema has already been set. +pub fn init_log_schema(config_paths: &[ConfigPath], deny_if_set: bool) -> Result<(), Vec> { + let (builder, _) = load_builder_from_paths(config_paths)?; + vector_core::config::init_log_schema(builder.global.log_schema, deny_if_set); + Ok(()) +} + #[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq)] pub enum ConfigPath { File(PathBuf, FormatHint), diff --git a/src/config/unit_test/mod.rs b/src/config/unit_test/mod.rs index 62b9dabcd70ad..3bfd52ad4b7ee 100644 --- a/src/config/unit_test/mod.rs +++ b/src/config/unit_test/mod.rs @@ -72,24 +72,11 @@ impl UnitTest { } } -/// Loads Log Schema from configurations and sets global schema. -/// Once this is done, configurations can be correctly loaded using -/// configured log schema defaults. -/// If deny is set, will panic if schema has already been set. -fn init_log_schema_from_paths( - config_paths: &[ConfigPath], - deny_if_set: bool, -) -> Result<(), Vec> { - let (builder, _) = config::loading::load_builder_from_paths(config_paths)?; - vector_core::config::init_log_schema(builder.global.log_schema, deny_if_set); - Ok(()) -} - pub async fn build_unit_tests_main( paths: &[ConfigPath], signal_handler: &mut signal::SignalHandler, ) -> Result, Vec> { - init_log_schema_from_paths(paths, false)?; + config::init_log_schema(paths, false)?; let (mut secrets_backends_loader, _) = loading::load_secret_backends_from_paths(paths)?; let (config_builder, _) = if secrets_backends_loader.has_secrets_to_retrieve() { let resolved_secrets = secrets_backends_loader diff --git a/src/validate.rs b/src/validate.rs index f11d41d1845f8..0803e11423a2a 100644 --- a/src/validate.rs +++ b/src/validate.rs @@ -135,10 +135,12 @@ pub fn validate_config(opts: &Opts, fmt: &mut Formatter) -> Option { fmt.title(format!("Failed to load {:?}", &paths_list)); fmt.sub_error(errors); }; + config::init_log_schema(&paths, true) + .map_err(&mut report_error) + .ok()?; let (builder, load_warnings) = config::load_builder_from_paths(&paths) .map_err(&mut report_error) .ok()?; - config::init_log_schema(builder.global.log_schema.clone(), true); // Build let (config, build_warnings) = builder diff --git a/tests/integration/shutdown.rs b/tests/integration/shutdown.rs index 1df88120bfb99..e4f0f75fa7739 100644 --- a/tests/integration/shutdown.rs +++ b/tests/integration/shutdown.rs @@ -1,4 +1,5 @@ use std::{ + fs::create_dir, fs::read_dir, io::Write, net::SocketAddr, @@ -200,6 +201,77 @@ fn log_schema() { assert_eq!(event["test_msg"], json!("42")); } +#[test] +fn log_schema_multiple_config_files() { + // Vector command + let mut cmd = Command::cargo_bin("vector").unwrap(); + + let config_dir = create_directory(); + + let sinks_config_dir = config_dir.join("sinks"); + create_dir(sinks_config_dir.clone()).unwrap(); + + let sources_config_dir = config_dir.join("sources"); + create_dir(sources_config_dir.clone()).unwrap(); + + let input_dir = create_directory(); + let input_file = input_dir.join("input_file"); + + overwrite_file( + config_dir.join("vector.toml"), + r#" + data_dir = "${VECTOR_DATA_DIR}" + log_schema.host_key = "test_host" + "#, + ); + + overwrite_file( + sources_config_dir.join("in_file.toml"), + r#" + type = "file" + include = ["${VECTOR_TEST_INPUT_FILE}"] + "#, + ); + + overwrite_file( + sinks_config_dir.join("out_console.toml"), + r#" + inputs = ["in_file"] + type = "console" + encoding.codec = "json" + "#, + ); + + overwrite_file( + input_file.clone(), + r#"42 + "#, + ); + + cmd.arg("--quiet") + 
.env("VECTOR_CONFIG_DIR", config_dir) + .env("VECTOR_DATA_DIR", create_directory()) + .env("VECTOR_TEST_INPUT_FILE", input_file.clone()); + + // Run vector + let vector = cmd.stdout(std::process::Stdio::piped()).spawn().unwrap(); + + // Give vector time to start. + sleep(STARTUP_TIME); + + // Signal shutdown + kill(Pid::from_raw(vector.id() as i32), Signal::SIGTERM).unwrap(); + + // Wait for shutdown + let output = vector.wait_with_output().unwrap(); + assert!(output.status.success(), "Vector didn't exit successfully."); + + // Output + let event: Value = serde_json::from_slice(output.stdout.as_slice()).unwrap(); + assert_eq!(event["message"], json!("42")); + assert_eq!(event["test_host"], json!("runner")); +} + #[test] fn configuration_path_recomputed() { // Directory with configuration files From e66e285cbd944bcb65b1262fb91bc1913b1885a6 Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Thu, 29 Jun 2023 14:10:41 -0600 Subject: [PATCH 216/236] chore(sinks): Drop the custom `SinkContext::default` implementation (#17804) The `fn SinkContext::new_test` is actually just `SinkContext::default` in disguise, so drop the custom function in favour of the auto-derived implementation. --- src/config/sink.rs | 12 +------- src/sinks/amqp/integration_tests.rs | 4 +-- src/sinks/appsignal/mod.rs | 2 +- .../aws_cloudwatch_logs/integration_tests.rs | 12 ++++---- .../integration_tests.rs | 4 +-- .../aws_kinesis/firehose/integration_tests.rs | 2 +- src/sinks/aws_kinesis/firehose/tests.rs | 4 +-- .../aws_kinesis/streams/integration_tests.rs | 4 +-- src/sinks/aws_s3/integration_tests.rs | 18 +++++------ src/sinks/axiom.rs | 2 +- src/sinks/azure_monitor_logs.rs | 6 ++-- src/sinks/clickhouse/integration_tests.rs | 12 ++++---- src/sinks/databend/integration_tests.rs | 2 +- src/sinks/elasticsearch/common.rs | 2 +- src/sinks/elasticsearch/integration_tests.rs | 6 ++-- src/sinks/gcp/chronicle_unstructured.rs | 2 +- src/sinks/gcp/cloud_storage.rs | 2 +- src/sinks/gcp/pubsub.rs | 4 +-- src/sinks/gcp/stackdriver_logs.rs | 4 +-- src/sinks/gcp/stackdriver_metrics.rs | 2 +- src/sinks/honeycomb.rs | 2 +- src/sinks/http.rs | 8 ++--- src/sinks/humio/logs.rs | 8 ++--- src/sinks/influxdb/logs.rs | 2 +- src/sinks/influxdb/metrics.rs | 4 +-- src/sinks/new_relic/tests.rs | 2 +- src/sinks/papertrail.rs | 2 +- src/sinks/prometheus/exporter.rs | 10 +++---- src/sinks/prometheus/remote_write.rs | 4 +-- src/sinks/socket.rs | 8 ++--- .../splunk_hec/logs/integration_tests.rs | 30 +++++++++---------- src/sinks/splunk_hec/logs/tests.rs | 2 +- .../splunk_hec/metrics/integration_tests.rs | 8 ++--- src/sinks/splunk_hec/metrics/tests.rs | 2 +- src/sinks/statsd/tests.rs | 2 +- src/sinks/util/test.rs | 2 +- src/sinks/vector/mod.rs | 4 +-- src/sinks/webhdfs/integration_tests.rs | 4 +-- src/sinks/websocket/sink.rs | 4 +-- src/sources/prometheus/remote_write.rs | 4 +-- src/sources/splunk_hec/mod.rs | 2 +- src/sources/vector/mod.rs | 2 +- 42 files changed, 106 insertions(+), 116 deletions(-) diff --git a/src/config/sink.rs b/src/config/sink.rs index c0d6aba694b07..bf865eec40463 100644 --- a/src/config/sink.rs +++ b/src/config/sink.rs @@ -235,7 +235,7 @@ pub trait SinkConfig: DynClone + NamedComponent + core::fmt::Debug + Send + Sync dyn_clone::clone_trait_object!(SinkConfig); -#[derive(Debug, Clone)] +#[derive(Clone, Debug, Default)] pub struct SinkContext { pub healthcheck: SinkHealthcheckOptions, pub globals: GlobalOptions, @@ -244,16 +244,6 @@ pub struct SinkContext { } impl SinkContext { - #[cfg(test)] - pub fn new_test() -> Self { - Self { - 
healthcheck: SinkHealthcheckOptions::default(), - globals: GlobalOptions::default(), - proxy: ProxyConfig::default(), - schema: schema::Options::default(), - } - } - pub const fn globals(&self) -> &GlobalOptions { &self.globals } diff --git a/src/sinks/amqp/integration_tests.rs b/src/sinks/amqp/integration_tests.rs index ba8a9ee557022..08db9c6e46b27 100644 --- a/src/sinks/amqp/integration_tests.rs +++ b/src/sinks/amqp/integration_tests.rs @@ -72,7 +72,7 @@ async fn amqp_happy_path() { .await .unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, healthcheck) = config.build(cx).await.unwrap(); healthcheck.await.expect("Health check failed"); @@ -153,7 +153,7 @@ async fn amqp_round_trip() { .await .unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (amqp_sink, healthcheck) = config.build(cx).await.unwrap(); healthcheck.await.expect("Health check failed"); diff --git a/src/sinks/appsignal/mod.rs b/src/sinks/appsignal/mod.rs index 32c1247192368..31cb7a8e2ecc2 100644 --- a/src/sinks/appsignal/mod.rs +++ b/src/sinks/appsignal/mod.rs @@ -251,7 +251,7 @@ mod test { .expect("config should be valid"); config.endpoint = mock_endpoint.to_string(); - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); let event = Event::Log(LogEvent::from("simple message")); diff --git a/src/sinks/aws_cloudwatch_logs/integration_tests.rs b/src/sinks/aws_cloudwatch_logs/integration_tests.rs index ece20f2fa1311..c0dbf0204260a 100644 --- a/src/sinks/aws_cloudwatch_logs/integration_tests.rs +++ b/src/sinks/aws_cloudwatch_logs/integration_tests.rs @@ -51,7 +51,7 @@ async fn cloudwatch_insert_log_event() { acknowledgements: Default::default(), }; - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let timestamp = chrono::Utc::now(); @@ -101,7 +101,7 @@ async fn cloudwatch_insert_log_events_sorted() { acknowledgements: Default::default(), }; - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let timestamp = chrono::Utc::now() - Duration::days(1); @@ -176,7 +176,7 @@ async fn cloudwatch_insert_out_of_range_timestamp() { acknowledgements: Default::default(), }; - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let now = chrono::Utc::now(); @@ -255,7 +255,7 @@ async fn cloudwatch_dynamic_group_and_stream_creation() { acknowledgements: Default::default(), }; - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let timestamp = chrono::Utc::now(); @@ -310,7 +310,7 @@ async fn cloudwatch_insert_log_event_batched() { acknowledgements: Default::default(), }; - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let timestamp = chrono::Utc::now(); @@ -360,7 +360,7 @@ async fn cloudwatch_insert_log_event_partitioned() { acknowledgements: Default::default(), }; - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let timestamp = chrono::Utc::now(); diff --git a/src/sinks/aws_cloudwatch_metrics/integration_tests.rs 
b/src/sinks/aws_cloudwatch_metrics/integration_tests.rs index 57d88e704dacd..3af1c28597fc3 100644 --- a/src/sinks/aws_cloudwatch_metrics/integration_tests.rs +++ b/src/sinks/aws_cloudwatch_metrics/integration_tests.rs @@ -35,7 +35,7 @@ async fn cloudwatch_metrics_healthcheck() { #[tokio::test] async fn cloudwatch_metrics_put_data() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = config(); let client = config.create_client(&cx.globals.proxy).await.unwrap(); let sink = CloudWatchMetricsSvc::new(config, client).unwrap(); @@ -94,7 +94,7 @@ async fn cloudwatch_metrics_put_data() { #[tokio::test] async fn cloudwatch_metrics_namespace_partitioning() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = config(); let client = config.create_client(&cx.globals.proxy).await.unwrap(); let sink = CloudWatchMetricsSvc::new(config, client).unwrap(); diff --git a/src/sinks/aws_kinesis/firehose/integration_tests.rs b/src/sinks/aws_kinesis/firehose/integration_tests.rs index 42d72009cace6..648d5082a1ad7 100644 --- a/src/sinks/aws_kinesis/firehose/integration_tests.rs +++ b/src/sinks/aws_kinesis/firehose/integration_tests.rs @@ -62,7 +62,7 @@ async fn firehose_put_records() { let config = KinesisFirehoseSinkConfig { batch, base }; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _) = config.build(cx).await.unwrap(); diff --git a/src/sinks/aws_kinesis/firehose/tests.rs b/src/sinks/aws_kinesis/firehose/tests.rs index 54c55d9efee1d..bb6b94729d344 100644 --- a/src/sinks/aws_kinesis/firehose/tests.rs +++ b/src/sinks/aws_kinesis/firehose/tests.rs @@ -39,7 +39,7 @@ async fn check_batch_size() { let config = KinesisFirehoseSinkConfig { batch, base }; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let res = config.build(cx).await; assert_eq!( @@ -69,7 +69,7 @@ async fn check_batch_events() { let config = KinesisFirehoseSinkConfig { batch, base }; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let res = config.build(cx).await; assert_eq!( diff --git a/src/sinks/aws_kinesis/streams/integration_tests.rs b/src/sinks/aws_kinesis/streams/integration_tests.rs index 6f25a733d7d08..a800ff2b2960a 100644 --- a/src/sinks/aws_kinesis/streams/integration_tests.rs +++ b/src/sinks/aws_kinesis/streams/integration_tests.rs @@ -52,7 +52,7 @@ fn kinesis_address() -> String { // base, // }; // -// let cx = SinkContext::new_test(); +// let cx = SinkContext::default(); // // let sink = config.build(cx).await.unwrap().0; // @@ -107,7 +107,7 @@ async fn kinesis_put_records_without_partition_key() { base, }; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let sink = config.build(cx).await.unwrap().0; diff --git a/src/sinks/aws_s3/integration_tests.rs b/src/sinks/aws_s3/integration_tests.rs index e9ae49a4db181..5a5cfeb3248df 100644 --- a/src/sinks/aws_s3/integration_tests.rs +++ b/src/sinks/aws_s3/integration_tests.rs @@ -51,7 +51,7 @@ fn s3_address() -> String { #[tokio::test] async fn s3_insert_message_into_with_flat_key_prefix() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let bucket = uuid::Uuid::new_v4().to_string(); @@ -85,7 +85,7 @@ async fn s3_insert_message_into_with_flat_key_prefix() { #[tokio::test] async fn s3_insert_message_into_with_folder_key_prefix() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let bucket = uuid::Uuid::new_v4().to_string(); @@ -119,7 +119,7 @@ async fn 
s3_insert_message_into_with_folder_key_prefix() { #[tokio::test] async fn s3_insert_message_into_with_ssekms_key_id() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let bucket = uuid::Uuid::new_v4().to_string(); @@ -156,7 +156,7 @@ async fn s3_insert_message_into_with_ssekms_key_id() { #[tokio::test] async fn s3_rotate_files_after_the_buffer_size_is_reached() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let bucket = uuid::Uuid::new_v4().to_string(); @@ -213,7 +213,7 @@ async fn s3_gzip() { // to 1000, and using gzip compression. We test to ensure that all of the keys we end up // writing represent the sum total of the lines: we expect 3 batches, each of which should // have 1000 lines. - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let bucket = uuid::Uuid::new_v4().to_string(); @@ -258,7 +258,7 @@ async fn s3_zstd() { // to 1000, and using zstd compression. We test to ensure that all of the keys we end up // writing represent the sum total of the lines: we expect 3 batches, each of which should // have 1000 lines. - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let bucket = uuid::Uuid::new_v4().to_string(); @@ -303,7 +303,7 @@ async fn s3_zstd() { // https://github.com/localstack/localstack/issues/4166 #[tokio::test] async fn s3_insert_message_into_object_lock() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let bucket = uuid::Uuid::new_v4().to_string(); @@ -357,7 +357,7 @@ async fn s3_insert_message_into_object_lock() { #[tokio::test] async fn acknowledges_failures() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let bucket = uuid::Uuid::new_v4().to_string(); @@ -408,7 +408,7 @@ async fn s3_healthchecks_invalid_bucket() { #[tokio::test] async fn s3_flush_on_exhaustion() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let bucket = uuid::Uuid::new_v4().to_string(); create_bucket(&bucket, false).await; diff --git a/src/sinks/axiom.rs b/src/sinks/axiom.rs index b94c7f6e7f69c..54fa54025430a 100644 --- a/src/sinks/axiom.rs +++ b/src/sinks/axiom.rs @@ -162,7 +162,7 @@ mod integration_tests { assert!(!token.is_empty(), "$AXIOM_TOKEN required"); let dataset = env::var("AXIOM_DATASET").unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = AxiomConfig { url: Some(url.clone()), diff --git a/src/sinks/azure_monitor_logs.rs b/src/sinks/azure_monitor_logs.rs index aef40146c8d2a..03ef6de56c267 100644 --- a/src/sinks/azure_monitor_logs.rs +++ b/src/sinks/azure_monitor_logs.rs @@ -459,7 +459,7 @@ mod tests { default_headers: HeaderMap::new(), }; - let context = SinkContext::new_test(); + let context = SinkContext::default(); let client = HttpClient::new(None, &context.proxy).expect("should not fail to create HTTP client"); @@ -617,7 +617,7 @@ mod tests { "#, ) .unwrap(); - if config.build(SinkContext::new_test()).await.is_ok() { + if config.build(SinkContext::default()).await.is_ok() { panic!("config.build failed to error"); } } @@ -657,7 +657,7 @@ mod tests { "#, ) .unwrap(); - if config.build(SinkContext::new_test()).await.is_ok() { + if config.build(SinkContext::default()).await.is_ok() { panic!("config.build failed to error"); } } diff --git a/src/sinks/clickhouse/integration_tests.rs b/src/sinks/clickhouse/integration_tests.rs index 1061b8ddf8e11..21acee5aecc15 100644 --- a/src/sinks/clickhouse/integration_tests.rs +++ b/src/sinks/clickhouse/integration_tests.rs @@ -63,7 +63,7 @@ async 
fn insert_events() { ) .await; - let (sink, _hc) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _hc) = config.build(SinkContext::default()).await.unwrap(); let (mut input_event, mut receiver) = make_event(); input_event @@ -114,7 +114,7 @@ async fn skip_unknown_fields() { .create_table(&table, "host String, timestamp String, message String") .await; - let (sink, _hc) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _hc) = config.build(SinkContext::default()).await.unwrap(); let (mut input_event, mut receiver) = make_event(); input_event.as_mut_log().insert("unknown", "mysteries"); @@ -167,7 +167,7 @@ async fn insert_events_unix_timestamps() { ) .await; - let (sink, _hc) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _hc) = config.build(SinkContext::default()).await.unwrap(); let (mut input_event, _receiver) = make_event(); @@ -235,7 +235,7 @@ timestamp_format = "unix""#, ) .await; - let (sink, _hc) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _hc) = config.build(SinkContext::default()).await.unwrap(); let (mut input_event, _receiver) = make_event(); @@ -298,7 +298,7 @@ async fn no_retry_on_incorrect_data() { .create_table(&table, "host String, timestamp String") .await; - let (sink, _hc) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _hc) = config.build(SinkContext::default()).await.unwrap(); let (input_event, mut receiver) = make_event(); @@ -340,7 +340,7 @@ async fn no_retry_on_incorrect_data_warp() { batch, ..Default::default() }; - let (sink, _hc) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _hc) = config.build(SinkContext::default()).await.unwrap(); let (input_event, mut receiver) = make_event(); diff --git a/src/sinks/databend/integration_tests.rs b/src/sinks/databend/integration_tests.rs index b8c647c6f7d2d..5438775a4d385 100644 --- a/src/sinks/databend/integration_tests.rs +++ b/src/sinks/databend/integration_tests.rs @@ -124,7 +124,7 @@ async fn insert_event_with_cfg(cfg: String, table: String, client: DatabendAPICl .unwrap(); let (config, _) = load_sink::(&cfg).unwrap(); - let (sink, _hc) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _hc) = config.build(SinkContext::default()).await.unwrap(); let (input_event, mut receiver) = make_event(); run_and_assert_sink_compliance( diff --git a/src/sinks/elasticsearch/common.rs b/src/sinks/elasticsearch/common.rs index f1a90ac4c9576..d5c5b214b5449 100644 --- a/src/sinks/elasticsearch/common.rs +++ b/src/sinks/elasticsearch/common.rs @@ -238,7 +238,7 @@ impl ElasticsearchCommon { #[cfg(test)] pub async fn parse_single(config: &ElasticsearchConfig) -> crate::Result { let mut commons = - Self::parse_many(config, crate::config::SinkContext::new_test().proxy()).await?; + Self::parse_many(config, crate::config::SinkContext::default().proxy()).await?; assert_eq!(commons.len(), 1); Ok(commons.remove(0)) } diff --git a/src/sinks/elasticsearch/integration_tests.rs b/src/sinks/elasticsearch/integration_tests.rs index 733cc1acaca22..023371e6f4fe9 100644 --- a/src/sinks/elasticsearch/integration_tests.rs +++ b/src/sinks/elasticsearch/integration_tests.rs @@ -146,7 +146,7 @@ async fn structures_events_correctly() { .expect("Config error"); let base_url = common.base_url.clone(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _hc) = config.build(cx.clone()).await.unwrap(); let (batch, mut receiver) = BatchNotifier::new_with_receiver(); @@ -555,7 +555,7 @@ async fn 
run_insert_tests_with_config( }; let base_url = common.base_url.clone(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, healthcheck) = config .build(cx.clone()) .await @@ -639,7 +639,7 @@ async fn run_insert_tests_with_config( } async fn run_insert_tests_with_multiple_endpoints(config: &ElasticsearchConfig) { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let commons = ElasticsearchCommon::parse_many(config, cx.proxy()) .await .expect("Config error"); diff --git a/src/sinks/gcp/chronicle_unstructured.rs b/src/sinks/gcp/chronicle_unstructured.rs index ddf70f31e4e25..36a537cb548b6 100644 --- a/src/sinks/gcp/chronicle_unstructured.rs +++ b/src/sinks/gcp/chronicle_unstructured.rs @@ -541,7 +541,7 @@ mod integration_tests { log_type: &str, auth_path: &str, ) -> crate::Result<(VectorSink, crate::sinks::Healthcheck)> { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); config(log_type, auth_path).build(cx).await } diff --git a/src/sinks/gcp/cloud_storage.rs b/src/sinks/gcp/cloud_storage.rs index 5b172586ee0cf..ff4f6bb378a21 100644 --- a/src/sinks/gcp/cloud_storage.rs +++ b/src/sinks/gcp/cloud_storage.rs @@ -429,7 +429,7 @@ mod tests { async fn component_spec_compliance() { let mock_endpoint = spawn_blackhole_http_server(always_200_response).await; - let context = SinkContext::new_test(); + let context = SinkContext::default(); let tls = TlsSettings::default(); let client = diff --git a/src/sinks/gcp/pubsub.rs b/src/sinks/gcp/pubsub.rs index a950c3dc46dab..5d2dc458d499a 100644 --- a/src/sinks/gcp/pubsub.rs +++ b/src/sinks/gcp/pubsub.rs @@ -260,7 +260,7 @@ mod tests { encoding.codec = "json" "#}) .unwrap(); - if config.build(SinkContext::new_test()).await.is_ok() { + if config.build(SinkContext::default()).await.is_ok() { panic!("config.build failed to error"); } } @@ -302,7 +302,7 @@ mod integration_tests { } async fn config_build(topic: &str) -> (VectorSink, crate::sinks::Healthcheck) { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); config(topic).build(cx).await.expect("Building sink failed") } diff --git a/src/sinks/gcp/stackdriver_logs.rs b/src/sinks/gcp/stackdriver_logs.rs index e79287429071d..fb24bce81a40b 100644 --- a/src/sinks/gcp/stackdriver_logs.rs +++ b/src/sinks/gcp/stackdriver_logs.rs @@ -451,7 +451,7 @@ mod tests { config.auth.api_key = Some("fake".to_string().into()); config.endpoint = mock_endpoint.to_string(); - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); let event = Event::Log(LogEvent::from("simple message")); @@ -659,7 +659,7 @@ mod tests { resource.namespace = "office" "#}) .unwrap(); - if config.build(SinkContext::new_test()).await.is_ok() { + if config.build(SinkContext::default()).await.is_ok() { panic!("config.build failed to error"); } } diff --git a/src/sinks/gcp/stackdriver_metrics.rs b/src/sinks/gcp/stackdriver_metrics.rs index c18a8318b7f79..faa6989b80bd5 100644 --- a/src/sinks/gcp/stackdriver_metrics.rs +++ b/src/sinks/gcp/stackdriver_metrics.rs @@ -295,7 +295,7 @@ mod tests { config.auth.api_key = Some("fake".to_string().into()); config.endpoint = mock_endpoint.to_string(); - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); let event = Event::Metric(Metric::new( diff --git a/src/sinks/honeycomb.rs b/src/sinks/honeycomb.rs index 559a1f4a5bf85..6cba7079a84e0 100644 --- 
a/src/sinks/honeycomb.rs +++ b/src/sinks/honeycomb.rs @@ -245,7 +245,7 @@ mod test { .expect("config should be valid"); config.endpoint = mock_endpoint.to_string(); - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); let event = Event::Log(LogEvent::from("simple message")); diff --git a/src/sinks/http.rs b/src/sinks/http.rs index 8f7d0fdf8b633..2b0b5e7b36f26 100644 --- a/src/sinks/http.rs +++ b/src/sinks/http.rs @@ -627,7 +627,7 @@ mod tests { "#; let config: HttpSinkConfig = toml::from_str(config).unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); _ = config.build(cx).await.unwrap(); } @@ -861,7 +861,7 @@ mod tests { let config: HttpSinkConfig = toml::from_str(&config).unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _) = config.build(cx).await.unwrap(); let (rx, trigger, server) = build_test_server(in_addr); @@ -922,7 +922,7 @@ mod tests { let config: HttpSinkConfig = toml::from_str(&config).unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _) = config.build(cx).await.unwrap(); let (rx, trigger, server) = build_test_server(in_addr); @@ -1029,7 +1029,7 @@ mod tests { ); let config: HttpSinkConfig = toml::from_str(&config).unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _) = config.build(cx).await.unwrap(); (in_addr, sink) diff --git a/src/sinks/humio/logs.rs b/src/sinks/humio/logs.rs index 6e30e66f29ee9..b87446ff015f7 100644 --- a/src/sinks/humio/logs.rs +++ b/src/sinks/humio/logs.rs @@ -251,7 +251,7 @@ mod integration_tests { async fn humio_insert_message() { wait_ready().await; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let repo = create_repository().await; @@ -301,7 +301,7 @@ mod integration_tests { async fn humio_insert_source() { wait_ready().await; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let repo = create_repository().await; @@ -337,7 +337,7 @@ mod integration_tests { let mut config = config(&repo.default_ingest_token); config.event_type = Template::try_from("json".to_string()).ok(); - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let message = random_string(100); let mut event = LogEvent::from(message.clone()); @@ -363,7 +363,7 @@ mod integration_tests { { let config = config(&repo.default_ingest_token); - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let message = random_string(100); let event = LogEvent::from(message.clone()); diff --git a/src/sinks/influxdb/logs.rs b/src/sinks/influxdb/logs.rs index fe2e44b950368..ef901ff4e66aa 100644 --- a/src/sinks/influxdb/logs.rs +++ b/src/sinks/influxdb/logs.rs @@ -906,7 +906,7 @@ mod integration_tests { let now = Utc::now(); let measure = format!("vector-{}", now.timestamp_nanos()); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = InfluxDbLogsConfig { namespace: None, diff --git a/src/sinks/influxdb/metrics.rs b/src/sinks/influxdb/metrics.rs index 5c6f98856198a..68dd5182cadf1 100644 --- a/src/sinks/influxdb/metrics.rs +++ b/src/sinks/influxdb/metrics.rs @@ -995,7 +995,7 @@ mod integration_tests { crate::test_util::trace_init(); let database = onboarding_v1(url).await; - let cx = SinkContext::new_test(); + let cx 
= SinkContext::default(); let config = InfluxDbConfig { endpoint: url.to_string(), @@ -1090,7 +1090,7 @@ mod integration_tests { let endpoint = address_v2(); onboarding_v2(&endpoint).await; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = InfluxDbConfig { endpoint, diff --git a/src/sinks/new_relic/tests.rs b/src/sinks/new_relic/tests.rs index ae56843862f09..51be807831f56 100644 --- a/src/sinks/new_relic/tests.rs +++ b/src/sinks/new_relic/tests.rs @@ -28,7 +28,7 @@ async fn component_spec_compliance() { .expect("config should be valid"); config.override_uri = Some(mock_endpoint); - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); let event = Event::Log(LogEvent::from("simple message")); diff --git a/src/sinks/papertrail.rs b/src/sinks/papertrail.rs index f3d3afb55ace0..bf2c45c6710b1 100644 --- a/src/sinks/papertrail.rs +++ b/src/sinks/papertrail.rs @@ -213,7 +213,7 @@ mod tests { config.endpoint = mock_endpoint.into(); config.tls = Some(TlsEnableableConfig::default()); - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); let event = Event::Log(LogEvent::from("simple message")); diff --git a/src/sinks/prometheus/exporter.rs b/src/sinks/prometheus/exporter.rs index bb7e6c775b629..9ba90c8fb1610 100644 --- a/src/sinks/prometheus/exporter.rs +++ b/src/sinks/prometheus/exporter.rs @@ -894,7 +894,7 @@ mod tests { let mut receiver = BatchNotifier::apply_to(&mut events[..]); assert_eq!(receiver.try_recv(), Err(TryRecvError::Empty)); - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let (_, delayed_event) = create_metric_gauge(Some("delayed".to_string()), 123.4); let sink_handle = tokio::spawn(run_and_assert_sink_compliance( sink, @@ -958,7 +958,7 @@ mod tests { let mut receiver = BatchNotifier::apply_to(&mut events[..]); assert_eq!(receiver.try_recv(), Err(TryRecvError::Empty)); - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let (_, delayed_event) = create_metric_gauge(Some("delayed".to_string()), 123.4); let sink_handle = tokio::spawn(run_and_assert_sink_compliance( sink, @@ -1422,7 +1422,7 @@ mod integration_tests { flush_period_secs: Duration::from_secs(2), ..Default::default() }; - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let (name, event) = tests::create_metric_gauge(None, 123.4); let (_, delayed_event) = tests::create_metric_gauge(Some("delayed".to_string()), 123.4); @@ -1460,7 +1460,7 @@ mod integration_tests { flush_period_secs: Duration::from_secs(3), ..Default::default() }; - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let (tx, rx) = mpsc::unbounded_channel(); let input_events = UnboundedReceiverStream::new(rx); @@ -1517,7 +1517,7 @@ mod integration_tests { flush_period_secs: Duration::from_secs(3), ..Default::default() }; - let (sink, _) = config.build(SinkContext::new_test()).await.unwrap(); + let (sink, _) = config.build(SinkContext::default()).await.unwrap(); let (tx, rx) = mpsc::unbounded_channel(); let input_events = UnboundedReceiverStream::new(rx); diff --git 
a/src/sinks/prometheus/remote_write.rs b/src/sinks/prometheus/remote_write.rs index 1418cc2627924..a1e9b792fed4d 100644 --- a/src/sinks/prometheus/remote_write.rs +++ b/src/sinks/prometheus/remote_write.rs @@ -624,7 +624,7 @@ mod tests { let config = format!("endpoint = \"http://{}/write\"\n{}", addr, config); let config: RemoteWriteConfig = toml::from_str(&config).unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _) = config.build(cx).await.unwrap(); sink.run_events(events).await.unwrap(); @@ -709,7 +709,7 @@ mod integration_tests { assert_sink_compliance(&HTTP_SINK_TAGS, async { let database = onboarding_v1(url).await; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = RemoteWriteConfig { endpoint: format!("{}/api/v1/prom/write?db={}", url, database), diff --git a/src/sinks/socket.rs b/src/sinks/socket.rs index f22339497b47e..c6947f7658512 100644 --- a/src/sinks/socket.rs +++ b/src/sinks/socket.rs @@ -201,7 +201,7 @@ mod test { acknowledgements: Default::default(), }; - let context = SinkContext::new_test(); + let context = SinkContext::default(); assert_sink_compliance(&SINK_TAGS, async move { let (sink, _healthcheck) = config.build(context).await.unwrap(); @@ -256,7 +256,7 @@ mod test { let (lines, events) = random_lines_with_stream(10, 100, None); assert_sink_compliance(&SINK_TAGS, async move { - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); sink.run(events).await @@ -333,7 +333,7 @@ mod test { }), acknowledgements: Default::default(), }; - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); let (mut sender, receiver) = mpsc::channel::>(0); let jh1 = tokio::spawn(async move { @@ -453,7 +453,7 @@ mod test { acknowledgements: Default::default(), }; - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); let (_, events) = random_lines_with_stream(1000, 10000, None); diff --git a/src/sinks/splunk_hec/logs/integration_tests.rs b/src/sinks/splunk_hec/logs/integration_tests.rs index ece387f7f89d4..0510d2dbe9721 100644 --- a/src/sinks/splunk_hec/logs/integration_tests.rs +++ b/src/sinks/splunk_hec/logs/integration_tests.rs @@ -127,7 +127,7 @@ async fn config(encoding: EncodingConfig, indexed_fields: Vec) -> HecLog #[tokio::test] async fn splunk_insert_message() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = config(TextSerializerConfig::default().into(), vec![]).await; let (sink, _) = config.build(cx).await.unwrap(); @@ -147,7 +147,7 @@ async fn splunk_insert_message() { #[tokio::test] async fn splunk_insert_raw_message() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = HecLogsSinkConfig { endpoint_target: EndpointTarget::Raw, @@ -172,7 +172,7 @@ async fn splunk_insert_raw_message() { #[tokio::test] async fn splunk_insert_broken_token() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let mut config = config(TextSerializerConfig::default().into(), vec![]).await; config.default_token = "BROKEN_TOKEN".to_string().into(); @@ -188,7 +188,7 @@ async fn splunk_insert_broken_token() { #[tokio::test] async fn splunk_insert_source() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let mut config = 
config(TextSerializerConfig::default().into(), vec![]).await; config.source = Template::try_from("/var/log/syslog".to_string()).ok(); @@ -206,7 +206,7 @@ async fn splunk_insert_source() { #[tokio::test] async fn splunk_insert_index() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let mut config = config(TextSerializerConfig::default().into(), vec![]).await; config.index = Template::try_from("custom_index".to_string()).ok(); @@ -223,7 +223,7 @@ async fn splunk_insert_index() { #[tokio::test] async fn splunk_index_is_interpolated() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let indexed_fields = vec!["asdf".to_string()]; let mut config = config(JsonSerializerConfig::default().into(), indexed_fields).await; @@ -244,7 +244,7 @@ async fn splunk_index_is_interpolated() { #[tokio::test] async fn splunk_insert_many() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = config(TextSerializerConfig::default().into(), vec![]).await; let (sink, _) = config.build(cx).await.unwrap(); @@ -257,7 +257,7 @@ async fn splunk_insert_many() { #[tokio::test] async fn splunk_custom_fields() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let indexed_fields = vec!["asdf".into()]; let config = config(JsonSerializerConfig::default().into(), indexed_fields).await; @@ -277,7 +277,7 @@ async fn splunk_custom_fields() { #[tokio::test] async fn splunk_hostname() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let indexed_fields = vec!["asdf".into()]; let config = config(JsonSerializerConfig::default().into(), indexed_fields).await; @@ -300,7 +300,7 @@ async fn splunk_hostname() { #[tokio::test] async fn splunk_sourcetype() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let indexed_fields = vec!["asdf".to_string()]; let mut config = config(JsonSerializerConfig::default().into(), indexed_fields).await; @@ -324,7 +324,7 @@ async fn splunk_sourcetype() { #[tokio::test] async fn splunk_configure_hostname() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = HecLogsSinkConfig { host_key: "roast".into(), @@ -355,7 +355,7 @@ async fn splunk_configure_hostname() { #[tokio::test] async fn splunk_indexer_acknowledgements() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let acknowledgements_config = HecClientAcknowledgementsConfig { query_interval: NonZeroU8::new(1).unwrap(), @@ -385,7 +385,7 @@ async fn splunk_indexer_acknowledgements() { #[tokio::test] async fn splunk_indexer_acknowledgements_disabled_on_server() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = config( JsonSerializerConfig::default().into(), @@ -414,7 +414,7 @@ async fn splunk_auto_extracted_timestamp() { .map(|version| !version.starts_with("7.")) .unwrap_or(true) { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = HecLogsSinkConfig { auto_extract_timestamp: Some(true), @@ -467,7 +467,7 @@ async fn splunk_non_auto_extracted_timestamp() { .map(|version| !version.starts_with("7.")) .unwrap_or(true) { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let config = HecLogsSinkConfig { auto_extract_timestamp: Some(false), diff --git a/src/sinks/splunk_hec/logs/tests.rs b/src/sinks/splunk_hec/logs/tests.rs index 5f6bf777d0d40..0e5d4956e36bf 100644 --- a/src/sinks/splunk_hec/logs/tests.rs +++ b/src/sinks/splunk_hec/logs/tests.rs @@ -218,7 +218,7 @@ async fn 
splunk_passthrough_token() { auto_extract_timestamp: None, endpoint_target: EndpointTarget::Event, }; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _) = config.build(cx).await.unwrap(); diff --git a/src/sinks/splunk_hec/metrics/integration_tests.rs b/src/sinks/splunk_hec/metrics/integration_tests.rs index 6dc7f4fbf769a..8227e6431a9db 100644 --- a/src/sinks/splunk_hec/metrics/integration_tests.rs +++ b/src/sinks/splunk_hec/metrics/integration_tests.rs @@ -70,7 +70,7 @@ fn get_counter(batch: BatchNotifier) -> Event { #[tokio::test] async fn splunk_insert_counter_metric() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let mut config = config().await; config.index = Template::try_from("testmetrics".to_string()).ok(); @@ -93,7 +93,7 @@ async fn splunk_insert_counter_metric() { #[tokio::test] async fn splunk_insert_gauge_metric() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let mut config = config().await; config.index = Template::try_from("testmetrics".to_string()).ok(); @@ -116,7 +116,7 @@ async fn splunk_insert_gauge_metric() { #[tokio::test] async fn splunk_insert_multiple_counter_metrics() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let mut config = config().await; config.index = Template::try_from("testmetrics".to_string()).ok(); @@ -143,7 +143,7 @@ async fn splunk_insert_multiple_counter_metrics() { #[tokio::test] async fn splunk_insert_multiple_gauge_metrics() { - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let mut config = config().await; config.index = Template::try_from("testmetrics".to_string()).ok(); diff --git a/src/sinks/splunk_hec/metrics/tests.rs b/src/sinks/splunk_hec/metrics/tests.rs index dc05ff8bff1c9..7cfd251e307d9 100644 --- a/src/sinks/splunk_hec/metrics/tests.rs +++ b/src/sinks/splunk_hec/metrics/tests.rs @@ -330,7 +330,7 @@ async fn splunk_passthrough_token() { acknowledgements: Default::default(), default_namespace: None, }; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _) = config.build(cx).await.unwrap(); diff --git a/src/sinks/statsd/tests.rs b/src/sinks/statsd/tests.rs index 4c30a46b78551..b2311bd8fbec9 100644 --- a/src/sinks/statsd/tests.rs +++ b/src/sinks/statsd/tests.rs @@ -71,7 +71,7 @@ async fn test_send_to_statsd() { ]; let (tx, rx) = mpsc::channel(1); - let context = SinkContext::new_test(); + let context = SinkContext::default(); assert_sink_compliance(&SINK_TAGS, async move { let (sink, _healthcheck) = config.build(context).await.unwrap(); diff --git a/src/sinks/util/test.rs b/src/sinks/util/test.rs index b5b1a2ae461bd..9f19903f954be 100644 --- a/src/sinks/util/test.rs +++ b/src/sinks/util/test.rs @@ -20,7 +20,7 @@ where for<'a> T: Deserialize<'a> + SinkConfig, { let sink_config: T = toml::from_str(config)?; - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); Ok((sink_config, cx)) } diff --git a/src/sinks/vector/mod.rs b/src/sinks/vector/mod.rs index 99a4c5bf8a2ab..03d77611ad239 100644 --- a/src/sinks/vector/mod.rs +++ b/src/sinks/vector/mod.rs @@ -77,7 +77,7 @@ mod tests { let config = format!(r#"address = "http://{}/""#, in_addr); let config: VectorConfig = toml::from_str(&config).unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _) = config.build(cx).await.unwrap(); let (rx, trigger, server) = build_test_server_generic(in_addr, move || { @@ -121,7 +121,7 @@ mod tests { let config = format!(r#"address = 
"http://{}/""#, in_addr); let config: VectorConfig = toml::from_str(&config).unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _) = config.build(cx).await.unwrap(); let (_rx, trigger, server) = build_test_server_generic(in_addr, move || { diff --git a/src/sinks/webhdfs/integration_tests.rs b/src/sinks/webhdfs/integration_tests.rs index c4b64a60deb4e..419c3a7629bd5 100644 --- a/src/sinks/webhdfs/integration_tests.rs +++ b/src/sinks/webhdfs/integration_tests.rs @@ -24,7 +24,7 @@ async fn hdfs_healthchecks_invalid_node_node() { // Point to an invalid endpoint let config = config("http://127.0.0.1:1", 10); let (_, health_check) = config - .build(SinkContext::new_test()) + .build(SinkContext::default()) .await .expect("config build must with success"); let result = health_check.await; @@ -36,7 +36,7 @@ async fn hdfs_healthchecks_invalid_node_node() { async fn hdfs_healthchecks_valid_node_node() { let config = config(&webhdfs_endpoint(), 10); let (_, health_check) = config - .build(SinkContext::new_test()) + .build(SinkContext::default()) .await .expect("config build must with success"); let result = health_check.await; diff --git a/src/sinks/websocket/sink.rs b/src/sinks/websocket/sink.rs index 07ced35ba2a59..6acbf99967b43 100644 --- a/src/sinks/websocket/sink.rs +++ b/src/sinks/websocket/sink.rs @@ -483,7 +483,7 @@ mod tests { let mut receiver = create_count_receiver(addr, tls.clone(), true, None); - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); let (_lines, events) = random_lines_with_stream(10, 100, None); @@ -511,7 +511,7 @@ mod tests { ) { let mut receiver = create_count_receiver(addr, tls, false, auth); - let context = SinkContext::new_test(); + let context = SinkContext::default(); let (sink, _healthcheck) = config.build(context).await.unwrap(); let (lines, events) = random_lines_with_stream(10, 100, None); diff --git a/src/sources/prometheus/remote_write.rs b/src/sources/prometheus/remote_write.rs index 3b27770ba995b..fb8b84a3d724b 100644 --- a/src/sources/prometheus/remote_write.rs +++ b/src/sources/prometheus/remote_write.rs @@ -205,7 +205,7 @@ mod test { ..Default::default() }; let (sink, _) = sink - .build(SinkContext::new_test()) + .build(SinkContext::default()) .await .expect("Error building config."); @@ -299,7 +299,7 @@ mod test { ..Default::default() }; let (sink, _) = sink - .build(SinkContext::new_test()) + .build(SinkContext::default()) .await .expect("Error building config."); diff --git a/src/sources/splunk_hec/mod.rs b/src/sources/splunk_hec/mod.rs index 0c465ffaef97e..222ffc2487041 100644 --- a/src/sources/splunk_hec/mod.rs +++ b/src/sources/splunk_hec/mod.rs @@ -1290,7 +1290,7 @@ mod tests { auto_extract_timestamp: None, endpoint_target: Default::default(), } - .build(SinkContext::new_test()) + .build(SinkContext::default()) .await .unwrap() } diff --git a/src/sources/vector/mod.rs b/src/sources/vector/mod.rs index a6fdaa494d60e..44b0921d75bcd 100644 --- a/src/sources/vector/mod.rs +++ b/src/sources/vector/mod.rs @@ -299,7 +299,7 @@ mod tests { // but the sink side already does such a test and this is good // to ensure interoperability. 
let sink: SinkConfig = toml::from_str(vector_source_config_str).unwrap(); - let cx = SinkContext::new_test(); + let cx = SinkContext::default(); let (sink, _) = sink.build(cx).await.unwrap(); let (mut events, stream) = test_util::random_events_with_stream(100, 100, None); From 20d62f11f9bd255185fadd58c79891d730997768 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Thu, 29 Jun 2023 14:22:38 -0700 Subject: [PATCH 217/236] chore(ci): Update publish workflow test Ubuntu versions (#17781) To currently supported versions. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index ae7055dd7cbc4..b490037ba14c1 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -286,11 +286,11 @@ jobs: strategy: matrix: container: - - ubuntu:14.04 - ubuntu:16.04 - ubuntu:18.04 - ubuntu:20.04 - ubuntu:22.04 + - ubuntu:22.10 - ubuntu:23.04 - debian:10 - debian:11 From 671aa795136e319889a710986d41fadae9ec980f Mon Sep 17 00:00:00 2001 From: Nathan Fox Date: Fri, 30 Jun 2023 07:53:26 -0400 Subject: [PATCH 218/236] Update VRL to `0.5.0` (#17793) This updates VRL: `0.4.0` -> `0.5.0` I'm also trying out the newish "workspace" dependencies, so the VRL version is only specified a single time. VRL changes can be viewed here: https://github.com/vectordotdev/vrl/blob/main/CHANGELOG.md --------- Co-authored-by: Jesse Szwedko --- .github/actions/spelling/allow.txt | 1 + Cargo.lock | 187 +++++++++--------- Cargo.toml | 8 +- lib/codecs/Cargo.toml | 2 +- lib/enrichment/Cargo.toml | 5 +- lib/opentelemetry-proto/Cargo.toml | 2 +- lib/vector-common/Cargo.toml | 2 +- lib/vector-config/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 4 +- lib/vector-lookup/Cargo.toml | 2 +- lib/vector-vrl/cli/Cargo.toml | 2 +- lib/vector-vrl/functions/Cargo.toml | 2 +- lib/vector-vrl/tests/Cargo.toml | 2 +- lib/vector-vrl/web-playground/Cargo.toml | 2 +- src/transforms/remap.rs | 2 +- .../remap/functions/format_timestamp.cue | 6 + .../remap/functions/from_unix_timestamp.cue | 58 ++++++ .../remap/functions/parse_nginx_log.cue | 5 +- 18 files changed, 180 insertions(+), 114 deletions(-) create mode 100644 website/cue/reference/remap/functions/from_unix_timestamp.cue diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index c3a73ef09667c..d9b13fcf742c0 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -411,6 +411,7 @@ timespan timestamped tzdata ubuntu +upstreaminfo useragents usergroups userguide diff --git a/Cargo.lock b/Cargo.lock index 41580f90deaa1..3364f48bce98e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -260,7 +260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c6368f9ae5c6ec403ca910327ae0c9437b0a85255b6950c90d497e6177f6e5e" dependencies = [ "proc-macro-hack", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -473,7 +473,7 @@ dependencies = [ "darling 0.14.2", "proc-macro-crate 1.2.1", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", "thiserror", ] @@ -593,7 +593,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -615,7 +615,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -632,7 +632,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -1417,7 +1417,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd9e32d7420c85055e8107e5b2463c4eeefeaac18b52359fe9f9c08a18f342b2" dependencies = [ - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -1561,7 +1561,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61820b4c5693eafb998b1e67485423c923db4a75f72585c247bdee32bad81e7b" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -1572,7 +1572,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c76cdbfa13def20d1f8af3ae7b3c6771f06352a74221d8851262ac384c122b8e" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -1643,7 +1643,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -1726,7 +1726,7 @@ dependencies = [ "cached_proc_macro_types", "darling 0.14.2", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -1951,7 +1951,7 @@ checksum = "81d7dc0031c3a59a04fc2ba395c8e2dd463cba1859275f065d225f6122221b45" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -1963,9 +1963,9 @@ checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" [[package]] name = "clipboard-win" -version = "4.4.2" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4ab1b92798304eedc095b53942963240037c0516452cb11aeba709d420b2219" +checksum = "7191c27c2357d9b7ef96baac1773290d4ca63b24205b82a3fd8a0637afcf0362" dependencies = [ "error-code", "str-buf", @@ -2405,7 +2405,7 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -2459,7 +2459,7 @@ dependencies = [ "codespan-reporting", "once_cell", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "scratch", "syn 1.0.109", ] @@ -2477,7 +2477,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -2510,7 +2510,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", ] @@ -2524,7 +2524,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "strsim 0.10.0", "syn 1.0.109", ] @@ -2536,7 +2536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -2547,7 +2547,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" dependencies = [ "darling_core 0.14.2", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -2623,19 +2623,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] [[package]] name = "derive_arbitrary" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7" +checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", - "syn 1.0.109", + "quote 1.0.28", + "syn 2.0.10", ] [[package]] @@ -2646,7 +2646,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -2756,14 +2756,14 @@ dependencies = [ [[package]] name = "dns-lookup" -version = "1.0.8" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53ecafc952c4528d9b51a458d1a8904b81783feff9fde08ab6ed2545ff396872" +checksum = "8f332aa79f9e9de741ac013237294ef42ce2e9c6394dc7d766725812f1238812" dependencies = [ "cfg-if", "libc", - "socket2 0.4.9", - "winapi", + "socket2 0.5.3", + "windows-sys 0.48.0", ] [[package]] @@ -2902,7 +2902,7 @@ checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -2914,7 +2914,7 @@ checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -2926,7 +2926,7 @@ checksum = "11f36e95862220b211a6e2aa5eca09b4fa391b13cd52ceb8035a24bf65a79de2" dependencies = [ "once_cell", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -2946,7 +2946,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -3074,7 +3074,7 @@ checksum = "f47da3a72ec598d9c8937a7ebca8962a5c7a1f28444e38c2b33c771ba3f55f05" dependencies = [ "proc-macro-error", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -3170,7 +3170,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4c81935e123ab0741c4c4f0d9b8377e5fb21d3de7e062fa4b1263b1fbcba1ea" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -3351,7 +3351,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -3434,7 +3434,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb19fe8de3ea0920d282f7b77dd4227aea6b8b999b42cdf0ca41b2472b14443a" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -3540,7 +3540,7 @@ dependencies = [ "heck 0.4.0", "lazy_static", "proc-macro2 1.0.63", - "quote 
1.0.29", + "quote 1.0.28", "serde", "serde_json", "syn 1.0.109", @@ -4541,9 +4541,9 @@ dependencies = [ [[package]] name = "lalrpop" -version = "0.19.12" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a1cbf952127589f2851ab2046af368fd20645491bb4b376f04b7f94d7a9837b" +checksum = "da4081d44f4611b66c6dd725e6de3169f9f63905421e8626fcb86b6a898998b8" dependencies = [ "ascii-canvas", "bit-set", @@ -4554,7 +4554,7 @@ dependencies = [ "lalrpop-util", "petgraph", "regex", - "regex-syntax 0.6.29", + "regex-syntax 0.7.2", "string_cache", "term", "tiny-keccak", @@ -4563,9 +4563,9 @@ dependencies = [ [[package]] name = "lalrpop-util" -version = "0.19.12" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3c48237b9604c5a4702de6b824e02006c3214327564636aef27c1028a8fa0ed" +checksum = "3f35c735096c0293d313e8f2a641627472b83d01b937177fe76e5e2708d31e0d" [[package]] name = "lapin" @@ -4926,7 +4926,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -5452,7 +5452,7 @@ checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.2.1", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -5464,7 +5464,7 @@ checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.2.1", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -5647,7 +5647,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -5891,7 +5891,7 @@ dependencies = [ "pest", "pest_meta", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -5979,7 +5979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -6245,7 +6245,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", "version_check", ] @@ -6257,7 +6257,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "version_check", ] @@ -6366,7 +6366,7 @@ dependencies = [ "anyhow", "itertools 0.10.5", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -6395,7 +6395,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -6494,7 +6494,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -6509,18 +6509,18 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.29" 
+version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" +checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" dependencies = [ "proc-macro2 1.0.63", ] [[package]] name = "quoted_printable" -version = "0.4.7" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a24039f627d8285853cc90dcddf8c1ebfaa91f834566948872b225b9a28ed1b6" +checksum = "79ec282e887b434b68c18fe5c121d38e72a5cf35119b59e54ec5b992ea9c8eb0" [[package]] name = "radium" @@ -6909,7 +6909,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -7177,11 +7177,11 @@ dependencies = [ [[package]] name = "rustyline" -version = "11.0.0" +version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfc8644681285d1fb67a467fb3021bfea306b99b4146b166a1fe3ada965eece" +checksum = "994eca4bca05c87e86e15d90fc7a91d1be64b4482b38cb2d27474568fe7c9db9" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.3.2", "cfg-if", "clipboard-win", "libc", @@ -7409,7 +7409,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -7420,7 +7420,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -7472,7 +7472,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -7531,7 +7531,7 @@ checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -7543,7 +7543,7 @@ checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" dependencies = [ "darling 0.14.2", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -7814,7 +7814,7 @@ checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -7955,7 +7955,7 @@ dependencies = [ "heck 0.3.3", "proc-macro-error", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -7973,7 +7973,7 @@ checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "rustversion", "syn 1.0.109", ] @@ -8012,7 +8012,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "unicode-ident", ] @@ -8023,7 +8023,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 
1.0.28", "unicode-ident", ] @@ -8040,7 +8040,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -8194,7 +8194,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -8340,7 +8340,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -8561,7 +8561,7 @@ dependencies = [ "prettyplease", "proc-macro2 1.0.63", "prost-build", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -8665,7 +8665,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -8936,7 +8936,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -8966,7 +8966,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3e1c30cedd24fc597f7d37a721efdbdc2b1acae012c1ef1218f4c7c2c0f3e7" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", ] @@ -9127,6 +9127,7 @@ dependencies = [ "getrandom 0.2.10", "rand 0.8.5", "serde", + "wasm-bindgen", ] [[package]] @@ -9506,7 +9507,7 @@ dependencies = [ "darling 0.13.4", "once_cell", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "serde", "serde_json", "syn 1.0.109", @@ -9519,7 +9520,7 @@ version = "0.1.0" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "serde", "serde_derive_internals", "syn 1.0.109", @@ -9683,9 +9684,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "vrl" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6236fdfaaa956af732a73630b8a8b43ef75e0a42fed6b94cf3c1c19c99daca5" +checksum = "ee149a42aa313d18cf7a7e6b54e06df03b4a0e6e5dd42a2b8b6d5eeb98d582c1" dependencies = [ "aes", "ansi_term", @@ -9696,6 +9697,7 @@ dependencies = [ "bytes 1.4.0", "cbc", "cfb-mode", + "cfg-if", "charset", "chrono", "chrono-tz", @@ -9709,14 +9711,13 @@ dependencies = [ "dyn-clone", "exitcode", "flate2", - "getrandom 0.2.10", "grok", "hex", "hmac", "hostname", - "indexmap 1.9.3", + "indexmap 2.0.0", "indoc", - "itertools 0.10.5", + "itertools 0.11.0", "lalrpop", "lalrpop-util", "md-5", @@ -9779,7 +9780,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", ] [[package]] @@ -9880,7 +9881,7 @@ dependencies = [ "log", "once_cell", "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", "wasm-bindgen-shared", ] @@ -9903,7 +9904,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.29", + "quote 1.0.28", "wasm-bindgen-macro-support", ] @@ -9914,7 +9915,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 2.0.10", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -10325,7 +10326,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6505e6815af7de1746a08f69c69606bb45695a17149517680f3b2149713b19a3" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", ] @@ -10345,7 +10346,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.29", + "quote 1.0.28", "syn 1.0.109", "synstructure", ] diff --git a/Cargo.toml b/Cargo.toml index 63014558b28f4..40f40f0b54fe7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -114,7 +114,12 @@ members = [ "vdev", ] +[workspace.dependencies] +vrl = { version = "0.5.0", features = ["cli", "test", "test_framework", "arbitrary"] } + [dependencies] +vrl.workspace = true + # Internal libs codecs = { path = "lib/codecs", default-features = false } dnsmsg-parser = { path = "lib/dnsmsg-parser", optional = true } @@ -227,9 +232,6 @@ tui = { version = "0.19.0", optional = true, default-features = false, features hex = { version = "0.4.3", default-features = false, optional = true } sha2 = { version = "0.10.7", default-features = false, optional = true } -# VRL Lang -vrl = { package = "vrl", version = "0.4.0", features = ["cli", "test"] } - # External libs arc-swap = { version = "1.6", default-features = false, optional = true } async-compression = { version = "0.4.0", default-features = false, features = ["tokio", "gzip", "zstd"], optional = true } diff --git a/lib/codecs/Cargo.toml b/lib/codecs/Cargo.toml index 93aa9b6e2980f..85cd4cb6813a3 100644 --- a/lib/codecs/Cargo.toml +++ b/lib/codecs/Cargo.toml @@ -25,7 +25,7 @@ snafu = { version = "0.7.4", default-features = false, features = ["futures"] } syslog_loose = { version = "0.18", default-features = false, optional = true } tokio-util = { version = "0.7", default-features = false, features = ["codec"] } tracing = { version = "0.1", default-features = false } -vrl = { version = "0.4.0", default-features = false, features = ["value"] } +vrl.workspace = true vector-common = { path = "../vector-common", default-features = false } vector-config = { path = "../vector-config", default-features = false } vector-config-common = { path = "../vector-config-common", default-features = false } diff --git a/lib/enrichment/Cargo.toml b/lib/enrichment/Cargo.toml index 4dbc835c11b12..1a8e2467f0fed 100644 --- a/lib/enrichment/Cargo.toml +++ b/lib/enrichment/Cargo.toml @@ -9,7 +9,4 @@ publish = false arc-swap = { version = "1.6.0", default-features = false } chrono = { version = "0.4.19", default-features = false } dyn-clone = { version = "1.0.11", default-features = false } -vrl = { version = "0.4.0", default-features = false, features = [ - "compiler", - "diagnostic", -] } +vrl.workspace = true diff --git a/lib/opentelemetry-proto/Cargo.toml b/lib/opentelemetry-proto/Cargo.toml index 23c91e4c26282..c2e3dbfac011a 100644 --- a/lib/opentelemetry-proto/Cargo.toml +++ b/lib/opentelemetry-proto/Cargo.toml @@ -17,5 +17,5 @@ lookup = { package = "vector-lookup", 
path = "../vector-lookup", default-feature ordered-float = { version = "3.7.0", default-features = false } prost = { version = "0.11", default-features = false, features = ["std"] } tonic = { version = "0.9", default-features = false, features = ["codegen", "gzip", "prost", "tls", "tls-roots", "transport"] } -vrl = { version = "0.4.0", default-features = false, features = ["value"] } +vrl.workspace = true vector-core = { path = "../vector-core", default-features = false } diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 31c6c40a2ac5e..7d578ecb4e051 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -62,7 +62,7 @@ snafu = { version = "0.7", optional = true } stream-cancel = { version = "0.8.1", default-features = false } tokio = { version = "1.29.0", default-features = false, features = ["macros", "time"] } tracing = { version = "0.1.34", default-features = false } -vrl = { version = "0.4.0", default-features = false, features = ["value", "core", "compiler"] } +vrl.workspace = true vector-config = { path = "../vector-config" } vector-config-common = { path = "../vector-config-common" } vector-config-macros = { path = "../vector-config-macros" } diff --git a/lib/vector-config/Cargo.toml b/lib/vector-config/Cargo.toml index d843f0edb0a17..e1acd6593233c 100644 --- a/lib/vector-config/Cargo.toml +++ b/lib/vector-config/Cargo.toml @@ -26,7 +26,7 @@ snafu = { version = "0.7.4", default-features = false } toml = { version = "0.7.5", default-features = false } tracing = { version = "0.1.34", default-features = false } url = { version = "2.4.0", default-features = false, features = ["serde"] } -vrl = { version = "0.4.0", default-features = false, features = ["compiler"] } +vrl.workspace = true vector-config-common = { path = "../vector-config-common" } vector-config-macros = { path = "../vector-config-macros" } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml index 7e2b0ff88c137..a281ac35dd87e 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -65,7 +65,7 @@ vector-common = { path = "../vector-common" } vector-config = { path = "../vector-config" } vector-config-common = { path = "../vector-config-common" } vector-config-macros = { path = "../vector-config-macros" } -vrl = { version = "0.4.0" } +vrl.workspace = true [target.'cfg(target_os = "macos")'.dependencies] security-framework = "2.9.1" @@ -94,7 +94,7 @@ rand = "0.8.5" rand_distr = "0.4.3" tracing-subscriber = { version = "0.3.17", default-features = false, features = ["env-filter", "fmt", "ansi", "registry"] } vector-common = { path = "../vector-common", default-features = false, features = ["test"] } -vrl = { version = "0.4.0", default-features = false, features = ["value", "arbitrary", "lua", "test"] } +vrl.workspace = true [features] api = ["dep:async-graphql"] diff --git a/lib/vector-lookup/Cargo.toml b/lib/vector-lookup/Cargo.toml index b1ca558a543cf..a159f31561d0f 100644 --- a/lib/vector-lookup/Cargo.toml +++ b/lib/vector-lookup/Cargo.toml @@ -10,4 +10,4 @@ license = "MPL-2.0" serde = { version = "1.0.164", default-features = false, features = ["derive", "alloc"] } vector-config = { path = "../vector-config" } vector-config-macros = { path = "../vector-config-macros" } -vrl = { version = "0.4.0", default-features = false, features = ["path"] } +vrl.workspace = true diff --git a/lib/vector-vrl/cli/Cargo.toml b/lib/vector-vrl/cli/Cargo.toml index dd2c451de79c3..106e430355c56 100644 --- a/lib/vector-vrl/cli/Cargo.toml +++ 
b/lib/vector-vrl/cli/Cargo.toml @@ -9,4 +9,4 @@ license = "MPL-2.0" [dependencies] clap = { version = "4.1.14", features = ["derive"] } vector-vrl-functions = { path = "../functions" } -vrl = { version = "0.4.0", default-features = false, features = ["stdlib", "cli"] } +vrl.workspace = true diff --git a/lib/vector-vrl/functions/Cargo.toml b/lib/vector-vrl/functions/Cargo.toml index 787637bad848b..432cac075cdb5 100644 --- a/lib/vector-vrl/functions/Cargo.toml +++ b/lib/vector-vrl/functions/Cargo.toml @@ -7,4 +7,4 @@ publish = false license = "MPL-2.0" [dependencies] -vrl = { version = "0.4.0", default-features = false, features = ["compiler", "path", "diagnostic"] } +vrl.workspace = true diff --git a/lib/vector-vrl/tests/Cargo.toml b/lib/vector-vrl/tests/Cargo.toml index 2ff39275589b0..b67b8afa63a4a 100644 --- a/lib/vector-vrl/tests/Cargo.toml +++ b/lib/vector-vrl/tests/Cargo.toml @@ -7,7 +7,7 @@ publish = false [dependencies] enrichment = { path = "../../enrichment" } -vrl = { version = "0.4.0", features = ["test_framework"]} +vrl.workspace = true vector-vrl-functions = { path = "../../vector-vrl/functions" } ansi_term = "0.12" diff --git a/lib/vector-vrl/web-playground/Cargo.toml b/lib/vector-vrl/web-playground/Cargo.toml index 72a9118937ba6..b951a29661226 100644 --- a/lib/vector-vrl/web-playground/Cargo.toml +++ b/lib/vector-vrl/web-playground/Cargo.toml @@ -10,7 +10,7 @@ crate-type = ["cdylib"] [dependencies] wasm-bindgen = "0.2" -vrl = { version = "0.4.0", default-features = false, features = ["value", "stdlib"] } +vrl.workspace = true serde = { version = "1.0", features = ["derive"] } serde-wasm-bindgen = "0.5" gloo-utils = { version = "0.1", features = ["serde"] } diff --git a/src/transforms/remap.rs b/src/transforms/remap.rs index 0cbd2af1c119e..381843b9101f3 100644 --- a/src/transforms/remap.rs +++ b/src/transforms/remap.rs @@ -240,7 +240,7 @@ impl TransformConfig for RemapConfig { .compile_vrl_program(enrichment_tables, merged_definition) .map(|(program, _, _, external_context)| { ( - program.final_type_state(), + program.final_type_info().state, external_context .get_custom::() .cloned() diff --git a/website/cue/reference/remap/functions/format_timestamp.cue b/website/cue/reference/remap/functions/format_timestamp.cue index b13bbd40a74c3..0bc92a9836794 100644 --- a/website/cue/reference/remap/functions/format_timestamp.cue +++ b/website/cue/reference/remap/functions/format_timestamp.cue @@ -19,6 +19,12 @@ remap: functions: format_timestamp: { required: true type: ["string"] }, + { + name: "timezone" + description: "The timezone to use when formatting the timestamp. The uses the TZ identifier, or 'local'" + required: false + type: ["string"] + }, ] internal_failure_reasons: [] return: types: ["string"] diff --git a/website/cue/reference/remap/functions/from_unix_timestamp.cue b/website/cue/reference/remap/functions/from_unix_timestamp.cue new file mode 100644 index 0000000000000..45323b1811948 --- /dev/null +++ b/website/cue/reference/remap/functions/from_unix_timestamp.cue @@ -0,0 +1,58 @@ +package metadata + +remap: functions: from_unix_timestamp: { + category: "Convert" + description: """ + Converts the `value` integer from a [Unix timestamp](\(urls.unix_timestamp)) to a VRL `timestamp`. + + Converts from the number of seconds since the Unix epoch by default, but milliseconds or nanoseconds can also be + specified by `unit`. + """ + + arguments: [ + { + name: "value" + description: "The Unix timestamp to convert." 
+ required: true + type: ["integer"] + }, + { + name: "unit" + description: "The time unit." + type: ["string"] + required: false + enum: { + seconds: "Express Unix time in seconds" + milliseconds: "Express Unix time in milliseconds" + nanoseconds: "Express Unix time in nanoseconds" + } + default: "seconds" + }, + ] + internal_failure_reasons: [] + return: types: ["timestamp"] + + examples: [ + { + title: "Convert from a Unix timestamp (seconds)" + source: #""" + from_unix_timestamp!(5) + """# + return: "1970-01-01T00:00:05Z" + }, + { + title: "Convert from a Unix timestamp (milliseconds)" + source: #""" + from_unix_timestamp!(5000, unit: "milliseconds") + """# + return: "1970-01-01T00:00:05Z" + }, + { + title: "Convert from a Unix timestamp (nanoseconds)" + source: #""" + from_unix_timestamp!(5000, unit: "nanoseconds") + """# + return: "1970-01-01T00:00:00.000005Z" + }, + ] +} diff --git a/website/cue/reference/remap/functions/parse_nginx_log.cue b/website/cue/reference/remap/functions/parse_nginx_log.cue index b33397e1ba416..99f48be898b4d 100644 --- a/website/cue/reference/remap/functions/parse_nginx_log.cue +++ b/website/cue/reference/remap/functions/parse_nginx_log.cue @@ -35,8 +35,9 @@ remap: functions: parse_nginx_log: { description: "The format to use for parsing the log." required: true enum: { - "combined": "Nginx combined format" - "error": "Default Nginx error format" + "combined": "Nginx combined format" + "error": "Default Nginx error format" + "ingress_upstreaminfo": "Provides detailed upstream information (Nginx Ingress Controller)" } type: ["string"] }, From 3f6df61c4d90ed9d587c2935d188b5ada2f9ff02 Mon Sep 17 00:00:00 2001 From: Toby Lawrence Date: Fri, 30 Jun 2023 08:58:33 -0400 Subject: [PATCH 219/236] chore(datadog_metrics sink): incrementally encode sketches (#17764) ## Context When support was added for encoding/sending sketches in #9178, logic was added to handle "splitting" payloads if a metric exceeded the (un)compressed payload limits. As we lacked (at the time) the ability to encode sketch metrics one-by-one, we were forced to collect all of them, and then attempt to encode them all at once, which had a tendency to grow the response size past the (un)compressed payload limits. This "splitting" mechanism allowed us to compensate for that. However, in order to avoid getting stuck in pathological loops where payloads were too big, and thus required multiple splits (after already attempting at least one split), the logic was configured such that a batch of metrics would only be split once, and if the two subsequent slices couldn't be encoded without also exceeding the limits, they would be dropped and we would give up trying to split further. Despite the gut feeling during that work that it should be exceedingly rare to ever need to split further, real life has shown otherwise: #13175 ## Solution This PR introduces proper incremental encoding of sketches, which doesn't eliminate the possibility of needing to split (more below) but significantly reduces the likelihood that splitting will need to happen down to a purely theoretical level. We're taking advantage of hidden-from-docs methods in `prost` to encode each `SketchPayload` object and append the bytes into a single buffer. This is possible due to how Protocol Buffers functions. Additionally, we're now generating "file descriptors" for our compiled Protocol Buffers definitions. 
We use this to programmatically query the field number of the "sketches" field in the `SketchPayload` message, which is slightly more robust than hardcoding it and hoping it never changes in the future. In Protocol Buffers, each field in a message is written out such that the field data is preceded by the field number. This is part and parcel of its ability to allow backwards-compatible changes to a definition. Further, for repeated fields -- i.e. `Vec` -- the repetition is expressed simply by writing the same field multiple times rather than needing to write everything together. Practically speaking, this means that we can encode a vector of two messages, or encode those two messages individually, and end up with the same encoded output of `[field N][field data][field N][field data]`. ### Ancillary changes We've additionally fixed a bug with the "bytes sent" metric reported by the `datadog_metrics` sink, caused by some very tangled and miswired code around how compressed/uncompressed/event byte sizes were being shuttled from the request builder logic down to `Driver`. We've also reworked some of the encoder error types to clean them up and simplify things a bit. ## Reviewer notes ### Still needing to handle splits The encoder still needs to care about splits, in a theoretical sense: while we can accurately track and avoid ever exceeding the uncompressed payload limit, we can't know the final compressed payload size until we finalize the builder/payload. Currently, the encoder checks whether adding the current metric would cause us to exceed the compressed payload limit, assuming the compressor couldn't actually compress the encoded metric at all. This is a fairly robust check since it tries to optimally account for the overhead of an entirely incompressible payload, and so on... but we really want to avoid dropping events if possible, obviously, and that's why the splitting code is still in place.
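To make the field-concatenation property described above concrete, here is a minimal, self-contained sketch (not Vector's actual code) using hypothetical `prost` message types: a `SketchPayload`-like message whose only field is a repeated message at tag 1. Encoding the whole payload in one shot and encoding each element incrementally -- field key plus length-delimited body -- produce identical bytes.

```rust
use prost::Message;

// Hypothetical stand-ins for the prost-generated Datadog payload types.
#[derive(Clone, PartialEq, Message)]
struct Sketch {
    #[prost(string, tag = "1")]
    metric: String,
}

#[derive(Clone, PartialEq, Message)]
struct SketchPayload {
    #[prost(message, repeated, tag = "1")]
    sketches: Vec<Sketch>,
}

fn main() {
    let sketches = vec![
        Sketch { metric: "a".into() },
        Sketch { metric: "b".into() },
    ];

    // Encode the whole payload in one shot.
    let all_at_once = SketchPayload { sketches: sketches.clone() }.encode_to_vec();

    // Encode each element incrementally: write the field key for the repeated
    // `sketches` field, then the length-delimited message body.
    let mut incremental = Vec::new();
    for sketch in &sketches {
        prost::encoding::encode_key(1, prost::encoding::WireType::LengthDelimited, &mut incremental);
        sketch
            .encode_length_delimited(&mut incremental)
            .expect("Vec<u8> grows as needed");
    }

    // Repeated fields are just the same field written multiple times, so the
    // two buffers are byte-for-byte identical.
    assert_eq!(all_at_once, incremental);
}
```

In the sink's encoder this is what `encode_sketch_incremental` does, except the field number is looked up at runtime from the generated file descriptor set instead of being hardcoded.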
--- Cargo.lock | 12 + Cargo.toml | 5 +- LICENSE-3rdparty.csv | 1 + build.rs | 22 +- src/internal_events/datadog_metrics.rs | 14 +- src/proto.rs | 5 - src/proto/mod.rs | 19 + src/sinks/datadog/metrics/config.rs | 5 + src/sinks/datadog/metrics/encoder.rs | 651 ++++++++++++------- src/sinks/datadog/metrics/request_builder.rs | 143 ++-- src/sinks/datadog/metrics/service.rs | 16 +- src/sinks/datadog/metrics/sink.rs | 4 +- src/sinks/util/metadata.rs | 7 +- 13 files changed, 545 insertions(+), 359 deletions(-) delete mode 100644 src/proto.rs create mode 100644 src/proto/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 3364f48bce98e..c4c0e2d0b0ac1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6370,6 +6370,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "prost-reflect" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "000e1e05ebf7b26e1eba298e66fe4eee6eb19c567d0ffb35e0dd34231cdac4c8" +dependencies = [ + "once_cell", + "prost", + "prost-types", +] + [[package]] name = "prost-types" version = "0.11.9" @@ -9303,6 +9314,7 @@ dependencies = [ "proptest", "prost", "prost-build", + "prost-reflect", "prost-types", "pulsar", "quickcheck", diff --git a/Cargo.toml b/Cargo.toml index 40f40f0b54fe7..fe2d6de131203 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -205,8 +205,9 @@ serde_yaml = { version = "0.9.22", default-features = false } rmp-serde = { version = "1.1.1", default-features = false, optional = true } rmpv = { version = "1.0.0", default-features = false, features = ["with-serde"], optional = true } -# Prost +# Prost / Protocol Buffers prost = { version = "0.11", default-features = false, features = ["std"] } +prost-reflect = { version = "0.11", default-features = false, optional = true } prost-types = { version = "0.11", default-features = false, optional = true } # GCP @@ -673,7 +674,7 @@ sinks-console = [] sinks-databend = [] sinks-datadog_events = [] sinks-datadog_logs = [] -sinks-datadog_metrics = ["protobuf-build"] +sinks-datadog_metrics = ["protobuf-build", "dep:prost-reflect"] sinks-datadog_traces = ["protobuf-build", "dep:rmpv", "dep:rmp-serde", "dep:serde_bytes"] sinks-elasticsearch = ["aws-core", "transforms-metric_to_log"] sinks-file = ["dep:async-compression"] diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index 418c470b2563a..fd759a75ab82a 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -398,6 +398,7 @@ proc-macro2,https://github.com/dtolnay/proc-macro2,MIT OR Apache-2.0,"David Toln proptest,https://github.com/proptest-rs/proptest,MIT OR Apache-2.0,Jason Lingle prost,https://github.com/tokio-rs/prost,Apache-2.0,"Dan Burkert , Lucio Franco " prost-derive,https://github.com/tokio-rs/prost,Apache-2.0,"Dan Burkert , Lucio Franco , Tokio Contributors " +prost-reflect,https://github.com/andrewhickman/prost-reflect,MIT OR Apache-2.0,Andrew Hickman ptr_meta,https://github.com/djkoloski/ptr_meta,MIT,David Koloski pulsar,https://github.com/streamnative/pulsar-rs,MIT OR Apache-2.0,"Colin Stearns , Kevin Stenerson , Geoffroy Couprie " quad-rand,https://github.com/not-fl3/quad-rand,MIT,not-fl3 diff --git a/build.rs b/build.rs index 20cfd23d52a88..31c151f4b61d7 100644 --- a/build.rs +++ b/build.rs @@ -1,4 +1,11 @@ -use std::{collections::HashSet, env, fs::File, io::Write, path::Path, process::Command}; +use std::{ + collections::HashSet, + env, + fs::File, + io::Write, + path::{Path, PathBuf}, + process::Command, +}; struct TrackedEnv { tracked: HashSet, @@ -124,8 +131,19 @@ fn main() { 
println!("cargo:rerun-if-changed=proto/google/rpc/status.proto"); println!("cargo:rerun-if-changed=proto/vector.proto"); + // Create and store the "file descriptor set" from the compiled Protocol Buffers packages. + // + // This allows us to use runtime reflection to manually build Protocol Buffers payloads + // in a type-safe way, which is necessary for incrementally building certain payloads, like + // the ones generated in the `datadog_metrics` sink. + let protobuf_fds_path = + PathBuf::from(std::env::var("OUT_DIR").expect("OUT_DIR environment variable not set")) + .join("protobuf-fds.bin"); + let mut prost_build = prost_build::Config::new(); - prost_build.btree_map(["."]); + prost_build + .btree_map(["."]) + .file_descriptor_set_path(protobuf_fds_path); tonic_build::configure() .protoc_arg("--experimental_allow_proto3_optional") diff --git a/src/internal_events/datadog_metrics.rs b/src/internal_events/datadog_metrics.rs index 792d8496f041d..c4daf1d3ce7f8 100644 --- a/src/internal_events/datadog_metrics.rs +++ b/src/internal_events/datadog_metrics.rs @@ -7,19 +7,17 @@ use vector_common::internal_event::{ }; #[derive(Debug)] -pub struct DatadogMetricsEncodingError { - pub error_message: &'static str, +pub struct DatadogMetricsEncodingError<'a> { + pub reason: &'a str, pub error_code: &'static str, pub dropped_events: usize, } -impl InternalEvent for DatadogMetricsEncodingError { +impl<'a> InternalEvent for DatadogMetricsEncodingError<'a> { fn emit(self) { - let reason = "Failed to encode Datadog metrics."; error!( - message = reason, - error = %self.error_message, - error_code = %self.error_code, + message = self.reason, + error_code = self.error_code, error_type = error_type::ENCODER_FAILED, intentional = "false", stage = error_stage::PROCESSING, @@ -35,7 +33,7 @@ impl InternalEvent for DatadogMetricsEncodingError { if self.dropped_events > 0 { emit!(ComponentEventsDropped:: { count: self.dropped_events, - reason, + reason: self.reason, }); } } diff --git a/src/proto.rs b/src/proto.rs deleted file mode 100644 index b77e94c30f793..0000000000000 --- a/src/proto.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[cfg(any(feature = "sources-vector", feature = "sinks-vector"))] -use crate::event::proto as event; - -#[cfg(any(feature = "sources-vector", feature = "sinks-vector"))] -pub mod vector; diff --git a/src/proto/mod.rs b/src/proto/mod.rs new file mode 100644 index 0000000000000..efa1728fb6988 --- /dev/null +++ b/src/proto/mod.rs @@ -0,0 +1,19 @@ +#[cfg(any(feature = "sources-vector", feature = "sinks-vector"))] +use crate::event::proto as event; + +#[cfg(any(feature = "sources-vector", feature = "sinks-vector"))] +pub mod vector; + +#[cfg(feature = "sinks-datadog_metrics")] +pub mod fds { + use once_cell::sync::OnceCell; + use prost_reflect::DescriptorPool; + + pub fn protobuf_descriptors() -> &'static DescriptorPool { + static PROTOBUF_FDS: OnceCell = OnceCell::new(); + PROTOBUF_FDS.get_or_init(|| { + DescriptorPool::decode(include_bytes!(concat!(env!("OUT_DIR"), "/protobuf-fds.bin")).as_ref()) + .expect("should not fail to decode protobuf file descriptor set generated from build script") + }) + } +} diff --git a/src/sinks/datadog/metrics/config.rs b/src/sinks/datadog/metrics/config.rs index 1acedc003c079..9fb8c4cd48137 100644 --- a/src/sinks/datadog/metrics/config.rs +++ b/src/sinks/datadog/metrics/config.rs @@ -59,6 +59,11 @@ impl DatadogMetricsEndpoint { DatadogMetricsEndpoint::Sketches => "application/x-protobuf", } } + + // Gets whether or not this is a series endpoint. 
+ pub const fn is_series(self) -> bool { + matches!(self, Self::Series) + } } /// Maps Datadog metric endpoints to their actual URI. diff --git a/src/sinks/datadog/metrics/encoder.rs b/src/sinks/datadog/metrics/encoder.rs index a2bd8330c5f35..0dd6c393e31b5 100644 --- a/src/sinks/datadog/metrics/encoder.rs +++ b/src/sinks/datadog/metrics/encoder.rs @@ -7,11 +7,13 @@ use std::{ use bytes::{BufMut, Bytes}; use chrono::{DateTime, Utc}; +use once_cell::sync::OnceCell; use prost::Message; use snafu::{ResultExt, Snafu}; use vector_core::{ config::{log_schema, LogSchema}, event::{metric::MetricSketch, Metric, MetricTags, MetricValue}, + metrics::AgentDDSketch, }; use super::config::{ @@ -19,7 +21,8 @@ use super::config::{ }; use crate::{ common::datadog::{DatadogMetricType, DatadogPoint, DatadogSeriesMetric}, - sinks::util::{encode_namespace, Compression, Compressor}, + proto::fds::protobuf_descriptors, + sinks::util::{encode_namespace, request_builder::EncodeResult, Compression, Compressor}, }; const SERIES_PAYLOAD_HEADER: &[u8] = b"{\"series\":["; @@ -37,6 +40,17 @@ pub enum CreateError { InvalidLimits, } +impl CreateError { + /// Gets the telemetry-friendly string version of this error. + /// + /// The value will be a short string with only lowercase letters and underscores. + pub const fn as_error_type(&self) -> &'static str { + match self { + Self::InvalidLimits => "invalid_payload_limits", + } + } +} + #[derive(Debug, Snafu)] pub enum EncoderError { #[snafu(display( @@ -49,11 +63,31 @@ pub enum EncoderError { metric_value: &'static str, }, - #[snafu(display("Failed to encode series metrics to JSON: {}", source))] + #[snafu( + context(false), + display("Failed to encode series metric to JSON: {source}") + )] JsonEncodingFailed { source: serde_json::Error }, - #[snafu(display("Failed to encode sketch metrics to Protocol Buffers: {}", source))] - ProtoEncodingFailed { source: prost::EncodeError }, + // Currently, the only time `prost` ever emits `EncodeError` is when there is insufficient + // buffer capacity, so we don't need to hold on to the error, and we can just hardcode this. + #[snafu(display( + "Failed to encode sketch metric to Protocol Buffers: insufficient buffer capacity." + ))] + ProtoEncodingFailed, +} + +impl EncoderError { + /// Gets the telemetry-friendly string version of this error. + /// + /// The value will be a short string with only lowercase letters and underscores. + pub const fn as_error_type(&self) -> &'static str { + match self { + Self::InvalidMetric { .. } => "invalid_metric", + Self::JsonEncodingFailed { .. } => "failed_to_encode_series", + Self::ProtoEncodingFailed => "failed_to_encode_sketch", + } + } } #[derive(Debug, Snafu)] @@ -64,9 +98,6 @@ pub enum FinishError { ))] CompressionFailed { source: io::Error }, - #[snafu(display("Failed to encode pending metrics: {}", source))] - PendingEncodeFailed { source: EncoderError }, - #[snafu(display("Finished payload exceeded the (un)compressed size limits"))] TooLarge { metrics: Vec, @@ -81,7 +112,6 @@ impl FinishError { pub const fn as_error_type(&self) -> &'static str { match self { Self::CompressionFailed { .. } => "compression_failed", - Self::PendingEncodeFailed { .. } => "pending_encode_failed", Self::TooLarge { .. 
} => "too_large", } } @@ -91,21 +121,15 @@ struct EncoderState { writer: Compressor, written: usize, buf: Vec, - - pending: Vec, processed: Vec, } impl Default for EncoderState { fn default() -> Self { - EncoderState { - // We use the "zlib default" compressor because it's all Datadog supports, and adding it - // generically to `Compression` would make things a little weird because of the - // conversion trait implementations that are also only none vs gzip. + Self { writer: get_compressor(), written: 0, buf: Vec::with_capacity(1024), - pending: Vec::new(), processed: Vec::new(), } } @@ -145,7 +169,7 @@ impl DatadogMetricsEncoder { compressed_limit: usize, ) -> Result { let (uncompressed_limit, compressed_limit) = - validate_payload_size_limits(uncompressed_limit, compressed_limit) + validate_payload_size_limits(endpoint, uncompressed_limit, compressed_limit) .ok_or(CreateError::InvalidLimits)?; Ok(Self { @@ -195,15 +219,23 @@ impl DatadogMetricsEncoder { { return Ok(Some(metric)); } - serde_json::to_writer(&mut self.state.buf, series) - .context(JsonEncodingFailedSnafu)?; + serde_json::to_writer(&mut self.state.buf, series)?; } } - // We can't encode sketches incrementally (yet), so we don't do any encoding here. We - // simply store it for later, and in `try_encode_pending`, any such pending metrics will be - // encoded in a single operation. + // Sketches are encoded via ProtoBuf, also in an incremental fashion. DatadogMetricsEndpoint::Sketches => match metric.value() { - MetricValue::Sketch { .. } => {} + MetricValue::Sketch { sketch } => match sketch { + MetricSketch::AgentDDSketch(ddsketch) => { + encode_sketch_incremental( + &metric, + ddsketch, + &self.default_namespace, + self.log_schema, + &mut self.state.buf, + ) + .map_err(|_| EncoderError::ProtoEncodingFailed)?; + } + }, value => { return Err(EncoderError::InvalidMetric { expected: "sketches", @@ -213,21 +245,14 @@ impl DatadogMetricsEncoder { }, } - // If we actually encoded a metric, we try to see if our temporary buffer can be compressed - // and added to the overall payload. Otherwise, it means we're deferring the metric for - // later encoding, so we store it off to the side. - if !self.state.buf.is_empty() { - match self.try_compress_buffer() { - Err(_) | Ok(false) => return Ok(Some(metric)), - Ok(true) => {} + // Try and see if our temporary buffer can be written to the compressor. + match self.try_compress_buffer() { + Err(_) | Ok(false) => Ok(Some(metric)), + Ok(true) => { + self.state.processed.push(metric); + Ok(None) } - - self.state.processed.push(metric); - } else { - self.state.pending.push(metric); } - - Ok(None) } fn try_compress_buffer(&mut self) -> io::Result { @@ -254,7 +279,8 @@ impl DatadogMetricsEncoder { // assume the worst case while our limits assume the worst case _overhead_. Maybe our // numbers are technically off in the end, but `finish` catches that for us, too. let compressed_len = self.state.writer.get_ref().len(); - if compressed_len + n > self.compressed_limit { + let max_compressed_metric_len = n + max_compressed_overhead_len(n); + if compressed_len + max_compressed_metric_len > self.compressed_limit { return Ok(false); } @@ -292,56 +318,7 @@ impl DatadogMetricsEncoder { self.encode_single_metric(metric) } - fn try_encode_pending(&mut self) -> Result<(), FinishError> { - // The Datadog Agent uses a particular Protocol Buffers library to incrementally encode the - // DDSketch structures into a payload, similar to how we incrementally encode the series - // metrics. 
Unfortunately, there's no existing Rust crate that allows writing out Protocol - // Buffers payloads by hand, so we have to cheat a little and buffer up the metrics until - // the very end. - // - // `try_encode`, and thus `encode_single_metric`, specifically store sketch-oriented metrics - // off to the side for this very purpose, letting us gather them all here, encoding them - // into a single Protocol Buffers payload. - // - // Naturally, this means we might actually generate a payload that's too big. This is a - // problem for the caller to figure out. Presently, the only usage of this encoder will - // naively attempt to split the batch into two and try again. - - // Only go through this if we're targeting the sketch endpoint. - if !(matches!(self.endpoint, DatadogMetricsEndpoint::Sketches)) { - return Ok(()); - } - - // Consume of all of the "pending" metrics and try to write them out as sketches. - let pending = mem::take(&mut self.state.pending); - write_sketches( - &pending, - &self.default_namespace, - self.log_schema, - &mut self.state.buf, - ) - .context(PendingEncodeFailedSnafu)?; - - if self.try_compress_buffer().context(CompressionFailedSnafu)? { - // Since we encoded and compressed them successfully, add them to the "processed" list. - self.state.processed.extend(pending); - Ok(()) - } else { - // The payload was too big overall, which we can't do anything about. Up to the caller - // now to try to encode them again after splitting the batch. - Err(FinishError::TooLarge { - metrics: pending, - // TODO: Hard-coded split code for now because we need to hoist up the logic for - // calculating the recommended splits to an instance method or something. - recommended_splits: 2, - }) - } - } - - pub fn finish(&mut self) -> Result<(Bytes, Vec, usize), FinishError> { - // Try to encode any pending metrics we had stored up. - self.try_encode_pending()?; - + pub fn finish(&mut self) -> Result<(EncodeResult, Vec), FinishError> { // Write any payload footer necessary for the configured endpoint. let n = write_payload_footer(self.endpoint, &mut self.state.writer) .context(CompressionFailedSnafu)?; @@ -371,7 +348,10 @@ impl DatadogMetricsEncoder { if recommended_splits == 1 { // "One" split means no splits needed: our payload didn't exceed either of the limits. 
- Ok((payload, processed, raw_bytes_written)) + Ok(( + EncodeResult::compressed(payload, raw_bytes_written), + processed, + )) } else { Err(FinishError::TooLarge { metrics: processed, @@ -381,6 +361,104 @@ impl DatadogMetricsEncoder { } } +fn get_sketch_payload_sketches_field_number() -> u32 { + static SKETCH_PAYLOAD_SKETCHES_FIELD_NUM: OnceCell = OnceCell::new(); + *SKETCH_PAYLOAD_SKETCHES_FIELD_NUM.get_or_init(|| { + let descriptors = protobuf_descriptors(); + let descriptor = descriptors + .get_message_by_name("datadog.agentpayload.SketchPayload") + .expect("should not fail to find `SketchPayload` message in descriptor pool"); + + descriptor + .get_field_by_name("sketches") + .map(|field| field.number()) + .expect("`sketches` field must exist in `SketchPayload` message") + }) +} + +fn sketch_to_proto_message( + metric: &Metric, + ddsketch: &AgentDDSketch, + default_namespace: &Option>, + log_schema: &'static LogSchema, +) -> ddmetric_proto::sketch_payload::Sketch { + let name = get_namespaced_name(metric, default_namespace); + let ts = encode_timestamp(metric.timestamp()); + let mut tags = metric.tags().cloned().unwrap_or_default(); + let host = tags.remove(log_schema.host_key()).unwrap_or_default(); + let tags = encode_tags(&tags); + + let cnt = ddsketch.count() as i64; + let min = ddsketch + .min() + .expect("min should be present for non-empty sketch"); + let max = ddsketch + .max() + .expect("max should be present for non-empty sketch"); + let avg = ddsketch + .avg() + .expect("avg should be present for non-empty sketch"); + let sum = ddsketch + .sum() + .expect("sum should be present for non-empty sketch"); + + let (bins, counts) = ddsketch.bin_map().into_parts(); + let k = bins.into_iter().map(Into::into).collect(); + let n = counts.into_iter().map(Into::into).collect(); + + ddmetric_proto::sketch_payload::Sketch { + metric: name, + tags, + host, + distributions: Vec::new(), + dogsketches: vec![ddmetric_proto::sketch_payload::sketch::Dogsketch { + ts, + cnt, + min, + max, + avg, + sum, + k, + n, + }], + } +} + +fn encode_sketch_incremental( + metric: &Metric, + ddsketch: &AgentDDSketch, + default_namespace: &Option>, + log_schema: &'static LogSchema, + buf: &mut B, +) -> Result<(), prost::EncodeError> +where + B: BufMut, +{ + // This encodes a single sketch metric incrementally, which means that we specifically write it + // as if we were writing a single field entry in the overall `SketchPayload` message + // type. + // + // By doing so, we can encode multiple sketches and concatenate all the buffers, and have the + // resulting buffer appear as if it's a normal `SketchPayload` message with a bunch of repeats + // of the `sketches` field. + // + // Crucially, this code works because `SketchPayload` has two fields -- metadata and sketches -- + // and we never actually set the metadata field... so the resulting message generated overall + // for `SketchPayload` with a single sketch looks just like as if we literally wrote out a + // single value for the given field. + + let sketch_proto = sketch_to_proto_message(metric, ddsketch, default_namespace, log_schema); + + // Manually write the field tag for `sketches` and then encode the sketch payload directly as a + // length-delimited message. 
+ prost::encoding::encode_key( + get_sketch_payload_sketches_field_number(), + prost::encoding::WireType::LengthDelimited, + buf, + ); + sketch_proto.encode_length_delimited(buf) +} + fn get_namespaced_name(metric: &Metric, default_namespace: &Option>) -> String { encode_namespace( metric @@ -481,89 +559,10 @@ fn generate_series_metrics( Ok(results) } -fn write_sketches( - metrics: &[Metric], - default_namespace: &Option>, - log_schema: &'static LogSchema, - buf: &mut B, -) -> Result<(), EncoderError> -where - B: BufMut, -{ - let mut sketches = Vec::new(); - for metric in metrics { - match metric.value() { - MetricValue::Sketch { sketch } => match sketch { - MetricSketch::AgentDDSketch(ddsketch) => { - // Don't encode any empty sketches. - if ddsketch.is_empty() { - continue; - } - - let name = get_namespaced_name(metric, default_namespace); - let ts = encode_timestamp(metric.timestamp()); - let mut tags = metric.tags().cloned().unwrap_or_default(); - let host = tags.remove(log_schema.host_key()).unwrap_or_default(); - let tags = encode_tags(&tags); - - let cnt = ddsketch.count() as i64; - let min = ddsketch - .min() - .expect("min should be present for non-empty sketch"); - let max = ddsketch - .max() - .expect("max should be present for non-empty sketch"); - let avg = ddsketch - .avg() - .expect("avg should be present for non-empty sketch"); - let sum = ddsketch - .sum() - .expect("sum should be present for non-empty sketch"); - - let (bins, counts) = ddsketch.bin_map().into_parts(); - let k = bins.into_iter().map(Into::into).collect(); - let n = counts.into_iter().map(Into::into).collect(); - - let sketch = ddmetric_proto::sketch_payload::Sketch { - metric: name, - tags, - host, - distributions: Vec::new(), - dogsketches: vec![ddmetric_proto::sketch_payload::sketch::Dogsketch { - ts, - cnt, - min, - max, - avg, - sum, - k, - n, - }], - }; - - sketches.push(sketch); - } - }, - // We filter out non-sketch metrics during `encode_single_metric` if we're targeting - // the sketches endpoint. - _ => unreachable!(), - } - } - - let sketch_payload = ddmetric_proto::SketchPayload { - // TODO: The "common metadata" fields are things that only very loosely apply to Vector, or - // are hard to characterize -- for example, what's the API key for a sketch that didn't originate - // from the Datadog Agent? -- so we're just omitting it here in the hopes it doesn't - // actually matter. - metadata: None, - sketches, - }; - - // Now try encoding this sketch payload, and then try to compress it. - sketch_payload.encode(buf).context(ProtoEncodingFailedSnafu) -} - fn get_compressor() -> Compressor { + // We use the "zlib default" compressor because it's all Datadog supports, and adding it + // generically to `Compression` would make things a little weird because of the conversion trait + // implementations that are also only none vs gzip. Compression::zlib_default().into() } @@ -571,39 +570,52 @@ const fn max_uncompressed_header_len() -> usize { SERIES_PAYLOAD_HEADER.len() + SERIES_PAYLOAD_FOOTER.len() } +// Datadog ingest APIs accept zlib, which is what we're accounting for here. By default, zlib +// has a 2 byte header and 4 byte CRC trailer. [1] +// +// [1] https://www.zlib.net/zlib_tech.html +const ZLIB_HEADER_TRAILER: usize = 6; + const fn max_compression_overhead_len(compressed_limit: usize) -> usize { - // Datadog ingest APIs accept zlib, which is what we're accounting for here. By default, zlib - // has a 2 byte header and 4 byte CRC trailer. 
Additionally, Deflate, the underlying - // compression algorithm, has a technique to ensure that input data can't be encoded in such a - // way where it's expanded by a meaningful amount. + // We calculate the overhead as the zlib header/trailer plus the worst case overhead of + // compressing `compressed_limit` bytes, such that we assume all of the data we write may not be + // compressed at all. + ZLIB_HEADER_TRAILER + max_compressed_overhead_len(compressed_limit) +} + +const fn max_compressed_overhead_len(len: usize) -> usize { + // Datadog ingest APIs accept zlib, which is what we're accounting for here. // - // This technique allows storing blocks of uncompressed data with only 5 bytes of overhead per - // block. Technically, the blocks can be up to 65KB in Deflate, but modern zlib implementations - // use block sizes of 16KB. [1][2] + // Deflate, the underlying compression algorithm, has a technique to ensure that input data + // can't be encoded in such a way where it's expanded by a meaningful amount. This technique + // allows storing blocks of uncompressed data with only 5 bytes of overhead per block. + // Technically, the blocks can be up to 65KB in Deflate, but modern zlib implementations use + // block sizes of 16KB. [1][2] // - // With all of that said, we calculate the overhead as the header plus trailer plus the given - // compressed size limit, minus the known overhead, multiplied such that it accounts for the - // worse case of entirely uncompressed data. + // We calculate the overhead of compressing a given `len` bytes as the worst case of that many + // bytes being written to the compressor and being unable to be compressed at all // // [1] https://www.zlib.net/zlib_tech.html // [2] https://www.bolet.org/~pornin/deflate-flush-fr.html - const HEADER_TRAILER: usize = 6; const STORED_BLOCK_SIZE: usize = 16384; - HEADER_TRAILER + (1 + compressed_limit.saturating_sub(HEADER_TRAILER) / STORED_BLOCK_SIZE) * 5 + (1 + len.saturating_sub(ZLIB_HEADER_TRAILER) / STORED_BLOCK_SIZE) * 5 } const fn validate_payload_size_limits( + endpoint: DatadogMetricsEndpoint, uncompressed_limit: usize, compressed_limit: usize, ) -> Option<(usize, usize)> { - // Get the maximum possible length of the header/footer combined. - // - // This only matters for series metrics at the moment, since sketches are encoded in a single - // shot to their Protocol Buffers representation. We're "wasting" `header_len` bytes in the - // case of sketches, but we're also talking about like 10 bytes: not enough to care about. - let header_len = max_uncompressed_header_len(); - if uncompressed_limit <= header_len { - return None; + if endpoint.is_series() { + // For series, we need to make sure the uncompressed limit can account for the header/footer + // we would add that wraps the encoded metrics up in the expected JSON object. This does + // imply that adding 1 to this limit would be allowed, and obviously we can't encode a + // series metric in a single byte, but this is just a simple sanity check, not an exhaustive + // search of the absolute bare minimum size. 
+ let header_len = max_uncompressed_header_len(); + if uncompressed_limit <= header_len { + return None; + } } // Get the maximum possible overhead of the compression container, based on the incoming @@ -659,6 +671,7 @@ mod tests { use std::{ io::{self, copy}, num::NonZeroU32, + sync::Arc, }; use bytes::{BufMut, Bytes, BytesMut}; @@ -668,16 +681,21 @@ mod tests { arbitrary::any, collection::btree_map, num::f64::POSITIVE as ARB_POSITIVE_F64, prop_assert, proptest, strategy::Strategy, string::string_regex, }; + use prost::Message; use vector_core::{ - config::log_schema, - event::{metric::TagValue, Metric, MetricKind, MetricTags, MetricValue}, + config::{log_schema, LogSchema}, + event::{ + metric::{MetricSketch, TagValue}, + Metric, MetricKind, MetricTags, MetricValue, + }, metric_tags, metrics::AgentDDSketch, }; use super::{ - encode_tags, encode_timestamp, generate_series_metrics, get_compressor, - max_compression_overhead_len, max_uncompressed_header_len, validate_payload_size_limits, + ddmetric_proto, encode_sketch_incremental, encode_tags, encode_timestamp, + generate_series_metrics, get_compressor, max_compression_overhead_len, + max_uncompressed_header_len, sketch_to_proto_message, validate_payload_size_limits, write_payload_footer, write_payload_header, DatadogMetricsEncoder, EncoderError, }; use crate::{ @@ -714,6 +732,10 @@ mod tests { compressor.finish().expect("should not fail").freeze() } + fn get_compressed_empty_sketches_payload() -> Bytes { + get_compressor().finish().expect("should not fail").freeze() + } + fn decompress_payload(payload: Bytes) -> io::Result { let mut decompressor = ZlibDecoder::new(&payload[..]); let mut decompressed = BytesMut::new().writer(); @@ -738,6 +760,41 @@ mod tests { } } + fn encode_sketches_normal( + metrics: &[Metric], + default_namespace: &Option>, + log_schema: &'static LogSchema, + buf: &mut B, + ) where + B: BufMut, + { + let mut sketches = Vec::new(); + for metric in metrics { + let MetricValue::Sketch { sketch } = metric.value() else { panic!("must be sketch") }; + match sketch { + MetricSketch::AgentDDSketch(ddsketch) => { + // Don't encode any empty sketches. + if ddsketch.is_empty() { + continue; + } + + let sketch = + sketch_to_proto_message(metric, ddsketch, default_namespace, log_schema); + + sketches.push(sketch); + } + } + } + + let sketch_payload = ddmetric_proto::SketchPayload { + metadata: None, + sketches, + }; + + // Now try encoding this sketch payload, and then try to compress it. + sketch_payload.encode(buf).unwrap() + } + #[test] fn test_encode_tags() { assert_eq!( @@ -825,16 +882,9 @@ mod tests { let result = encoder.finish(); assert!(result.is_ok()); - let (payload, mut processed, raw_bytes) = result.unwrap(); + let (_payload, mut processed) = result.unwrap(); assert_eq!(processed.len(), 1); assert_eq!(expected, processed.pop().unwrap()); - assert_eq!(100, payload.len()); - - // The payload is: - // {"series":[{"metric":"basic_counter","type":"count","interval":null,"points":[[1651664333,3.14]],"tags":[]}]} - // which comes to a total of 98 bytes. - // There are extra bytes that make up the header and footer. These should not be included in the raw bytes. 
- assert_eq!(109, raw_bytes); } #[test] @@ -855,25 +905,60 @@ mod tests { let result = encoder.finish(); assert!(result.is_ok()); - let (payload, mut processed, raw_bytes) = result.unwrap(); + let (_payload, mut processed) = result.unwrap(); assert_eq!(processed.len(), 1); assert_eq!(expected, processed.pop().unwrap()); + } - assert_eq!(81, payload.len()); - assert_eq!(70, raw_bytes); + #[test] + fn encode_multiple_sketch_metrics_normal_vs_incremental() { + // This tests our incremental sketch encoding against the more straightforward approach of + // just building/encoding a full `SketchPayload` message. + let metrics = vec![ + get_simple_sketch(), + get_simple_sketch(), + get_simple_sketch(), + ]; + + let mut normal_buf = Vec::new(); + encode_sketches_normal(&metrics, &None, log_schema(), &mut normal_buf); + + let mut incremental_buf = Vec::new(); + for metric in &metrics { + match metric.value() { + MetricValue::Sketch { sketch } => match sketch { + MetricSketch::AgentDDSketch(ddsketch) => encode_sketch_incremental( + metric, + ddsketch, + &None, + log_schema(), + &mut incremental_buf, + ) + .unwrap(), + }, + _ => panic!("should be a sketch"), + } + } + + assert_eq!(normal_buf, incremental_buf); } #[test] - fn payload_size_limits() { + fn payload_size_limits_series() { // Get the maximum length of the header/trailer data. let header_len = max_uncompressed_header_len(); // This is too small. - let result = validate_payload_size_limits(header_len, usize::MAX); + let result = + validate_payload_size_limits(DatadogMetricsEndpoint::Series, header_len, usize::MAX); assert_eq!(result, None); // This is just right. - let result = validate_payload_size_limits(header_len + 1, usize::MAX); + let result = validate_payload_size_limits( + DatadogMetricsEndpoint::Series, + header_len + 1, + usize::MAX, + ); assert_eq!(result, Some((header_len + 1, usize::MAX))); // Get the maximum compressed overhead length, based on our input uncompressed size. This @@ -882,16 +967,52 @@ mod tests { let compression_overhead_len = max_compression_overhead_len(usize::MAX); // This is too small. - let result = validate_payload_size_limits(usize::MAX, compression_overhead_len); + let result = validate_payload_size_limits( + DatadogMetricsEndpoint::Series, + usize::MAX, + compression_overhead_len, + ); + assert_eq!(result, None); + + // This is just right. + let result = validate_payload_size_limits( + DatadogMetricsEndpoint::Series, + usize::MAX, + compression_overhead_len + 1, + ); + assert_eq!(result, Some((usize::MAX, compression_overhead_len + 1))); + } + + #[test] + fn payload_size_limits_sketches() { + // There's no lower bound on uncompressed size for the sketches payload. + let result = validate_payload_size_limits(DatadogMetricsEndpoint::Sketches, 0, usize::MAX); + assert_eq!(result, Some((0, usize::MAX))); + + // Get the maximum compressed overhead length, based on our input uncompressed size. This + // represents the worst case overhead based on the input data (of length usize::MAX, in this + // case) being entirely incompressible. + let compression_overhead_len = max_compression_overhead_len(usize::MAX); + + // This is too small. + let result = validate_payload_size_limits( + DatadogMetricsEndpoint::Sketches, + usize::MAX, + compression_overhead_len, + ); assert_eq!(result, None); // This is just right. 
- let result = validate_payload_size_limits(usize::MAX, compression_overhead_len + 1); + let result = validate_payload_size_limits( + DatadogMetricsEndpoint::Sketches, + usize::MAX, + compression_overhead_len + 1, + ); assert_eq!(result, Some((usize::MAX, compression_overhead_len + 1))); } #[test] - fn encode_breaks_out_when_limit_reached_uncompressed() { + fn encode_series_breaks_out_when_limit_reached_uncompressed() { // We manually create the encoder with an arbitrarily low "uncompressed" limit but high // "compressed" limit to exercise the codepath that should avoid encoding a metric when the // uncompressed payload would exceed the limit. @@ -905,7 +1026,8 @@ mod tests { .expect("payload size limits should be valid"); // Trying to encode a metric that would cause us to exceed our uncompressed limits will - // _not_ return an error from `try_encode`. + // _not_ return an error from `try_encode`, but instead will simply return back the metric + // as it could not be added. let counter = get_simple_counter(); let result = encoder.try_encode(counter.clone()); assert!(result.is_ok()); @@ -917,17 +1039,55 @@ mod tests { let result = encoder.finish(); assert!(result.is_ok()); - let (payload, processed, raw_bytes) = result.unwrap(); - let empty_payload = get_compressed_empty_series_payload(); - assert_eq!(payload, empty_payload); + let (payload, processed) = result.unwrap(); + assert_eq!( + payload.uncompressed_byte_size, + max_uncompressed_header_len() + ); + assert_eq!( + payload.into_payload(), + get_compressed_empty_series_payload() + ); assert_eq!(processed.len(), 0); + } + + #[test] + fn encode_sketches_breaks_out_when_limit_reached_uncompressed() { + // We manually create the encoder with an arbitrarily low "uncompressed" limit but high + // "compressed" limit to exercise the codepath that should avoid encoding a metric when the + // uncompressed payload would exceed the limit. + let mut encoder = DatadogMetricsEncoder::with_payload_limits( + DatadogMetricsEndpoint::Sketches, + None, + 1, + usize::MAX, + ) + .expect("payload size limits should be valid"); + + // Trying to encode a metric that would cause us to exceed our uncompressed limits will + // _not_ return an error from `try_encode`, but instead will simply return back the metric + // as it could not be added. + let sketch = get_simple_sketch(); + let result = encoder.try_encode(sketch.clone()); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), Some(sketch)); - // Just the header and footer. - assert_eq!(13, raw_bytes); + // And similarly, since we didn't actually encode a metric, we _should_ be able to finish + // this payload, but it will be empty and no processed metrics should be returned. + let result = encoder.finish(); + assert!(result.is_ok()); + + let (payload, processed) = result.unwrap(); + assert_eq!(payload.uncompressed_byte_size, 0); + assert_eq!( + payload.into_payload(), + get_compressed_empty_sketches_payload() + ); + assert_eq!(processed.len(), 0); } #[test] - fn encode_breaks_out_when_limit_reached_compressed() { + fn encode_series_breaks_out_when_limit_reached_compressed() { // We manually create the encoder with an arbitrarily low "compressed" limit but high // "uncompressed" limit to exercise the codepath that should avoid encoding a metric when the // compressed payload would exceed the limit. 
@@ -942,7 +1102,8 @@ mod tests { .expect("payload size limits should be valid"); // Trying to encode a metric that would cause us to exceed our compressed limits will - // _not_ return an error from `try_encode`. + // _not_ return an error from `try_encode`, but instead will simply return back the metric + // as it could not be added. let counter = get_simple_counter(); let result = encoder.try_encode(counter.clone()); assert!(result.is_ok()); @@ -954,13 +1115,54 @@ mod tests { let result = encoder.finish(); assert!(result.is_ok()); - let (payload, processed, raw_bytes) = result.unwrap(); - let empty_payload = get_compressed_empty_series_payload(); - assert_eq!(payload, empty_payload); + let (payload, processed) = result.unwrap(); + assert_eq!( + payload.uncompressed_byte_size, + max_uncompressed_header_len() + ); + assert_eq!( + payload.into_payload(), + get_compressed_empty_series_payload() + ); assert_eq!(processed.len(), 0); + } - // Just the header and footer. - assert_eq!(13, raw_bytes); + #[test] + fn encode_sketches_breaks_out_when_limit_reached_compressed() { + // We manually create the encoder with an arbitrarily low "compressed" limit but high + // "uncompressed" limit to exercise the codepath that should avoid encoding a metric when the + // compressed payload would exceed the limit. + let uncompressed_limit = 128; + let compressed_limit = 16; + let mut encoder = DatadogMetricsEncoder::with_payload_limits( + DatadogMetricsEndpoint::Sketches, + None, + uncompressed_limit, + compressed_limit, + ) + .expect("payload size limits should be valid"); + + // Trying to encode a metric that would cause us to exceed our compressed limits will + // _not_ return an error from `try_encode`, but instead will simply return back the metric + // as it could not be added. + let sketch = get_simple_sketch(); + let result = encoder.try_encode(sketch.clone()); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), Some(sketch)); + + // And similarly, since we didn't actually encode a metric, we _should_ be able to finish + // this payload, but it will be empty (effectively, the header/footer will exist) and no + // processed metrics should be returned. 
+ let result = encoder.finish(); + assert!(result.is_ok()); + + let (payload, processed) = result.unwrap(); + assert_eq!(payload.uncompressed_byte_size, 0); + assert_eq!( + payload.into_payload(), + get_compressed_empty_sketches_payload() + ); + assert_eq!(processed.len(), 0); } fn arb_counter_metric() -> impl Strategy { @@ -1003,7 +1205,8 @@ mod tests { if let Ok(mut encoder) = result { _ = encoder.try_encode(metric); - if let Ok((payload, _processed, _raw_bytes)) = encoder.finish() { + if let Ok((payload, _processed)) = encoder.finish() { + let payload = payload.into_payload(); prop_assert!(payload.len() <= compressed_limit); let result = decompress_payload(payload); diff --git a/src/sinks/datadog/metrics/request_builder.rs b/src/sinks/datadog/metrics/request_builder.rs index 64b1226b661bf..d217986d6f520 100644 --- a/src/sinks/datadog/metrics/request_builder.rs +++ b/src/sinks/datadog/metrics/request_builder.rs @@ -1,12 +1,8 @@ use bytes::Bytes; -use serde_json::error::Category; use snafu::Snafu; -use std::{num::NonZeroUsize, sync::Arc}; +use std::sync::Arc; use vector_common::request_metadata::RequestMetadata; -use vector_core::{ - event::{EventFinalizers, Finalizable, Metric}, - EstimatedJsonEncodedSizeOf, -}; +use vector_core::event::{EventFinalizers, Finalizable, Metric}; use super::{ config::{DatadogMetricsEndpoint, DatadogMetricsEndpointConfiguration}, @@ -17,19 +13,19 @@ use crate::sinks::util::{metadata::RequestMetadataBuilder, IncrementalRequestBui #[derive(Debug, Snafu)] pub enum RequestBuilderError { - #[snafu(display("Failed to build the request builder: {}", error_type))] - FailedToBuild { error_type: &'static str }, + #[snafu( + context(false), + display("Failed to build the request builder: {source}") + )] + FailedToBuild { source: CreateError }, - #[snafu(display("Encoding of a metric failed ({})", reason))] - FailedToEncode { - reason: &'static str, - dropped_events: u64, - }, + #[snafu(context(false), display("Failed to encode metric: {source}"))] + FailedToEncode { source: EncoderError }, - #[snafu(display("A split payload was still too big to encode/compress within size limits"))] + #[snafu(display("A split payload was still too big to encode/compress within size limits."))] FailedToSplit { dropped_events: u64 }, - #[snafu(display("An unexpected error occurred"))] + #[snafu(display("An unexpected error occurred: {error_type}"))] Unexpected { error_type: &'static str, dropped_events: u64, @@ -37,78 +33,28 @@ pub enum RequestBuilderError { } impl RequestBuilderError { - /// Converts this error into its constituent parts: the error reason, and how many events were - /// dropped as a result. - pub const fn into_parts(self) -> (&'static str, &'static str, u64) { + /// Converts this error into its constituent parts: the error reason, the error type, and how + /// many events were dropped as a result. + pub fn into_parts(self) -> (String, &'static str, u64) { match self { - Self::FailedToBuild { error_type } => { - ("Failed to build the request builder.", error_type, 0) - } - Self::FailedToEncode { - reason, - dropped_events, - } => ("Encoding of a metric failed.", reason, dropped_events), + Self::FailedToBuild { source } => (source.to_string(), source.as_error_type(), 0), + // Encoding errors always happen at the per-metric level, so we could only ever drop a + // single metric/event at a time. 
+ Self::FailedToEncode { source } => (source.to_string(), source.as_error_type(), 1), Self::FailedToSplit { dropped_events } => ( - "A split payload was still too big to encode/compress withing size limits.", + "A split payload was still too big to encode/compress withing size limits." + .to_string(), "split_failed", dropped_events, ), Self::Unexpected { error_type, dropped_events, - } => ("An unexpected error occurred.", error_type, dropped_events), - } - } -} - -impl From for RequestBuilderError { - fn from(e: CreateError) -> Self { - match e { - CreateError::InvalidLimits => Self::FailedToBuild { - error_type: "invalid_payload_limits", - }, - } - } -} - -impl From for RequestBuilderError { - fn from(e: EncoderError) -> Self { - match e { - // Series metrics (JSON) are encoded incrementally, so we can only ever lose a single - // metric for a JSON encoding failure. - EncoderError::JsonEncodingFailed { source } => Self::FailedToEncode { - reason: match source.classify() { - Category::Io => "json_io", - Category::Syntax => "json_syntax", - Category::Data => "json_data", - Category::Eof => "json_eof", - }, - dropped_events: 1, - }, - // Sketch metrics (Protocol Buffers) are encoded in a single shot, so naturally we would - // expect `dropped_events` to be 1-N, instead of always 1. We should never emit this - // metric when calling `try_encode`, which is where we'd see the JSON variant of it. - // This is because sketch encoding happens at the end. - // - // Thus, we default `dropped_events` to 1, and if we actually hit this error when - // finishing up a payload, we'll fix up the true number of dropped events at that point. - EncoderError::ProtoEncodingFailed { .. } => Self::FailedToEncode { - // `prost` states that for an encoding error specifically, it can only ever fail due - // to insufficient capacity in the encoding buffer. - reason: "protobuf_insufficient_buf_capacity", - dropped_events: 1, - }, - // Not all metric types for valid depending on the configured endpoint of the encoder. - EncoderError::InvalidMetric { metric_value, .. } => Self::FailedToEncode { - // TODO: At some point, it would be nice to use `const_format` to build the reason - // as " _via_" to better understand in what context - // metric X is being considered as invalid. Practically it's not a huge issue, - // because the number of metric types are fixed and we should be able to inspect the - // code for issues, or if it became a big problem, we could just go ahead and do the - // `const_format` work... but it'd be nice to be ahead of curve when trivially possible. - reason: metric_value, - dropped_events: 1, - }, + } => ( + "An unexpected error occurred.".to_string(), + error_type, + dropped_events, + ), } } } @@ -118,7 +64,6 @@ pub struct DDMetricsMetadata { api_key: Option>, endpoint: DatadogMetricsEndpoint, finalizers: EventFinalizers, - raw_bytes: usize, } /// Incremental request builder specific to Datadog metrics. @@ -211,24 +156,21 @@ impl IncrementalRequestBuilder<((Option>, DatadogMetricsEndpoint), Vec< // If we encoded one or more metrics this pass, finalize the payload. 
if n > 0 { match encoder.finish() { - Ok((payload, mut metrics, raw_bytes_written)) => { - let json_size = metrics.estimated_json_encoded_size_of(); + Ok((encode_result, mut metrics)) => { let finalizers = metrics.take_finalizers(); let metadata = DDMetricsMetadata { api_key: api_key.as_ref().map(Arc::clone), endpoint, finalizers, - raw_bytes: raw_bytes_written, }; - let builder = RequestMetadataBuilder::new( - metrics.len(), - raw_bytes_written, - json_size, - ); - let bytes_len = NonZeroUsize::new(payload.len()) - .expect("payload should never be zero length"); - let request_metadata = builder.with_request_size(bytes_len); - results.push(Ok(((metadata, request_metadata), payload))); + + let request_metadata = + RequestMetadataBuilder::from_events(&metrics).build(&encode_result); + + results.push(Ok(( + (metadata, request_metadata), + encode_result.into_payload(), + ))); } Err(err) => match err { // The encoder informed us that the resulting payload was too big, so we're @@ -299,7 +241,6 @@ impl IncrementalRequestBuilder<((Option>, DatadogMetricsEndpoint), Vec< uri, content_type: ddmetrics_metadata.endpoint.content_type(), finalizers: ddmetrics_metadata.finalizers, - raw_bytes: ddmetrics_metadata.raw_bytes, metadata: request_metadata, } } @@ -332,21 +273,21 @@ fn encode_now_or_never( encoder .finish() - .map(|(payload, mut processed, raw_bytes_written)| { - let json_size = processed.estimated_json_encoded_size_of(); + .map(|(encode_result, mut processed)| { let finalizers = processed.take_finalizers(); let ddmetrics_metadata = DDMetricsMetadata { api_key, endpoint, finalizers, - raw_bytes: raw_bytes_written, }; - let builder = RequestMetadataBuilder::new(metrics_len, raw_bytes_written, json_size); - let bytes_len = - NonZeroUsize::new(payload.len()).expect("payload should never be zero length"); - let request_metadata = builder.with_request_size(bytes_len); - ((ddmetrics_metadata, request_metadata), payload) + let request_metadata = + RequestMetadataBuilder::from_events(&processed).build(&encode_result); + + ( + (ddmetrics_metadata, request_metadata), + encode_result.into_payload(), + ) }) .map_err(|_| RequestBuilderError::FailedToSplit { dropped_events: metrics_len as u64, diff --git a/src/sinks/datadog/metrics/service.rs b/src/sinks/datadog/metrics/service.rs index 6abacfcc739b7..7f62e6ddaefd5 100644 --- a/src/sinks/datadog/metrics/service.rs +++ b/src/sinks/datadog/metrics/service.rs @@ -60,7 +60,6 @@ pub struct DatadogMetricsRequest { pub uri: Uri, pub content_type: &'static str, pub finalizers: EventFinalizers, - pub raw_bytes: usize, pub metadata: RequestMetadata, } @@ -125,8 +124,7 @@ impl MetaDescriptive for DatadogMetricsRequest { pub struct DatadogMetricsResponse { status_code: StatusCode, body: Bytes, - byte_size: GroupedCountByteSize, - raw_byte_size: usize, + request_metadata: RequestMetadata, } impl DriverResponse for DatadogMetricsResponse { @@ -141,11 +139,12 @@ impl DriverResponse for DatadogMetricsResponse { } fn events_sent(&self) -> &GroupedCountByteSize { - &self.byte_size + self.request_metadata + .events_estimated_json_encoded_byte_size() } fn bytes_sent(&self) -> Option { - Some(self.raw_byte_size) + Some(self.request_metadata.request_wire_size()) } } @@ -184,9 +183,7 @@ impl Service for DatadogMetricsService { let api_key = self.api_key.clone(); Box::pin(async move { - let metadata = std::mem::take(request.metadata_mut()); - let byte_size = metadata.into_events_estimated_json_encoded_byte_size(); - let raw_byte_size = request.raw_bytes; + let request_metadata = 
std::mem::take(request.metadata_mut()); let request = request .into_http_request(api_key) @@ -205,8 +202,7 @@ impl Service for DatadogMetricsService { Ok(DatadogMetricsResponse { status_code: parts.status, body, - byte_size, - raw_byte_size, + request_metadata, }) }) } diff --git a/src/sinks/datadog/metrics/sink.rs b/src/sinks/datadog/metrics/sink.rs index a85eaaf7a3a11..5ceefc3c487d2 100644 --- a/src/sinks/datadog/metrics/sink.rs +++ b/src/sinks/datadog/metrics/sink.rs @@ -123,9 +123,9 @@ where .filter_map(|request| async move { match request { Err(e) => { - let (error_message, error_code, dropped_events) = e.into_parts(); + let (reason, error_code, dropped_events) = e.into_parts(); emit!(DatadogMetricsEncodingError { - error_message, + reason: reason.as_str(), error_code, dropped_events: dropped_events as usize, }); diff --git a/src/sinks/util/metadata.rs b/src/sinks/util/metadata.rs index 521f1c080995c..e6f4e7739e4d2 100644 --- a/src/sinks/util/metadata.rs +++ b/src/sinks/util/metadata.rs @@ -1,6 +1,5 @@ use std::num::NonZeroUsize; -use vector_buffers::EventCount; use vector_core::{config, ByteSizeOf, EstimatedJsonEncodedSizeOf}; use vector_common::{ @@ -21,21 +20,19 @@ pub struct RequestMetadataBuilder { impl RequestMetadataBuilder { pub fn from_events(events: &[E]) -> Self where - E: ByteSizeOf + EventCount + GetEventCountTags + EstimatedJsonEncodedSizeOf, + E: ByteSizeOf + GetEventCountTags + EstimatedJsonEncodedSizeOf, { let mut size = config::telemetry().create_request_count_byte_size(); - let mut event_count = 0; let mut events_byte_size = 0; for event in events { - event_count += 1; events_byte_size += event.size_of(); size.add_event(event, event.estimated_json_encoded_size_of()); } Self { - event_count, + event_count: events.len(), events_byte_size, events_estimated_json_encoded_byte_size: size, } From c19938c9213539da6b4ca6d50554557c87d6fde4 Mon Sep 17 00:00:00 2001 From: Nathan Fox Date: Fri, 30 Jun 2023 09:41:54 -0400 Subject: [PATCH 220/236] fix: remap behavior for root types when using the `Vector` namespace (#17807) This fixes an issue with `remap` when using the `Vector` namespace, where assigning a non-collection value (such as a string) directly to the root ended up with that value nested under the `message` field. Now it stays on the root. 
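The gist of the change, condensed from the `vrl_target.rs` hunk below into a standalone sketch (illustrative only; crate paths and signatures are assumed from this patch):

```rust
use vector_core::config::{log_schema, LogNamespace};
use vector_core::event::{EventMetadata, LogEvent};
use vrl::value::Value;

// How a non-collection root value (e.g. `. = "hello"` in VRL) becomes a log event.
fn root_value_to_log(value: Value, metadata: EventMetadata, ns: LogNamespace) -> LogEvent {
    match ns {
        // Vector namespace: the value stays at the event root.
        LogNamespace::Vector => LogEvent::from_parts(value, metadata),
        // Legacy namespace: the value is still nested under the `message` key.
        LogNamespace::Legacy => {
            let mut log = LogEvent::new_with_metadata(metadata);
            log.insert(log_schema().message_key(), value);
            log
        }
    }
}
```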
--- lib/vector-core/src/event/vrl_target.rs | 183 ++++++++++++++- lib/vector-core/src/schema/definition.rs | 19 ++ src/conditions/vrl.rs | 7 +- src/transforms/remap.rs | 276 ++++++----------------- 4 files changed, 271 insertions(+), 214 deletions(-) diff --git a/lib/vector-core/src/event/vrl_target.rs b/lib/vector-core/src/event/vrl_target.rs index f6277cdfe20a6..3ca714be778f0 100644 --- a/lib/vector-core/src/event/vrl_target.rs +++ b/lib/vector-core/src/event/vrl_target.rs @@ -6,11 +6,13 @@ use lookup::{OwnedTargetPath, OwnedValuePath, PathPrefix}; use snafu::Snafu; use vrl::compiler::value::VrlValueConvert; use vrl::compiler::{ProgramInfo, SecretTarget, Target}; -use vrl::value::Value; +use vrl::prelude::Collection; +use vrl::value::{Kind, Value}; use super::{Event, EventMetadata, LogEvent, Metric, MetricKind, TraceEvent}; -use crate::config::log_schema; +use crate::config::{log_schema, LogNamespace}; use crate::event::metric::TagValue; +use crate::schema::Definition; const VALID_METRIC_PATHS_SET: &str = ".name, .namespace, .timestamp, .kind, .tags"; @@ -114,11 +116,24 @@ impl VrlTarget { } } + /// Modifies a schema in the same way that the `into_events` function modifies the event + pub fn modify_schema_definition_for_into_events(input: Definition) -> Definition { + let log_namespaces = input.log_namespaces().clone(); + + // both namespaces merge arrays, but only `Legacy` moves field definitions into a "message" field. + let merged_arrays = merge_array_definitions(input); + Definition::combine_log_namespaces( + &log_namespaces, + move_field_definitions_into_message(merged_arrays.clone()), + merged_arrays, + ) + } + /// Turn the target back into events. /// /// This returns an iterator of events as one event can be turned into multiple by assigning an /// array to `.` in VRL. - pub fn into_events(self) -> TargetEvents { + pub fn into_events(self, log_namespace: LogNamespace) -> TargetEvents { match self { VrlTarget::LogEvent(value, metadata) => match value { value @ Value::Object(_) => { @@ -131,11 +146,16 @@ impl VrlTarget { _marker: PhantomData, }), - v => { - let mut log = LogEvent::new_with_metadata(metadata); - log.insert(log_schema().message_key(), v); - TargetEvents::One(log.into()) - } + v => match log_namespace { + LogNamespace::Vector => { + TargetEvents::One(LogEvent::from_parts(v, metadata).into()) + } + LogNamespace::Legacy => { + let mut log = LogEvent::new_with_metadata(metadata); + log.insert(log_schema().message_key(), v); + TargetEvents::One(log.into()) + } + }, }, VrlTarget::Trace(value, metadata) => match value { value @ Value::Object(_) => { @@ -174,6 +194,53 @@ impl VrlTarget { } } +/// If the VRL returns a value that is not an array (see [`merge_array_definitions`]), +/// or an object, that data is moved into the `message` field. +fn move_field_definitions_into_message(mut definition: Definition) -> Definition { + let mut message = definition.event_kind().clone(); + message.remove_object(); + message.remove_array(); + + if !message.is_never() { + // We need to add the given message type to a field called `message` + // in the event. 
+ let message = Kind::object(Collection::from(BTreeMap::from([( + log_schema().message_key().into(), + message, + )]))); + + definition.event_kind_mut().remove_bytes(); + definition.event_kind_mut().remove_integer(); + definition.event_kind_mut().remove_float(); + definition.event_kind_mut().remove_boolean(); + definition.event_kind_mut().remove_timestamp(); + definition.event_kind_mut().remove_regex(); + definition.event_kind_mut().remove_null(); + + *definition.event_kind_mut() = definition.event_kind().union(message); + } + + definition +} + +/// If the transform returns an array, the elements of this array will be separated +/// out into it's individual elements and passed downstream. +/// +/// The potential types that the transform can output are any of the arrays +/// elements or any non-array elements that are within the definition. All these +/// definitions need to be merged together. +fn merge_array_definitions(mut definition: Definition) -> Definition { + if let Some(array) = definition.event_kind().as_array() { + let array_kinds = array.reduced_kind(); + + let kind = definition.event_kind_mut(); + kind.remove_array(); + *kind = kind.union(array_kinds); + } + + definition +} + fn set_metric_tag_values(name: String, value: &Value, metric: &mut Metric, multi_value_tags: bool) { if multi_value_tags { let tag_values = value @@ -589,11 +656,107 @@ mod test { use lookup::owned_value_path; use similar_asserts::assert_eq; use vrl::btreemap; + use vrl::value::kind::Index; use super::super::MetricValue; use super::*; use crate::metric_tags; + #[test] + fn test_field_definitions_in_message() { + let definition = + Definition::new_with_default_metadata(Kind::bytes(), [LogNamespace::Legacy]); + assert_eq!( + Definition::new_with_default_metadata( + Kind::object(BTreeMap::from([("message".into(), Kind::bytes())])), + [LogNamespace::Legacy] + ), + move_field_definitions_into_message(definition) + ); + + // Test when a message field already exists. + let definition = Definition::new_with_default_metadata( + Kind::object(BTreeMap::from([("message".into(), Kind::integer())])).or_bytes(), + [LogNamespace::Legacy], + ); + assert_eq!( + Definition::new_with_default_metadata( + Kind::object(BTreeMap::from([( + "message".into(), + Kind::bytes().or_integer() + )])), + [LogNamespace::Legacy] + ), + move_field_definitions_into_message(definition) + ); + } + + #[test] + fn test_merged_array_definitions_simple() { + // Test merging the array definitions where the schema definition + // is simple, containing only one possible type in the array. + let object: BTreeMap = [ + ("carrot".into(), Kind::bytes()), + ("potato".into(), Kind::integer()), + ] + .into(); + + let kind = Kind::array(Collection::from_unknown(Kind::object(object))); + + let definition = Definition::new_with_default_metadata(kind, [LogNamespace::Legacy]); + + let kind = Kind::object(BTreeMap::from([ + ("carrot".into(), Kind::bytes()), + ("potato".into(), Kind::integer()), + ])); + + let wanted = Definition::new_with_default_metadata(kind, [LogNamespace::Legacy]); + let merged = merge_array_definitions(definition); + + assert_eq!(wanted, merged); + } + + #[test] + fn test_merged_array_definitions_complex() { + // Test merging the array definitions where the schema definition + // is fairly complex containing multiple different possible types. 
+ let object: BTreeMap = [ + ("carrot".into(), Kind::bytes()), + ("potato".into(), Kind::integer()), + ] + .into(); + + let array: BTreeMap = [ + (Index::from(0), Kind::integer()), + (Index::from(1), Kind::boolean()), + ( + Index::from(2), + Kind::object(BTreeMap::from([("peas".into(), Kind::bytes())])), + ), + ] + .into(); + + let mut kind = Kind::bytes(); + kind.add_object(object); + kind.add_array(array); + + let definition = Definition::new_with_default_metadata(kind, [LogNamespace::Legacy]); + + let mut kind = Kind::bytes(); + kind.add_integer(); + kind.add_boolean(); + kind.add_object(BTreeMap::from([ + ("carrot".into(), Kind::bytes().or_undefined()), + ("potato".into(), Kind::integer().or_undefined()), + ("peas".into(), Kind::bytes().or_undefined()), + ])); + + let wanted = Definition::new_with_default_metadata(kind, [LogNamespace::Legacy]); + let merged = merge_array_definitions(definition); + + assert_eq!(wanted, merged); + } + #[test] fn log_get() { let cases = vec![ @@ -755,7 +918,7 @@ mod test { Ok(Some(value)) ); assert_eq!( - match target.into_events() { + match target.into_events(LogNamespace::Legacy) { TargetEvents::One(event) => vec![event], TargetEvents::Logs(events) => events.collect::>(), TargetEvents::Traces(events) => events.collect::>(), @@ -901,7 +1064,7 @@ mod test { Target::target_insert(&mut target, &OwnedTargetPath::event_root(), value).unwrap(); assert_eq!( - match target.into_events() { + match target.into_events(LogNamespace::Legacy) { TargetEvents::One(event) => vec![event], TargetEvents::Logs(events) => events.collect::>(), TargetEvents::Traces(events) => events.collect::>(), diff --git a/lib/vector-core/src/schema/definition.rs b/lib/vector-core/src/schema/definition.rs index 62a5bd3b2ff64..a3c5afc034cb4 100644 --- a/lib/vector-core/src/schema/definition.rs +++ b/lib/vector-core/src/schema/definition.rs @@ -457,6 +457,25 @@ impl Definition { self } + /// If the schema definition depends on the `LogNamespace`, this combines the individual + /// definitions for each `LogNamespace`. + pub fn combine_log_namespaces( + log_namespaces: &BTreeSet, + legacy: Self, + vector: Self, + ) -> Self { + let mut combined = + Definition::new_with_default_metadata(Kind::never(), log_namespaces.clone()); + + if log_namespaces.contains(&LogNamespace::Legacy) { + combined = combined.merge(legacy); + } + if log_namespaces.contains(&LogNamespace::Vector) { + combined = combined.merge(vector); + } + combined + } + /// Returns an `OwnedTargetPath` into an event, based on the provided `meaning`, if the meaning exists. 
pub fn meaning_path(&self, meaning: &str) -> Option<&OwnedTargetPath> { match self.meaning.get(meaning) { diff --git a/src/conditions/vrl.rs b/src/conditions/vrl.rs index 0c132ccc485aa..3fb115e7ba7fc 100644 --- a/src/conditions/vrl.rs +++ b/src/conditions/vrl.rs @@ -6,6 +6,7 @@ use vrl::compiler::{CompilationResult, CompileConfig, Program, TypeState, VrlRun use vrl::diagnostic::Formatter; use vrl::value::Value; +use crate::config::LogNamespace; use crate::event::TargetEvents; use crate::{ conditions::{Condition, Conditional, ConditionalConfig}, @@ -84,12 +85,16 @@ pub struct Vrl { impl Vrl { fn run(&self, event: Event) -> (Event, RuntimeResult) { + let log_namespace = event + .maybe_as_log() + .map(|log| log.namespace()) + .unwrap_or(LogNamespace::Legacy); let mut target = VrlTarget::new(event, self.program.info(), false); // TODO: use timezone from remap config let timezone = TimeZone::default(); let result = Runtime::default().resolve(&mut target, &self.program, &timezone); - let original_event = match target.into_events() { + let original_event = match target.into_events(log_namespace) { TargetEvents::One(event) => event, _ => panic!("Event was modified in a condition. This is an internal compiler error."), }; diff --git a/src/transforms/remap.rs b/src/transforms/remap.rs index 381843b9101f3..b47414f3b526b 100644 --- a/src/transforms/remap.rs +++ b/src/transforms/remap.rs @@ -20,8 +20,6 @@ use vrl::compiler::runtime::{Runtime, Terminate}; use vrl::compiler::state::ExternalEnv; use vrl::compiler::{CompileConfig, ExpressionError, Function, Program, TypeState, VrlRuntime}; use vrl::diagnostic::{DiagnosticMessage, Formatter, Note}; -use vrl::value::kind::merge::{CollisionStrategy, Strategy}; -use vrl::value::kind::Collection; use vrl::value::{Kind, Value}; use crate::config::OutputId; @@ -287,63 +285,35 @@ impl TransformConfig for RemapConfig { // When a message is dropped and re-routed, we keep the original event, but also annotate // it with additional metadata. 
- let mut dropped_definition = Definition::new_with_default_metadata( - Kind::never(), - input_definition.log_namespaces().clone(), + let dropped_definition = Definition::combine_log_namespaces( + input_definition.log_namespaces(), + input_definition.clone().with_event_field( + &parse_value_path(log_schema().metadata_key()).expect("valid metadata key"), + Kind::object(BTreeMap::from([ + ("reason".into(), Kind::bytes()), + ("message".into(), Kind::bytes()), + ("component_id".into(), Kind::bytes()), + ("component_type".into(), Kind::bytes()), + ("component_kind".into(), Kind::bytes()), + ])), + Some("metadata"), + ), + input_definition + .clone() + .with_metadata_field(&owned_value_path!("reason"), Kind::bytes(), None) + .with_metadata_field(&owned_value_path!("message"), Kind::bytes(), None) + .with_metadata_field(&owned_value_path!("component_id"), Kind::bytes(), None) + .with_metadata_field(&owned_value_path!("component_type"), Kind::bytes(), None) + .with_metadata_field(&owned_value_path!("component_kind"), Kind::bytes(), None), ); - if input_definition - .log_namespaces() - .contains(&LogNamespace::Legacy) - { - dropped_definition = - dropped_definition.merge(input_definition.clone().with_event_field( - &parse_value_path(log_schema().metadata_key()).expect("valid metadata key"), - Kind::object(BTreeMap::from([ - ("reason".into(), Kind::bytes()), - ("message".into(), Kind::bytes()), - ("component_id".into(), Kind::bytes()), - ("component_type".into(), Kind::bytes()), - ("component_kind".into(), Kind::bytes()), - ])), - Some("metadata"), - )); - } - - if input_definition - .log_namespaces() - .contains(&LogNamespace::Vector) - { - dropped_definition = dropped_definition.merge( - input_definition - .clone() - .with_metadata_field(&owned_value_path!("reason"), Kind::bytes(), None) - .with_metadata_field(&owned_value_path!("message"), Kind::bytes(), None) - .with_metadata_field( - &owned_value_path!("component_id"), - Kind::bytes(), - None, - ) - .with_metadata_field( - &owned_value_path!("component_type"), - Kind::bytes(), - None, - ) - .with_metadata_field( - &owned_value_path!("component_kind"), - Kind::bytes(), - None, - ), - ); - } - default_definitions.insert( output_id.clone(), - move_field_definitions_into_message(merge_array_definitions(default_definition)), + VrlTarget::modify_schema_definition_for_into_events(default_definition), ); dropped_definitions.insert( output_id.clone(), - move_field_definitions_into_message(merge_array_definitions(dropped_definition)), + VrlTarget::modify_schema_definition_for_into_events(dropped_definition), ); } @@ -548,6 +518,11 @@ where None }; + let log_namespace = event + .maybe_as_log() + .map(|log| log.namespace()) + .unwrap_or(LogNamespace::Legacy); + let mut target = VrlTarget::new( event, self.program.info(), @@ -559,7 +534,7 @@ where let result = self.run_vrl(&mut target); match result { - Ok(_) => match target.into_events() { + Ok(_) => match target.into_events(log_namespace) { TargetEvents::One(event) => push_default(event, output), TargetEvents::Logs(events) => events.for_each(|event| push_default(event, output)), TargetEvents::Traces(events) => { @@ -610,58 +585,6 @@ fn push_dropped(event: Event, output: &mut TransformOutputsBuf) { output.push(Some(DROPPED), event); } -/// If the VRL returns a value that is not an array (see [`merge_array_definitions`]), -/// or an object, that data is moved into the `message` field. 
-fn move_field_definitions_into_message(mut definition: schema::Definition) -> schema::Definition { - let mut message = definition.event_kind().clone(); - message.remove_object(); - message.remove_array(); - - if !message.is_never() { - // We need to add the given message type to a field called `message` - // in the event. - let message = Kind::object(Collection::from(BTreeMap::from([( - log_schema().message_key().into(), - message, - )]))); - - definition.event_kind_mut().remove_bytes(); - definition.event_kind_mut().remove_integer(); - definition.event_kind_mut().remove_float(); - definition.event_kind_mut().remove_boolean(); - definition.event_kind_mut().remove_timestamp(); - definition.event_kind_mut().remove_regex(); - definition.event_kind_mut().remove_null(); - - *definition.event_kind_mut() = definition.event_kind().union(message); - } - - definition -} - -/// If the transform returns an array, the elements of this array will be separated -/// out into it's individual elements and passed downstream. -/// -/// The potential types that the transform can output are any of the arrays -/// elements or any non-array elements that are within the definition. All these -/// definitions need to be merged together. -fn merge_array_definitions(mut definition: schema::Definition) -> schema::Definition { - if let Some(array) = definition.event_kind().as_array() { - let array_kinds = array.reduced_kind(); - - let kind = definition.event_kind_mut(); - kind.remove_array(); - kind.merge( - array_kinds, - Strategy { - collisions: CollisionStrategy::Union, - }, - ); - } - - definition -} - #[derive(Debug, Snafu)] pub enum BuildError { #[snafu(display("must provide exactly one of `source` or `file` configuration"))] @@ -681,7 +604,7 @@ mod tests { use indoc::{formatdoc, indoc}; use vector_core::{config::GlobalOptions, event::EventMetadata, metric_tags}; use vrl::btreemap; - use vrl::value::kind::{Collection, Index}; + use vrl::value::kind::Collection; use super::*; use crate::{ @@ -698,6 +621,7 @@ mod tests { transforms::OutputBuffer, }; use chrono::DateTime; + use enrichment::TableRegistry; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; @@ -809,6 +733,49 @@ mod tests { assert!(tform.runner().runtime.is_empty()); } + #[test] + fn remap_return_raw_string_vector_namespace() { + let initial_definition = Definition::default_for_namespace(&[LogNamespace::Vector].into()); + + let event = { + let mut metadata = EventMetadata::default() + .with_schema_definition(&Arc::new(initial_definition.clone())); + // the Vector metadata field is required for an event to correctly detect the namespace at runtime + metadata + .value_mut() + .insert(&owned_value_path!("vector"), BTreeMap::new()); + + let mut event = LogEvent::new_with_metadata(metadata); + event.insert("copy_from", "buz"); + Event::from(event) + }; + + let conf = RemapConfig { + source: Some(r#" . 
= "root string";"#.to_string()), + file: None, + drop_on_error: true, + drop_on_abort: false, + ..Default::default() + }; + let mut tform = remap(conf.clone()).unwrap(); + let result = transform_one(&mut tform, event).unwrap(); + assert_eq!(get_field_string(&result, "."), "root string"); + + let mut outputs = conf.outputs( + TableRegistry::default(), + &[(OutputId::dummy(), initial_definition)], + LogNamespace::Vector, + ); + + assert_eq!(outputs.len(), 1); + let output = outputs.pop().unwrap(); + assert_eq!(output.port, None); + let actual_schema_def = output.schema_definitions(true)[&OutputId::dummy()].clone(); + let expected_schema = + Definition::new(Kind::bytes(), Kind::any_object(), [LogNamespace::Vector]); + assert_eq!(actual_schema_def, expected_schema); + } + #[test] fn check_remap_adds() { let event = { @@ -1596,103 +1563,6 @@ mod tests { .await } - #[test] - fn test_field_definitions_in_message() { - let definition = - schema::Definition::new_with_default_metadata(Kind::bytes(), [LogNamespace::Legacy]); - assert_eq!( - schema::Definition::new_with_default_metadata( - Kind::object(BTreeMap::from([("message".into(), Kind::bytes())])), - [LogNamespace::Legacy] - ), - move_field_definitions_into_message(definition) - ); - - // Test when a message field already exists. - let definition = schema::Definition::new_with_default_metadata( - Kind::object(BTreeMap::from([("message".into(), Kind::integer())])).or_bytes(), - [LogNamespace::Legacy], - ); - assert_eq!( - schema::Definition::new_with_default_metadata( - Kind::object(BTreeMap::from([( - "message".into(), - Kind::bytes().or_integer() - )])), - [LogNamespace::Legacy] - ), - move_field_definitions_into_message(definition) - ) - } - - #[test] - fn test_merged_array_definitions_simple() { - // Test merging the array definitions where the schema definition - // is simple, containing only one possible type in the array. - let object: BTreeMap = [ - ("carrot".into(), Kind::bytes()), - ("potato".into(), Kind::integer()), - ] - .into(); - - let kind = Kind::array(Collection::from_unknown(Kind::object(object))); - - let definition = - schema::Definition::new_with_default_metadata(kind, [LogNamespace::Legacy]); - - let kind = Kind::object(BTreeMap::from([ - ("carrot".into(), Kind::bytes()), - ("potato".into(), Kind::integer()), - ])); - - let wanted = schema::Definition::new_with_default_metadata(kind, [LogNamespace::Legacy]); - let merged = merge_array_definitions(definition); - - assert_eq!(wanted, merged); - } - - #[test] - fn test_merged_array_definitions_complex() { - // Test merging the array definitions where the schema definition - // is fairly complex containing multiple different possible types. 
- let object: BTreeMap = [ - ("carrot".into(), Kind::bytes()), - ("potato".into(), Kind::integer()), - ] - .into(); - - let array: BTreeMap = [ - (Index::from(0), Kind::integer()), - (Index::from(1), Kind::boolean()), - ( - Index::from(2), - Kind::object(BTreeMap::from([("peas".into(), Kind::bytes())])), - ), - ] - .into(); - - let mut kind = Kind::bytes(); - kind.add_object(object); - kind.add_array(array); - - let definition = - schema::Definition::new_with_default_metadata(kind, [LogNamespace::Legacy]); - - let mut kind = Kind::bytes(); - kind.add_integer(); - kind.add_boolean(); - kind.add_object(BTreeMap::from([ - ("carrot".into(), Kind::bytes().or_undefined()), - ("potato".into(), Kind::integer().or_undefined()), - ("peas".into(), Kind::bytes().or_undefined()), - ])); - - let wanted = schema::Definition::new_with_default_metadata(kind, [LogNamespace::Legacy]); - let merged = merge_array_definitions(definition); - - assert_eq!(wanted, merged); - } - #[test] fn test_combined_transforms_simple() { // Make sure that when getting the definitions from one transform and From 812929b1761355e2209ce33b3fc439d9b8b0d182 Mon Sep 17 00:00:00 2001 From: Dominic Burkart Date: Fri, 30 Jun 2023 16:44:33 +0200 Subject: [PATCH 221/236] feat(internal telemetry at shutdown): close internal sources after external ones (#17741) We would like to close the internal logs, metrics, and trace sources sent from Vector as late as possible during shutdown to facilitate debugging. In this PR, we wait until all other sources are shut down before shutting down internal telemetry sources. This means that shutdown may be a bit longer, but we will have better observability on the shutdown process. issue: https://github.com/vectordotdev/vector/issues/15912 --- lib/vector-common/src/shutdown.rs | 33 ++++++++++++++++++++----------- src/config/source.rs | 2 +- src/sources/socket/mod.rs | 2 +- src/sources/util/framestream.rs | 2 +- src/topology/builder.rs | 7 +++++-- 5 files changed, 30 insertions(+), 16 deletions(-) diff --git a/lib/vector-common/src/shutdown.rs b/lib/vector-common/src/shutdown.rs index 79f58978bd6e9..d1b35be5c6fb7 100644 --- a/lib/vector-common/src/shutdown.rs +++ b/lib/vector-common/src/shutdown.rs @@ -107,9 +107,11 @@ impl ShutdownSignal { } } +type IsInternal = bool; + #[derive(Debug, Default)] pub struct SourceShutdownCoordinator { - shutdown_begun_triggers: HashMap, + shutdown_begun_triggers: HashMap, shutdown_force_triggers: HashMap, shutdown_complete_tripwires: HashMap, } @@ -121,13 +123,14 @@ impl SourceShutdownCoordinator { pub fn register_source( &mut self, id: &ComponentKey, + internal: bool, ) -> (ShutdownSignal, impl Future) { let (shutdown_begun_trigger, shutdown_begun_tripwire) = Tripwire::new(); let (force_shutdown_trigger, force_shutdown_tripwire) = Tripwire::new(); let (shutdown_complete_trigger, shutdown_complete_tripwire) = Tripwire::new(); self.shutdown_begun_triggers - .insert(id.clone(), shutdown_begun_trigger); + .insert(id.clone(), (internal, shutdown_begun_trigger)); self.shutdown_force_triggers .insert(id.clone(), force_shutdown_trigger); self.shutdown_complete_tripwires @@ -201,13 +204,14 @@ impl SourceShutdownCoordinator { /// Panics if this coordinator has had its triggers removed (ie /// has been taken over with `Self::takeover_source`). 
pub fn shutdown_all(self, deadline: Option) -> impl Future { - let mut complete_futures = Vec::new(); + let mut internal_sources_complete_futures = Vec::new(); + let mut external_sources_complete_futures = Vec::new(); let shutdown_begun_triggers = self.shutdown_begun_triggers; let mut shutdown_complete_tripwires = self.shutdown_complete_tripwires; let mut shutdown_force_triggers = self.shutdown_force_triggers; - for (id, trigger) in shutdown_begun_triggers { + for (id, (internal, trigger)) in shutdown_begun_triggers { trigger.cancel(); let shutdown_complete_tripwire = @@ -229,10 +233,16 @@ impl SourceShutdownCoordinator { deadline, ); - complete_futures.push(source_complete); + if internal { + internal_sources_complete_futures.push(source_complete); + } else { + external_sources_complete_futures.push(source_complete); + } } - futures::future::join_all(complete_futures).map(|_| ()) + futures::future::join_all(external_sources_complete_futures) + .then(|_| futures::future::join_all(internal_sources_complete_futures)) + .map(|_| ()) } /// Sends the signal to the given source to begin shutting down. Returns a future that resolves @@ -250,11 +260,12 @@ impl SourceShutdownCoordinator { id: &ComponentKey, deadline: Instant, ) -> impl Future { - let begin_shutdown_trigger = self.shutdown_begun_triggers.remove(id).unwrap_or_else(|| { - panic!( + let (_, begin_shutdown_trigger) = + self.shutdown_begun_triggers.remove(id).unwrap_or_else(|| { + panic!( "shutdown_begun_trigger for source \"{id}\" not found in the ShutdownCoordinator" ) - }); + }); // This is what actually triggers the source to begin shutting down. begin_shutdown_trigger.cancel(); @@ -336,7 +347,7 @@ mod test { let mut shutdown = SourceShutdownCoordinator::default(); let id = ComponentKey::from("test"); - let (shutdown_signal, _) = shutdown.register_source(&id); + let (shutdown_signal, _) = shutdown.register_source(&id, false); let deadline = Instant::now() + Duration::from_secs(1); let shutdown_complete = shutdown.shutdown_source(&id, deadline); @@ -352,7 +363,7 @@ mod test { let mut shutdown = SourceShutdownCoordinator::default(); let id = ComponentKey::from("test"); - let (_shutdown_signal, force_shutdown_tripwire) = shutdown.register_source(&id); + let (_shutdown_signal, force_shutdown_tripwire) = shutdown.register_source(&id, false); let deadline = Instant::now() + Duration::from_secs(1); let shutdown_complete = shutdown.shutdown_source(&id, deadline); diff --git a/src/config/source.rs b/src/config/source.rs index 1353c18c05dc4..5e53ebbad1725 100644 --- a/src/config/source.rs +++ b/src/config/source.rs @@ -143,7 +143,7 @@ impl SourceContext { out: SourceSender, ) -> (Self, crate::shutdown::SourceShutdownCoordinator) { let mut shutdown = crate::shutdown::SourceShutdownCoordinator::default(); - let (shutdown_signal, _) = shutdown.register_source(key); + let (shutdown_signal, _) = shutdown.register_source(key, false); ( Self { key: key.clone(), diff --git a/src/sources/socket/mod.rs b/src/sources/socket/mod.rs index 58ef30c3fcf99..d381b65cecfdd 100644 --- a/src/sources/socket/mod.rs +++ b/src/sources/socket/mod.rs @@ -872,7 +872,7 @@ mod test { source_id: &ComponentKey, shutdown: &mut SourceShutdownCoordinator, ) -> (SocketAddr, JoinHandle>) { - let (shutdown_signal, _) = shutdown.register_source(source_id); + let (shutdown_signal, _) = shutdown.register_source(source_id, false); init_udp_inner(sender, source_id, shutdown_signal, None, false).await } diff --git a/src/sources/util/framestream.rs b/src/sources/util/framestream.rs index 
194dd48432074..8bae468a85241 100644 --- a/src/sources/util/framestream.rs +++ b/src/sources/util/framestream.rs @@ -728,7 +728,7 @@ mod test { let source_id = ComponentKey::from(source_id); let socket_path = frame_handler.socket_path(); let mut shutdown = SourceShutdownCoordinator::default(); - let (shutdown_signal, _) = shutdown.register_source(&source_id); + let (shutdown_signal, _) = shutdown.register_source(&source_id, false); let server = build_framestream_unix_source(frame_handler, shutdown_signal, pipeline) .expect("Failed to build framestream unix source."); diff --git a/src/topology/builder.rs b/src/topology/builder.rs index b7ace14acd57b..250754aeddfae 100644 --- a/src/topology/builder.rs +++ b/src/topology/builder.rs @@ -71,6 +71,8 @@ static TRANSFORM_CONCURRENCY_LIMIT: Lazy = Lazy::new(|| { .unwrap_or_else(crate::num_threads) }); +const INTERNAL_SOURCES: [&str; 2] = ["internal_logs", "internal_metrics"]; + /// Builds only the new pieces, and doesn't check their topology. pub async fn build_pieces( config: &super::Config, @@ -313,8 +315,9 @@ impl<'a> Builder<'a> { let pipeline = builder.build(); - let (shutdown_signal, force_shutdown_tripwire) = - self.shutdown_coordinator.register_source(key); + let (shutdown_signal, force_shutdown_tripwire) = self + .shutdown_coordinator + .register_source(key, INTERNAL_SOURCES.contains(&typetag)); let context = SourceContext { key: key.clone(), From b8e3dbe1cf55e4d117023531e19891fc8c19ccf9 Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Fri, 30 Jun 2023 10:44:35 -0400 Subject: [PATCH 222/236] chore(ci): correctly validate comment author in k8s e2e job (#17818) --- .github/workflows/k8s_e2e.yml | 57 +++++++++++++++++------------------ 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/.github/workflows/k8s_e2e.yml b/.github/workflows/k8s_e2e.yml index 50de3f5db9a2c..e7faa3a59555a 100644 --- a/.github/workflows/k8s_e2e.yml +++ b/.github/workflows/k8s_e2e.yml @@ -42,9 +42,31 @@ env: PROFILE: debug jobs: + validate: + name: Validate comment + runs-on: ubuntu-latest + if: | + github.event_name != 'issue_comment' || + ( github.event.issue.pull_request && + ( contains(github.event.comment.body, '/ci-run-all') || + contains(github.event.comment.body, '/ci-run-k8s') + ) + ) + steps: + - name: Get PR comment author + id: comment + uses: tspascoal/get-user-teams-membership@v2 + with: + username: ${{ github.actor }} + team: 'Vector' + GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} + + - name: Validate author membership + if: steps.comment.outputs.isTeamMember == 'false' + run: exit 1 + changes: - if: github.event_name != 'issue_comment' || (github.event.issue.pull_request && - (contains(github.event.comment.body, '/ci-run-k8s') || contains(github.event.comment.body, '/ci-run-all'))) + needs: validate uses: ./.github/workflows/changes.yml with: base_ref: ${{ github.event.pull_request.base.ref }} @@ -54,7 +76,7 @@ jobs: build-x86_64-unknown-linux-gnu: name: Build - x86_64-unknown-linux-gnu runs-on: [linux, ubuntu-20.04-4core] - needs: changes + needs: [changes, validate] if: github.event_name != 'pull_request' || needs.changes.outputs.k8s == 'true' # cargo-deb requires a release build, but we don't need optimizations for tests env: @@ -62,14 +84,6 @@ jobs: CARGO_PROFILE_RELEASE_CODEGEN_UNITS: 256 CARGO_INCREMENTAL: 0 steps: - - name: Validate issue comment - if: github.event_name == 'issue_comment' - uses: tspascoal/get-user-teams-membership@v2 - with: - username: ${{ github.actor }} - team: 'Vector' - GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} - - name: 
(PR comment) Get PR branch if: ${{ github.event_name == 'issue_comment' }} uses: xt0rted/pull-request-comment-branch@v2 @@ -127,19 +141,11 @@ jobs: compute-k8s-test-plan: name: Compute K8s test plan runs-on: ubuntu-latest - needs: changes + needs: [changes, validate] if: github.event_name != 'pull_request' || needs.changes.outputs.k8s == 'true' outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - name: Validate issue comment - if: github.event_name == 'issue_comment' - uses: tspascoal/get-user-teams-membership@v2 - with: - username: ${{ github.actor }} - team: 'Vector' - GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} - - uses: actions/github-script@v6.4.1 id: set-matrix with: @@ -189,6 +195,7 @@ jobs: name: K8s ${{ matrix.kubernetes_version.version }} / ${{ matrix.container_runtime }} (${{ matrix.kubernetes_version.role }}) runs-on: [linux, ubuntu-20.04-4core] needs: + - validate - build-x86_64-unknown-linux-gnu - compute-k8s-test-plan strategy: @@ -239,21 +246,13 @@ jobs: final-result: name: K8s E2E Suite runs-on: ubuntu-latest - needs: test-e2e-kubernetes + needs: [test-e2e-kubernetes, validate] if: | always() && (github.event_name != 'issue_comment' || (github.event.issue.pull_request && (contains(github.event.comment.body, '/ci-run-k8s') || contains(github.event.comment.body, '/ci-run-all')))) env: FAILED: ${{ contains(needs.*.result, 'failure') }} steps: - - name: Validate issue comment - if: github.event_name == 'issue_comment' - uses: tspascoal/get-user-teams-membership@v2 - with: - username: ${{ github.actor }} - team: 'Vector' - GITHUB_TOKEN: ${{ secrets.GH_PAT_ORG }} - - name: (PR comment) Get PR branch if: success() && github.event_name == 'issue_comment' uses: xt0rted/pull-request-comment-branch@v2 From 7d098e42dfd08ea1f2e63355e2a95c2b38e3b768 Mon Sep 17 00:00:00 2001 From: Nathan Fox Date: Fri, 30 Jun 2023 10:59:08 -0400 Subject: [PATCH 223/236] chore(docs): Add Log Namespacing docs (#16571) This updates documentation and adds a blog-post announcing the log namespacing feature (as a beta release). --------- Co-authored-by: Spencer Gilbert --- website/content/en/blog/log-namespacing.md | 169 ++++++++++++++++++ website/cue/reference/configuration.cue | 20 ++- .../remap/functions/set_semantic_meaning.cue | 44 +++++ website/cue/reference/urls.cue | 1 + website/layouts/partials/data.html | 33 ++++ 5 files changed, 263 insertions(+), 4 deletions(-) create mode 100644 website/content/en/blog/log-namespacing.md create mode 100644 website/cue/reference/remap/functions/set_semantic_meaning.cue diff --git a/website/content/en/blog/log-namespacing.md b/website/content/en/blog/log-namespacing.md new file mode 100644 index 0000000000000..4516b5685794b --- /dev/null +++ b/website/content/en/blog/log-namespacing.md @@ -0,0 +1,169 @@ +--- +title: Log Namespacing +short: Log Namespacing +description: Changing Vector's data model +authors: ["fuchsnj"] +date: "2023-06-30" +badges: + type: announcement + domains: ["data model"] +tags: [] +--- + +The Vector team has been hard at work improving the data model of events in Vector. These +changes are now available for beta testing for those who want to try it out and give feedback. +This is an opt-in feature. Nothing should change unless you specifically enable it. + +## Why + +Currently, all data for events is placed at the root of the event, regardless of where the data came +from or how it was obtained. 
Not only can that make it confusing to understand what a certain field +represents (eg: was the `timestamp` field generated by Vector when it was ingested, or is it when +the source originally created the event) but it can easily cause data collisions. + +Log namespacing also unblocks powerful features being worked on, such as end-to-end type checking +of events in Vector. + +## How to enable + +The [global config] `schema.log_namespace` can be set to `true` to enable the new +Log Namespacing feature for all components. The default is `false`. + +Every source also has a `log_namespace` config option. This will override the global setting, +so you can try out Log Namespacing on individual sources. + +The following example enables the `log_namespace` feature globally, then disables it for a single +source. + +```toml +schema.log_namespace = true + +[sources.input_with_log_namespace] +type = "demo_logs" +format = "shuffle" +lines = ["input_with_log_namespace"] +interval = 1 + +[sources.input_without_log_namespace] +type = "demo_logs" +format = "shuffle" +lines = ["input_without_log_namespace"] +interval = 1 +log_namespace = false + +[sinks.console] +type = "console" +inputs = ["input_with_log_namespace", "input_without_log_namespace"] +encoding.codec = "json" + +``` + +## How It Works + +### Data Layout + +When handling log events, information is categorized into one of the following groups: +(Examples are from the `datadog_agent` source) + +- Event Data: The decoded event data. (eg: the log itself) +- Source Metadata: Metadata provided by the source of the event. (eg: hostname / tags) +- Vector Metadata: Metadata provided by Vector. (eg: the time when Vector received the event) + +#### Without Log Namespacing + +All three of these are placed at the root of the event. The exact layout depends on the source, +some fields are configurable, and the [global log schema] can change the name / location of some +fields. + +Example log event from the `datadog_agent` source (with the JSON decoder) + +```json +{ + "ddsource": "vector", + "ddtags": "env:prod", + "hostname": "alpha", + "foo": "foo field", + "service": "cernan", + "source_type": "datadog_agent", + "bar": "bar field", + "status": "warning", + "timestamp": "1970-02-14T20:44:57.570Z" +} +``` + +#### With Log Namespacing + +When enabled, the layout of this data is well-defined and consistent. + +Event Data (and _only_ Event Data) is placed at the root of the event (eg: `.`). +Source metadata is placed in event metadata, prefixed by the source name. (eg: `%datadog_agent`) +Vector metadata is placed in event metadata, prefixed by `vector`. (eg: `%vector`) + +Generally sinks will only send the event data. If you want to include any metadata fields, +it's recommended to use a [remap] transform to add data to the event as needed. + +It's important to note that previously the type of an event (`.`) would always be an object +with fields. Now it is possible for event to be any type, such as a string. + +Example log event from the `datadog agent` source. 
(same data as the example above) + +Event root (`.`) + +```json +{ + "foo": "foo field", + "bar": "bar field" +} +``` + +Source metadata fields (`%datadog_agent`) + +```json +{ + "ddsource": "vector", + "ddtags": "env:prod", + "hostname": "alpha", + "service": "cernan", + "status": "warning", + "timestamp": "1970-02-14T20:44:57.570Z" +} +``` + +Source vector fields (`%vector`) + +```json +{ + "source_type": "datadog_agent", + "ingest_timestamp": "1970-02-14T20:44:58.236Z" +} +``` + +Here is a sample VRL script accessing different parts of an event when log namespacing is enabled. + +```coffee +event = . +field_from_event = .foo + +all_metadata = % +tags = %datadog_agent.ddtags +timestamp = %vector.ingest_timestamp + +``` + +### Semantic Meaning + +Before Log Namespacing, Vector used the [global log schema] to keep certain types of information +at known locations. This is changing, and when log namespacing is enabled, the [global log schema] +will no longer be used. To replace it, a new feature called "semantic meaning" will be used instead. +This allows assigning meaning to different fields of an event, which allows sinks to access +information needed, such as timestamps, hostname, the message, etc. + +Semantic meaning will automatically be assigned by all sources. Sinks will check on startup to make +sure a meaning exists for all required fields. If a source does not provide a required field, or +a meaning needs to be manually adjusted for any reason, the VRL function [set_semantic_meaning] can +be used. + +[global log schema]: /docs/reference/configuration/global-options/#log_schema +[set_semantic_meaning]: /docs/reference/vrl/functions/#set_semantic_meaning +[remap]: /docs/reference/configuration/transforms/remap/ +[global config]: /docs/reference/configuration/global-options/#log_namespacing diff --git a/website/cue/reference/configuration.cue b/website/cue/reference/configuration.cue index d1a1476d3de38..08d9b0bb6b88d 100644 --- a/website/cue/reference/configuration.cue +++ b/website/cue/reference/configuration.cue @@ -251,6 +251,17 @@ configuration: { } } } + log_namespacing: { + common: false + description: """ + Globally enables / disables log namespacing. See [Log Namespacing](\(urls.log_namespacing_blog)) + for more details. If you want to enable individual sources, there is a config + option in the source configuration. + """ + required: false + warnings: [] + type: bool: default: false + } telemetry: { common: false @@ -274,7 +285,7 @@ configuration: { common: true description: """ Add a `source` tag with the source component the event was received from. - + If there is no source component, for example if the event was generated by the `lua` transform a `-` is emitted for this tag. """ @@ -309,13 +320,14 @@ configuration: { } log_schema: { - common: false + common: false description: """ Configures default log schema for all events. This is used by - Vector source components to assign the fields on incoming + Vector components to assign the fields on incoming events. + These values are ignored if log namespacing is enabled. 
(See [Log Namespacing](\(urls.log_namespacing_blog))) """ - required: false + required: false type: object: { examples: [] options: { diff --git a/website/cue/reference/remap/functions/set_semantic_meaning.cue b/website/cue/reference/remap/functions/set_semantic_meaning.cue new file mode 100644 index 0000000000000..d21ca5b121b27 --- /dev/null +++ b/website/cue/reference/remap/functions/set_semantic_meaning.cue @@ -0,0 +1,44 @@ +package metadata + +remap: functions: set_semantic_meaning: { + category: "Event" + description: """ + Sets a semantic meaning for an event. Note that this function assigns + meaning at Vector startup, and has _no_ runtime behavior. It is suggested + to put all calls to this function at the beginning of a VRL function. The function + cannot be conditionally called (eg: using an if statement cannot stop the meaning + from being assigned). + """ + + arguments: [ + { + name: "target" + description: """ + The path of the value that will be assigned a meaning. + """ + required: true + type: ["path"] + }, + { + name: "meaning" + description: """ + The name of the meaning to assign. + """ + required: true + type: ["string"] + }, + ] + internal_failure_reasons: [ + ] + return: types: ["null"] + + examples: [ + { + title: "Sets custom field semantic meaning" + source: #""" + set_semantic_meaning(.foo, "bar") + """# + return: null + }, + ] +} diff --git a/website/cue/reference/urls.cue b/website/cue/reference/urls.cue index 0e3560392c108..2f6921ba39eea 100644 --- a/website/cue/reference/urls.cue +++ b/website/cue/reference/urls.cue @@ -313,6 +313,7 @@ urls: { logfmt_specs: "https://pkg.go.dev/github.com/kr/logfmt#section-documentation" logstash: "https://www.elastic.co/logstash" logstash_protocol: "https://github.com/elastic/logstash-forwarder/blob/master/PROTOCOL.md" + log_namespacing_blog: "/blog/log-namespacing/" loki: "https://grafana.com/oss/loki/" loki_multi_tenancy: "\(github)/grafana/loki/blob/master/docs/operations/multi-tenancy.md" log_event_source: "\(vector_repo)/blob/master/src/event/" diff --git a/website/layouts/partials/data.html b/website/layouts/partials/data.html index 9a7b71d4fd8a1..3feefe90cd353 100644 --- a/website/layouts/partials/data.html +++ b/website/layouts/partials/data.html @@ -257,6 +257,39 @@
+ Warning
+ The fields shown below will be different if log namespacing is enabled.
+ See Log Namespacing for more details.
{{ template "logs_output" . }}
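Taken together, the options documented in this patch can be exercised with a small end-to-end configuration. The sketch below is a minimal illustration under stated assumptions rather than an example shipped with the patch: the component names, the `app_message` field, the agent `address`, and the choice of the `message` meaning are invented for illustration; only `schema.log_namespace`, `set_semantic_meaning`, and the `%vector` / `%datadog_agent` metadata paths come from the documentation added above.

```toml
# Global opt-in to the new event layout described in the blog post.
schema.log_namespace = true

[sources.agent]
# Hypothetical Datadog Agent source; the listen address is an assumption.
type = "datadog_agent"
address = "0.0.0.0:8080"

[transforms.annotate]
type = "remap"
inputs = ["agent"]
source = '''
  # With log namespacing on, `.` carries only the decoded event data,
  # so the field playing the role of the log line gets an explicit meaning.
  set_semantic_meaning(.app_message, "message")

  # Source and Vector metadata live under `%`, keyed by origin.
  .received_at = %vector.ingest_timestamp
  .agent_tags = %datadog_agent.ddtags
'''

[sinks.console]
type = "console"
inputs = ["annotate"]
encoding.codec = "json"
```

Because sinks generally forward only the event data, copying metadata fields into the event in the remap step is what makes them visible downstream, which mirrors the recommendation in the blog post above.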
From bf9828d03b92a6b7ce0295d3468eb1c139f5a1fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Jun 2023 15:10:36 +0000 Subject: [PATCH 224/236] chore(deps): Bump quote from 1.0.28 to 1.0.29 (#17815) Bumps [quote](https://github.com/dtolnay/quote) from 1.0.28 to 1.0.29.
Release notes

Sourced from quote's releases.

1.0.29

  • Fix proc_macro_span_shrink-related build error when built with -Zminimal-versions
Commits
  • e99862e Release 1.0.29
  • 0c68465 Fix -Zminimal-versions build
  • 200f56a Remove .clippy.toml in favor of respecting rust-version from Cargo.toml
  • 47e1066 Ignore uninlined_format_args pedantic clippy lint
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=quote&package-manager=cargo&previous-version=1.0.28&new-version=1.0.29)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 138 ++++++++++++++++++++++++++--------------------------- 1 file changed, 69 insertions(+), 69 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c4c0e2d0b0ac1..a0a3fb1c59b93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -260,7 +260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c6368f9ae5c6ec403ca910327ae0c9437b0a85255b6950c90d497e6177f6e5e" dependencies = [ "proc-macro-hack", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -473,7 +473,7 @@ dependencies = [ "darling 0.14.2", "proc-macro-crate 1.2.1", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "thiserror", ] @@ -593,7 +593,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -615,7 +615,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -632,7 +632,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -1417,7 +1417,7 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd9e32d7420c85055e8107e5b2463c4eeefeaac18b52359fe9f9c08a18f342b2" dependencies = [ - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1561,7 +1561,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61820b4c5693eafb998b1e67485423c923db4a75f72585c247bdee32bad81e7b" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1572,7 +1572,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c76cdbfa13def20d1f8af3ae7b3c6771f06352a74221d8851262ac384c122b8e" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1643,7 +1643,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13e576ebe98e605500b3c8041bb888e966653577172df6dd97398714eb30b9bf" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1726,7 +1726,7 @@ dependencies = [ "cached_proc_macro_types", "darling 0.14.2", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -1951,7 +1951,7 @@ checksum = "81d7dc0031c3a59a04fc2ba395c8e2dd463cba1859275f065d225f6122221b45" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -2405,7 +2405,7 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2459,7 +2459,7 @@ dependencies = [ "codespan-reporting", "once_cell", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "scratch", "syn 1.0.109", ] @@ -2477,7 +2477,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470" dependencies = [ "proc-macro2 1.0.63", - "quote 
1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2510,7 +2510,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "strsim 0.10.0", "syn 1.0.109", ] @@ -2524,7 +2524,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "strsim 0.10.0", "syn 1.0.109", ] @@ -2536,7 +2536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2547,7 +2547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" dependencies = [ "darling_core 0.14.2", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2623,7 +2623,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2634,7 +2634,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -2646,7 +2646,7 @@ checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -2902,7 +2902,7 @@ checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2914,7 +2914,7 @@ checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2926,7 +2926,7 @@ checksum = "11f36e95862220b211a6e2aa5eca09b4fa391b13cd52ceb8035a24bf65a79de2" dependencies = [ "once_cell", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -2946,7 +2946,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -3074,7 +3074,7 @@ checksum = "f47da3a72ec598d9c8937a7ebca8962a5c7a1f28444e38c2b33c771ba3f55f05" dependencies = [ "proc-macro-error", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -3170,7 +3170,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4c81935e123ab0741c4c4f0d9b8377e5fb21d3de7e062fa4b1263b1fbcba1ea" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -3351,7 +3351,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -3434,7 +3434,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb19fe8de3ea0920d282f7b77dd4227aea6b8b999b42cdf0ca41b2472b14443a" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -3540,7 +3540,7 @@ dependencies = [ "heck 0.4.0", "lazy_static", "proc-macro2 
1.0.63", - "quote 1.0.28", + "quote 1.0.29", "serde", "serde_json", "syn 1.0.109", @@ -4926,7 +4926,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -5452,7 +5452,7 @@ checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" dependencies = [ "proc-macro-crate 1.2.1", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -5464,7 +5464,7 @@ checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.2.1", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -5647,7 +5647,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -5891,7 +5891,7 @@ dependencies = [ "pest", "pest_meta", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -5979,7 +5979,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -6245,7 +6245,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "version_check", ] @@ -6257,7 +6257,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "version_check", ] @@ -6366,7 +6366,7 @@ dependencies = [ "anyhow", "itertools 0.10.5", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -6406,7 +6406,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -6505,7 +6505,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -6520,9 +6520,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.28" +version = "1.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488" +checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105" dependencies = [ "proc-macro2 1.0.63", ] @@ -6920,7 +6920,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff26ed6c7c4dfc2aa9480b86a60e3c7233543a270a680e10758a507c5a4ce476" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7420,7 +7420,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -7431,7 +7431,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7483,7 +7483,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7542,7 +7542,7 @@ checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7554,7 +7554,7 @@ checksum = "859011bddcc11f289f07f467cc1fe01c7a941daa4d8f6c40d4d1c92eb6d9319c" dependencies = [ "darling 0.14.2", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7825,7 +7825,7 @@ checksum = "475b3bbe5245c26f2d8a6f62d67c1f30eb9fffeccee721c45d162c3ebbdf81b2" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7966,7 +7966,7 @@ dependencies = [ "heck 0.3.3", "proc-macro-error", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -7984,7 +7984,7 @@ checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.0", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "rustversion", "syn 1.0.109", ] @@ -8023,7 +8023,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "unicode-ident", ] @@ -8034,7 +8034,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "unicode-ident", ] @@ -8051,7 +8051,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -8205,7 +8205,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -8351,7 +8351,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -8572,7 +8572,7 @@ dependencies = [ "prettyplease", "proc-macro2 1.0.63", "prost-build", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -8676,7 +8676,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -8947,7 +8947,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89851716b67b937e393b3daa8423e67ddfc4bbbf1654bcf05488e95e0828db0c" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -8977,7 +8977,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3e1c30cedd24fc597f7d37a721efdbdc2b1acae012c1ef1218f4c7c2c0f3e7" 
dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", ] @@ -9519,7 +9519,7 @@ dependencies = [ "darling 0.13.4", "once_cell", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "serde", "serde_json", "syn 1.0.109", @@ -9532,7 +9532,7 @@ version = "0.1.0" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "serde", "serde_derive_internals", "syn 1.0.109", @@ -9792,7 +9792,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d257817081c7dffcdbab24b9e62d2def62e2ff7d00b1c20062551e6cccc145ff" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", ] [[package]] @@ -9893,7 +9893,7 @@ dependencies = [ "log", "once_cell", "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", "wasm-bindgen-shared", ] @@ -9916,7 +9916,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ - "quote 1.0.28", + "quote 1.0.29", "wasm-bindgen-macro-support", ] @@ -9927,7 +9927,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 2.0.10", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -10338,7 +10338,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6505e6815af7de1746a08f69c69606bb45695a17149517680f3b2149713b19a3" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", ] @@ -10358,7 +10358,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ "proc-macro2 1.0.63", - "quote 1.0.28", + "quote 1.0.29", "syn 1.0.109", "synstructure", ] From 0b32626848f5189d6832e6f8ea3c66ebaa553975 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Jun 2023 15:56:30 +0000 Subject: [PATCH 225/236] chore(deps): Bump pin-project from 1.1.0 to 1.1.1 (#17806) Bumps [pin-project](https://github.com/taiki-e/pin-project) from 1.1.0 to 1.1.1.
Release notes

Sourced from pin-project's releases.

1.1.1

  • Fix build error from dependency when built with -Z minimal-versions.
Changelog

Sourced from pin-project's changelog.

[1.1.1] - 2023-06-29

  • Fix build error from dependency when built with -Z minimal-versions.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pin-project&package-manager=cargo&previous-version=1.1.0&new-version=1.1.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- lib/vector-buffers/Cargo.toml | 2 +- lib/vector-common/Cargo.toml | 2 +- lib/vector-core/Cargo.toml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a0a3fb1c59b93..cb18c2e91d261 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5965,18 +5965,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" +checksum = "6e138fdd8263907a2b0e1b4e80b7e58c721126479b6e6eedfb1b402acea7b9bd" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" +checksum = "d1fef411b303e3e12d534fb6e7852de82da56edd937d895125821fb7c09436c7" dependencies = [ "proc-macro2 1.0.63", "quote 1.0.29", diff --git a/Cargo.toml b/Cargo.toml index fe2d6de131203..4b156e9963f1c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -291,7 +291,7 @@ openssl-probe = { version = "0.1.5", default-features = false } ordered-float = { version = "3.7.0", default-features = false } paste = "1.0.12" percent-encoding = { version = "2.3.0", default-features = false } -pin-project = { version = "1.1.0", default-features = false } +pin-project = { version = "1.1.1", default-features = false } postgres-openssl = { version = "0.5.0", default-features = false, features = ["runtime"], optional = true } pulsar = { version = "6.0.1", default-features = false, features = ["tokio-runtime", "auth-oauth2", "flate2", "lz4", "snap", "zstd"], optional = true } rand = { version = "0.8.5", default-features = false, features = ["small_rng"] } diff --git a/lib/vector-buffers/Cargo.toml b/lib/vector-buffers/Cargo.toml index 3f774b21ae5b6..09de0d0ac6153 100644 --- a/lib/vector-buffers/Cargo.toml +++ b/lib/vector-buffers/Cargo.toml @@ -19,7 +19,7 @@ futures = { version = "0.3.28", default-features = false, features = ["std"] } memmap2 = { version = "0.7.1", default-features = false } metrics = "0.21.0" num-traits = { version = "0.2.15", default-features = false } -pin-project = { version = "1.1.0", default-features = false } +pin-project = { version = "1.1.1", default-features = false } rkyv = { version = "0.7.40", default-features = false, features = ["size_32", "std", "strict", "validation"] } serde = { version = "1.0.164", default-features = false, features = ["derive"] } snafu = { version = "0.7.4", default-features = false, features = ["std"] } diff --git a/lib/vector-common/Cargo.toml b/lib/vector-common/Cargo.toml index 7d578ecb4e051..24023729e5104 100644 --- a/lib/vector-common/Cargo.toml +++ b/lib/vector-common/Cargo.toml @@ -53,7 +53,7 @@ metrics = "0.21.0" nom = { version = "7", optional = true } ordered-float = { version = "3.7.0", default-features = false } paste = "1.0.12" -pin-project = { version = "1.1.0", default-features = false } +pin-project = { version = "1.1.1", default-features = false } ryu = { version = "1", default-features = false } serde_json = { version = "1.0.99", default-features = false, features = ["std", "raw_value"] } serde = { version = "1.0.164", optional = true, features = ["derive"] } diff --git a/lib/vector-core/Cargo.toml b/lib/vector-core/Cargo.toml 
index a281ac35dd87e..1395883640469 100644 --- a/lib/vector-core/Cargo.toml +++ b/lib/vector-core/Cargo.toml @@ -33,7 +33,7 @@ once_cell = { version = "1.18", default-features = false } ordered-float = { version = "3.7.0", default-features = false } openssl = { version = "0.10.55", default-features = false, features = ["vendored"] } parking_lot = { version = "0.12.1", default-features = false } -pin-project = { version = "1.1.0", default-features = false } +pin-project = { version = "1.1.1", default-features = false } proptest = { version = "1.2", optional = true } prost-types = { version = "0.11", default-features = false } prost = { version = "0.11", default-features = false, features = ["std"] } From 96e68f76efe2208a8899b3f8961125ba5424a9ba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Jun 2023 16:25:20 +0000 Subject: [PATCH 226/236] chore(deps): Bump lru from 0.10.0 to 0.10.1 (#17810) Bumps [lru](https://github.com/jeromefroe/lru-rs) from 0.10.0 to 0.10.1.
Changelog

Sourced from lru's changelog.

v0.10.1 - 2023-06-29

  • Add try_get_or_insert method.
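As a rough illustration of the new API (a sketch only — it assumes `try_get_or_insert` mirrors `get_or_insert` but takes a fallible closure and returns `Result<&V, E>`; the key and values below are invented for the example):

```rust
// Sketch: lru 0.10.1's `try_get_or_insert` runs the closure only on a cache miss
// and propagates the closure's error instead of inserting anything.
use lru::LruCache;
use std::num::NonZeroUsize;

fn main() {
    let mut cache: LruCache<&str, u32> = LruCache::new(NonZeroUsize::new(2).unwrap());

    // Miss: the closure runs, its Ok value is inserted, and a reference is returned.
    let first = cache.try_get_or_insert("answer", || Ok::<u32, String>(42));
    assert_eq!(first, Ok(&42));

    // Hit: the cached value is returned and the (failing) closure never runs.
    let second = cache.try_get_or_insert("answer", || Err("should not run".to_string()));
    assert_eq!(second, Ok(&42));
}
```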
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=lru&package-manager=cargo&previous-version=0.10.0&new-version=0.10.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cb18c2e91d261..b4643639d00ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4728,9 +4728,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03f1160296536f10c833a82dca22267d5486734230d47bf00bf435885814ba1e" +checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" [[package]] name = "lru-cache" diff --git a/Cargo.toml b/Cargo.toml index 4b156e9963f1c..dd4227faab120 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -277,7 +277,7 @@ k8s-openapi = { version = "0.18.0", default-features = false, features = ["api", kube = { version = "0.82.0", default-features = false, features = ["client", "openssl-tls", "runtime"], optional = true } listenfd = { version = "1.0.1", default-features = false, optional = true } logfmt = { version = "0.0.2", default-features = false, optional = true } -lru = { version = "0.10.0", default-features = false, optional = true } +lru = { version = "0.10.1", default-features = false, optional = true } maxminddb = { version = "0.23.0", default-features = false, optional = true } md-5 = { version = "0.10", default-features = false, optional = true } mongodb = { version = "2.6.0", default-features = false, features = ["tokio-runtime"], optional = true } From 708b7f6088c14180945d80e2a8f13ed471ded77a Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 30 Jun 2023 10:17:31 -0700 Subject: [PATCH 227/236] chore(ci): Add schedule to component features workflow conditional check (#17816) Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- .github/workflows/component_features.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/component_features.yml b/.github/workflows/component_features.yml index eec8309e3a0d1..d321a81f17964 100644 --- a/.github/workflows/component_features.yml +++ b/.github/workflows/component_features.yml @@ -20,7 +20,7 @@ jobs: check-component-features: # use free tier on schedule and 8 core to expedite results on demand invocation runs-on: ${{ github.event_name == 'schedule' && 'ubuntu-latest' || fromJSON('["linux", "ubuntu-20.04-8core"]') }} - if: github.event_name == 'issue_comment' || github.event_name == 'workflow_dispatch' + if: github.event_name == 'issue_comment' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' steps: - name: (PR comment) Get PR branch if: github.event_name == 'issue_comment' From fe730adee64c45bc9a0737838a8aaa2bd8ef61d8 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 30 Jun 2023 11:38:51 -0700 Subject: [PATCH 228/236] chore(ci): Bump up OSX runners for release builds (#17823) Currently the longest build job at 2 hours; the second longest is an hour. 
Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index b490037ba14c1..2dc4c4939eb37 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -203,7 +203,7 @@ jobs: build-x86_64-apple-darwin-packages: name: Build Vector for x86_64-apple-darwin (.tar.gz) - runs-on: macos-11 + runs-on: macos-latest-xl needs: generate-publish-metadata env: VECTOR_VERSION: ${{ needs.generate-publish-metadata.outputs.vector_version }} From 47c3da1f21d3cc3d4af09d321ae3754972e0a150 Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Fri, 30 Jun 2023 16:35:02 -0400 Subject: [PATCH 229/236] chore(ci): fix gardener issues comment workflow (#17825) As it was previously implemented, this workflow would add an issue to the Gardener board if it wasn't already present. This updates the graphql query to fetch the necessary project item id without creating new items. Tested the graphql query in the [playground](https://docs.github.com/en/graphql/overview/explorer). Tested the `jq` command locally. Example Issue ID: `I_kwDOCLjIc85p9V5j` --------- Co-authored-by: Jesse Szwedko --- .github/workflows/gardener_issue_comment.yml | 57 ++++++++++---------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/.github/workflows/gardener_issue_comment.yml b/.github/workflows/gardener_issue_comment.yml index 8ea89fb315145..18ee687a44b17 100644 --- a/.github/workflows/gardener_issue_comment.yml +++ b/.github/workflows/gardener_issue_comment.yml @@ -26,44 +26,43 @@ jobs: status_field_id="PVTF_lADOAQFeYs4AAsTrzgAXRuU" # Status triage_option_id="2a08fafa" - # ensures that the issue is already on board but also seems to be the only way to fetch - # the item id - item_id="$(gh api graphql -f query=' - mutation($project_id: ID!, $content_id: ID!) { - addProjectV2ItemById(input: {projectId: $project_id, contentId: $content_id}) { - item { - id - } - } - }' -f project_id="$project_id" -f content_id="$issue_id" -q '.data.addProjectV2ItemById.item.id' - )" - - echo "item_id: $item_id" - - if [ -z "$item_id" ] ; then - echo "Issue not found in Gardener board" - exit 0 - else - echo "Found issue on Gardener board" - fi - - current_status="$(gh api graphql -f query=' + # Query for project items for the given issue + project_items="$(gh api graphql -f query=' query($item_id: ID!) { node(id: $item_id) { - ... on ProjectV2Item { - fieldValueByName(name: "Status") { - ... on ProjectV2ItemFieldSingleSelectValue { - name + ... on Issue { + projectItems(first: 50) { + ... on ProjectV2ItemConnection { + nodes { + fieldValueByName(name: "Status") { + ... on ProjectV2ItemFieldSingleSelectValue { + name + } + } + ... on ProjectV2Item { + project { + ... on ProjectV2 { + id + } + } + } + } } } } } - }' -f item_id="$item_id" + }' -f item_id="$issue_id" )" - current_status=$(echo $current_status | jq -c -r '.["data"]["node"]["fieldValueByName"]["name"]') + # Extract the item in the Gardener project + current_status=$(echo $project_items | jq -r '.data.node.projectItems.nodes[] | select(.project.id == $project_id) | .fieldValueByName.name') - echo "Current issue status is: '${current_status}'" + if [ -z "$current_status" ] ; then + echo "Issue not found in Gardener board" + exit 0 + else + echo "Found issue on Gardener board. 
Current issue status is: '${current_status}'" + fi if [ "$current_status" = "Blocked / Waiting" ] ; then echo "Moving issue from 'Blocked / Waiting' to 'Triage'" From 77ac63c5bd87309b1ddd54e55b933072b40e34ea Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Fri, 30 Jun 2023 17:00:11 -0400 Subject: [PATCH 230/236] chore(clickhouse sink): refactor to new style (#17723) Closes: https://github.com/vectordotdev/vector/issues/17094 Updates the clickhouse docker image to support aarch64. --- scripts/integration/clickhouse/compose.yaml | 2 +- scripts/integration/clickhouse/test.yaml | 2 +- src/sinks/clickhouse/config.rs | 86 +++++-- src/sinks/clickhouse/http_sink.rs | 250 ------------------- src/sinks/clickhouse/integration_tests.rs | 43 +--- src/sinks/clickhouse/mod.rs | 14 +- src/sinks/clickhouse/service.rs | 257 ++++++++++++++++++++ src/sinks/clickhouse/sink.rs | 117 +++++++++ src/sinks/prelude.rs | 2 +- 9 files changed, 474 insertions(+), 299 deletions(-) delete mode 100644 src/sinks/clickhouse/http_sink.rs create mode 100644 src/sinks/clickhouse/service.rs create mode 100644 src/sinks/clickhouse/sink.rs diff --git a/scripts/integration/clickhouse/compose.yaml b/scripts/integration/clickhouse/compose.yaml index fe11611e9265d..62f8a90a543c2 100644 --- a/scripts/integration/clickhouse/compose.yaml +++ b/scripts/integration/clickhouse/compose.yaml @@ -2,4 +2,4 @@ version: '3' services: clickhouse: - image: docker.io/yandex/clickhouse-server:${CONFIG_VERSION} + image: docker.io/clickhouse/clickhouse-server:${CONFIG_VERSION} diff --git a/scripts/integration/clickhouse/test.yaml b/scripts/integration/clickhouse/test.yaml index 7da786d257251..7b106b914d4dd 100644 --- a/scripts/integration/clickhouse/test.yaml +++ b/scripts/integration/clickhouse/test.yaml @@ -7,7 +7,7 @@ env: CLICKHOUSE_ADDRESS: http://clickhouse:8123 matrix: - version: ['19'] + version: ['23'] # changes to these files/paths will invoke the integration test in CI # expressions are evaluated using https://github.com/micromatch/picomatch diff --git a/src/sinks/clickhouse/config.rs b/src/sinks/clickhouse/config.rs index 08299eb541099..0efffb97ef3ce 100644 --- a/src/sinks/clickhouse/config.rs +++ b/src/sinks/clickhouse/config.rs @@ -1,21 +1,18 @@ -use vector_config::configurable_component; +use http::{Request, StatusCode, Uri}; +use hyper::Body; +use super::{ + service::{ClickhouseRetryLogic, ClickhouseService}, + sink::ClickhouseSink, +}; use crate::{ - codecs::Transformer, - config::{AcknowledgementsConfig, Input, SinkConfig, SinkContext}, - http::Auth, + http::{get_http_scheme_from_uri, Auth, HttpClient, MaybeAuth}, sinks::{ - util::{ - BatchConfig, Compression, RealtimeSizeBasedDefaultBatchSettings, TowerRequestConfig, - UriSerde, - }, - Healthcheck, VectorSink, + prelude::*, + util::{RealtimeSizeBasedDefaultBatchSettings, UriSerde}, }, - tls::TlsConfig, }; -use super::http_sink::build_http_sink; - /// Configuration for the `clickhouse` sink. 
#[configurable_component(sink("clickhouse", "Deliver log data to a ClickHouse database."))] #[derive(Clone, Debug, Default)] @@ -82,9 +79,41 @@ impl_generate_config_from_default!(ClickhouseConfig); #[typetag::serde(name = "clickhouse")] impl SinkConfig for ClickhouseConfig { async fn build(&self, cx: SinkContext) -> crate::Result<(VectorSink, Healthcheck)> { - // later we can build different sink(http, native) here - // according to the clickhouseConfig - build_http_sink(self, cx).await + let endpoint = self.endpoint.with_default_parts().uri; + let protocol = get_http_scheme_from_uri(&endpoint); + + let auth = self.auth.choose_one(&self.endpoint.auth)?; + + let tls_settings = TlsSettings::from_options(&self.tls)?; + let client = HttpClient::new(tls_settings, &cx.proxy)?; + + let service = ClickhouseService::new( + client.clone(), + auth.clone(), + &endpoint, + self.database.as_deref(), + self.table.as_str(), + self.skip_unknown_fields, + self.date_time_best_effort, + )?; + + let request_limits = self.request.unwrap_with(&Default::default()); + let service = ServiceBuilder::new() + .settings(request_limits, ClickhouseRetryLogic::default()) + .service(service); + + let batch_settings = self.batch.into_batcher_settings()?; + let sink = ClickhouseSink::new( + batch_settings, + self.compression, + self.encoding.clone(), + service, + protocol, + ); + + let healthcheck = Box::pin(healthcheck(client, endpoint, auth)); + + Ok((VectorSink::from_event_streamsink(sink), healthcheck)) } fn input(&self) -> Input { @@ -95,3 +124,30 @@ impl SinkConfig for ClickhouseConfig { &self.acknowledgements } } + +async fn healthcheck(client: HttpClient, endpoint: Uri, auth: Option) -> crate::Result<()> { + // TODO: check if table exists? + let uri = format!("{}/?query=SELECT%201", endpoint); + let mut request = Request::get(uri).body(Body::empty()).unwrap(); + + if let Some(auth) = auth { + auth.apply(&mut request); + } + + let response = client.send(request).await?; + + match response.status() { + StatusCode::OK => Ok(()), + status => Err(HealthcheckError::UnexpectedStatus { status }.into()), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn generate_config() { + crate::test_util::test_generate_config::(); + } +} diff --git a/src/sinks/clickhouse/http_sink.rs b/src/sinks/clickhouse/http_sink.rs deleted file mode 100644 index 91c197d126723..0000000000000 --- a/src/sinks/clickhouse/http_sink.rs +++ /dev/null @@ -1,250 +0,0 @@ -use bytes::{BufMut, Bytes, BytesMut}; -use futures::{FutureExt, SinkExt}; -use http::{Request, StatusCode, Uri}; -use hyper::Body; -use snafu::ResultExt; - -use super::ClickhouseConfig; -use crate::{ - codecs::Transformer, - config::SinkContext, - event::Event, - http::{HttpClient, HttpError, MaybeAuth}, - sinks::{ - util::{ - http::{BatchedHttpSink, HttpEventEncoder, HttpRetryLogic, HttpSink}, - retries::{RetryAction, RetryLogic}, - Buffer, TowerRequestConfig, - }, - Healthcheck, HealthcheckError, UriParseSnafu, VectorSink, - }, - tls::TlsSettings, -}; - -pub(crate) async fn build_http_sink( - cfg: &ClickhouseConfig, - cx: SinkContext, -) -> crate::Result<(VectorSink, Healthcheck)> { - let batch = cfg.batch.into_batch_settings()?; - let request = cfg.request.unwrap_with(&TowerRequestConfig::default()); - let tls_settings = TlsSettings::from_options(&cfg.tls)?; - let client = HttpClient::new(tls_settings, &cx.proxy)?; - - let config = ClickhouseConfig { - auth: cfg.auth.choose_one(&cfg.endpoint.auth)?, - ..cfg.clone() - }; - - let sink = BatchedHttpSink::with_logic( - 
config.clone(), - Buffer::new(batch.size, cfg.compression), - ClickhouseRetryLogic::default(), - request, - batch.timeout, - client.clone(), - ) - .sink_map_err(|error| error!(message = "Fatal clickhouse sink error.", %error)); - - let healthcheck = healthcheck(client, config).boxed(); - - #[allow(deprecated)] - Ok((VectorSink::from_event_sink(sink), healthcheck)) -} - -pub struct ClickhouseEventEncoder { - transformer: Transformer, -} - -impl HttpEventEncoder for ClickhouseEventEncoder { - fn encode_event(&mut self, mut event: Event) -> Option { - self.transformer.transform(&mut event); - let log = event.into_log(); - - let mut body = crate::serde::json::to_bytes(&log).expect("Events should be valid json!"); - body.put_u8(b'\n'); - - Some(body) - } -} - -#[async_trait::async_trait] -impl HttpSink for ClickhouseConfig { - type Input = BytesMut; - type Output = BytesMut; - type Encoder = ClickhouseEventEncoder; - - fn build_encoder(&self) -> Self::Encoder { - ClickhouseEventEncoder { - transformer: self.encoding.clone(), - } - } - - async fn build_request(&self, events: Self::Output) -> crate::Result> { - let database = if let Some(database) = &self.database { - database.as_str() - } else { - "default" - }; - - let uri = set_uri_query( - &self.endpoint.with_default_parts().uri, - database, - &self.table, - self.skip_unknown_fields, - self.date_time_best_effort, - ) - .expect("Unable to encode uri"); - - let mut builder = Request::post(&uri).header("Content-Type", "application/x-ndjson"); - - if let Some(ce) = self.compression.content_encoding() { - builder = builder.header("Content-Encoding", ce); - } - - let mut request = builder.body(events.freeze()).unwrap(); - - if let Some(auth) = &self.auth { - auth.apply(&mut request); - } - - Ok(request) - } -} - -async fn healthcheck(client: HttpClient, config: ClickhouseConfig) -> crate::Result<()> { - // TODO: check if table exists? 
- let uri = format!("{}/?query=SELECT%201", config.endpoint.with_default_parts()); - let mut request = Request::get(uri).body(Body::empty()).unwrap(); - - if let Some(auth) = &config.auth { - auth.apply(&mut request); - } - - let response = client.send(request).await?; - - match response.status() { - StatusCode::OK => Ok(()), - status => Err(HealthcheckError::UnexpectedStatus { status }.into()), - } -} - -fn set_uri_query( - uri: &Uri, - database: &str, - table: &str, - skip_unknown: bool, - date_time_best_effort: bool, -) -> crate::Result { - let query = url::form_urlencoded::Serializer::new(String::new()) - .append_pair( - "query", - format!( - "INSERT INTO \"{}\".\"{}\" FORMAT JSONEachRow", - database, - table.replace('\"', "\\\"") - ) - .as_str(), - ) - .finish(); - - let mut uri = uri.to_string(); - if !uri.ends_with('/') { - uri.push('/'); - } - uri.push_str("?input_format_import_nested_json=1&"); - if skip_unknown { - uri.push_str("input_format_skip_unknown_fields=1&"); - } - if date_time_best_effort { - uri.push_str("date_time_input_format=best_effort&") - } - uri.push_str(query.as_str()); - - uri.parse::() - .context(UriParseSnafu) - .map_err(Into::into) -} - -#[derive(Debug, Default, Clone)] -struct ClickhouseRetryLogic { - inner: HttpRetryLogic, -} - -impl RetryLogic for ClickhouseRetryLogic { - type Error = HttpError; - type Response = http::Response; - - fn is_retriable_error(&self, error: &Self::Error) -> bool { - self.inner.is_retriable_error(error) - } - - fn should_retry_response(&self, response: &Self::Response) -> RetryAction { - match response.status() { - StatusCode::INTERNAL_SERVER_ERROR => { - let body = response.body(); - - // Currently, ClickHouse returns 500's incorrect data and type mismatch errors. - // This attempts to check if the body starts with `Code: {code_num}` and to not - // retry those errors. 
- // - // Reference: https://github.com/vectordotdev/vector/pull/693#issuecomment-517332654 - // Error code definitions: https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Common/ErrorCodes.cpp - // - // Fix already merged: https://github.com/ClickHouse/ClickHouse/pull/6271 - if body.starts_with(b"Code: 117") { - RetryAction::DontRetry("incorrect data".into()) - } else if body.starts_with(b"Code: 53") { - RetryAction::DontRetry("type mismatch".into()) - } else { - RetryAction::Retry(String::from_utf8_lossy(body).to_string().into()) - } - } - _ => self.inner.should_retry_response(response), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn generate_config() { - crate::test_util::test_generate_config::(); - } - - #[test] - fn encode_valid() { - let uri = set_uri_query( - &"http://localhost:80".parse().unwrap(), - "my_database", - "my_table", - false, - true, - ) - .unwrap(); - assert_eq!(uri.to_string(), "http://localhost:80/?input_format_import_nested_json=1&date_time_input_format=best_effort&query=INSERT+INTO+%22my_database%22.%22my_table%22+FORMAT+JSONEachRow"); - - let uri = set_uri_query( - &"http://localhost:80".parse().unwrap(), - "my_database", - "my_\"table\"", - false, - false, - ) - .unwrap(); - assert_eq!(uri.to_string(), "http://localhost:80/?input_format_import_nested_json=1&query=INSERT+INTO+%22my_database%22.%22my_%5C%22table%5C%22%22+FORMAT+JSONEachRow"); - } - - #[test] - fn encode_invalid() { - set_uri_query( - &"localhost:80".parse().unwrap(), - "my_database", - "my_table", - false, - false, - ) - .unwrap_err(); - } -} diff --git a/src/sinks/clickhouse/integration_tests.rs b/src/sinks/clickhouse/integration_tests.rs index 21acee5aecc15..f16b19ffcc7ed 100644 --- a/src/sinks/clickhouse/integration_tests.rs +++ b/src/sinks/clickhouse/integration_tests.rs @@ -24,7 +24,7 @@ use crate::{ config::{log_schema, SinkConfig, SinkContext}, sinks::util::{BatchConfig, Compression, TowerRequestConfig}, test_util::{ - components::{run_and_assert_sink_compliance, HTTP_SINK_TAGS}, + components::{run_and_assert_sink_compliance, SINK_TAGS}, random_string, trace_init, }, }; @@ -70,12 +70,8 @@ async fn insert_events() { .as_mut_log() .insert("items", vec!["item1", "item2"]); - run_and_assert_sink_compliance( - sink, - stream::once(ready(input_event.clone())), - &HTTP_SINK_TAGS, - ) - .await; + run_and_assert_sink_compliance(sink, stream::once(ready(input_event.clone())), &SINK_TAGS) + .await; let output = client.select_all(&table).await; assert_eq!(1, output.rows); @@ -119,12 +115,8 @@ async fn skip_unknown_fields() { let (mut input_event, mut receiver) = make_event(); input_event.as_mut_log().insert("unknown", "mysteries"); - run_and_assert_sink_compliance( - sink, - stream::once(ready(input_event.clone())), - &HTTP_SINK_TAGS, - ) - .await; + run_and_assert_sink_compliance(sink, stream::once(ready(input_event.clone())), &SINK_TAGS) + .await; let output = client.select_all(&table).await; assert_eq!(1, output.rows); @@ -171,12 +163,8 @@ async fn insert_events_unix_timestamps() { let (mut input_event, _receiver) = make_event(); - run_and_assert_sink_compliance( - sink, - stream::once(ready(input_event.clone())), - &HTTP_SINK_TAGS, - ) - .await; + run_and_assert_sink_compliance(sink, stream::once(ready(input_event.clone())), &SINK_TAGS) + .await; let output = client.select_all(&table).await; assert_eq!(1, output.rows); @@ -239,12 +227,8 @@ timestamp_format = "unix""#, let (mut input_event, _receiver) = make_event(); - run_and_assert_sink_compliance( - sink, - 
stream::once(ready(input_event.clone())), - &HTTP_SINK_TAGS, - ) - .await; + run_and_assert_sink_compliance(sink, stream::once(ready(input_event.clone())), &SINK_TAGS) + .await; let output = client.select_all(&table).await; assert_eq!(1, output.rows); @@ -292,10 +276,10 @@ async fn no_retry_on_incorrect_data() { }; let client = ClickhouseClient::new(host); - // the event contains a message field, but its being omitted to - // fail the request. + // The event contains a message field, but it's of type String, which will cause + // the request to fail. client - .create_table(&table, "host String, timestamp String") + .create_table(&table, "host String, timestamp String, message Int32") .await; let (sink, _hc) = config.build(SinkContext::default()).await.unwrap(); @@ -351,7 +335,7 @@ async fn no_retry_on_incorrect_data_warp() { .unwrap() .unwrap(); - assert_eq!(receiver.try_recv(), Ok(BatchStatus::Errored)); + assert_eq!(receiver.try_recv(), Ok(BatchStatus::Rejected)); } fn make_event() -> (Event, BatchStatusReceiver) { @@ -378,7 +362,6 @@ impl ClickhouseClient { let response = self .client .post(&self.host) - // .body(format!( "CREATE TABLE {} ({}) diff --git a/src/sinks/clickhouse/mod.rs b/src/sinks/clickhouse/mod.rs index 2f4e5af1870fc..488df9181a8bf 100644 --- a/src/sinks/clickhouse/mod.rs +++ b/src/sinks/clickhouse/mod.rs @@ -1,5 +1,17 @@ +//! The Clickhouse [`vector_core::sink::VectorSink`] +//! +//! This module contains the [`vector_core::sink::VectorSink`] instance that is responsible for +//! taking a stream of [`vector_core::event::Event`] instances and forwarding them to Clickhouse. +//! +//! Events are sent to Clickhouse using the HTTP interface with a query of the following structure: +//! `INSERT INTO my_db.my_table FORMAT JSONEachRow`. The event payload is encoded as new-line +//! delimited JSON. +//! +//! This sink only supports logs for now but could support metrics and traces as well in the future. 
+ mod config; -mod http_sink; #[cfg(all(test, feature = "clickhouse-integration-tests"))] mod integration_tests; +mod service; +mod sink; pub use self::config::ClickhouseConfig; diff --git a/src/sinks/clickhouse/service.rs b/src/sinks/clickhouse/service.rs new file mode 100644 index 0000000000000..ecce62e537290 --- /dev/null +++ b/src/sinks/clickhouse/service.rs @@ -0,0 +1,257 @@ +use bytes::Bytes; +use http::{ + header::{CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_TYPE}, + Request, Response, StatusCode, Uri, +}; +use hyper::{body, Body}; +use snafu::ResultExt; +use std::task::{Context, Poll}; +use tracing::Instrument; + +use crate::{ + http::{Auth, HttpClient, HttpError}, + sinks::{ + prelude::*, + util::{http::HttpRetryLogic, retries::RetryAction}, + UriParseSnafu, + }, +}; + +#[derive(Debug, Clone)] +pub struct ClickhouseRequest { + pub body: Bytes, + pub compression: Compression, + pub finalizers: EventFinalizers, + pub metadata: RequestMetadata, +} + +impl MetaDescriptive for ClickhouseRequest { + fn get_metadata(&self) -> &RequestMetadata { + &self.metadata + } + + fn metadata_mut(&mut self) -> &mut RequestMetadata { + &mut self.metadata + } +} + +impl Finalizable for ClickhouseRequest { + fn take_finalizers(&mut self) -> EventFinalizers { + self.finalizers.take_finalizers() + } +} + +pub struct ClickhouseResponse { + http_response: Response, + events_byte_size: GroupedCountByteSize, + raw_byte_size: usize, +} + +impl DriverResponse for ClickhouseResponse { + fn event_status(&self) -> EventStatus { + match self.http_response.status().is_success() { + true => EventStatus::Delivered, + false => EventStatus::Rejected, + } + } + + fn events_sent(&self) -> &GroupedCountByteSize { + &self.events_byte_size + } + + fn bytes_sent(&self) -> Option { + Some(self.raw_byte_size) + } +} + +#[derive(Debug, Default, Clone)] +pub struct ClickhouseRetryLogic { + inner: HttpRetryLogic, +} + +impl RetryLogic for ClickhouseRetryLogic { + type Error = HttpError; + type Response = ClickhouseResponse; + + fn is_retriable_error(&self, error: &Self::Error) -> bool { + self.inner.is_retriable_error(error) + } + + fn should_retry_response(&self, response: &Self::Response) -> RetryAction { + match response.http_response.status() { + StatusCode::INTERNAL_SERVER_ERROR => { + let body = response.http_response.body(); + + // Currently, ClickHouse returns 500's incorrect data and type mismatch errors. + // This attempts to check if the body starts with `Code: {code_num}` and to not + // retry those errors. + // + // Reference: https://github.com/vectordotdev/vector/pull/693#issuecomment-517332654 + // Error code definitions: https://github.com/ClickHouse/ClickHouse/blob/master/dbms/src/Common/ErrorCodes.cpp + // + // Fix already merged: https://github.com/ClickHouse/ClickHouse/pull/6271 + if body.starts_with(b"Code: 117") { + RetryAction::DontRetry("incorrect data".into()) + } else if body.starts_with(b"Code: 53") { + RetryAction::DontRetry("type mismatch".into()) + } else { + RetryAction::Retry(String::from_utf8_lossy(body).to_string().into()) + } + } + _ => self.inner.should_retry_response(&response.http_response), + } + } +} + +/// `ClickhouseService` is a `Tower` service used to send logs to Clickhouse. +#[derive(Debug, Clone)] +pub struct ClickhouseService { + client: HttpClient, + uri: Uri, + auth: Option, +} + +impl ClickhouseService { + /// Creates a new `ClickhouseService`. 
+ pub fn new( + client: HttpClient, + auth: Option, + endpoint: &Uri, + database: Option<&str>, + table: &str, + skip_unknown_fields: bool, + date_time_best_effort: bool, + ) -> crate::Result { + // Set the URI query once during initialization, as it won't change throughout the lifecycle + // of the service. + let uri = set_uri_query( + endpoint, + database.unwrap_or("default"), + table, + skip_unknown_fields, + date_time_best_effort, + )?; + Ok(Self { client, auth, uri }) + } +} + +impl Service for ClickhouseService { + type Response = ClickhouseResponse; + type Error = crate::Error; + type Future = BoxFuture<'static, Result>; + + // Emission of Error internal event is handled upstream by the caller. + fn poll_ready(&mut self, _cx: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + // Emission of Error internal event is handled upstream by the caller. + fn call(&mut self, request: ClickhouseRequest) -> Self::Future { + let mut client = self.client.clone(); + + let mut builder = Request::post(&self.uri) + .header(CONTENT_TYPE, "application/x-ndjson") + .header(CONTENT_LENGTH, request.body.len()); + if let Some(ce) = request.compression.content_encoding() { + builder = builder.header(CONTENT_ENCODING, ce); + } + if let Some(auth) = &self.auth { + builder = auth.apply_builder(builder); + } + + let http_request = builder + .body(Body::from(request.body)) + .expect("building HTTP request failed unexpectedly"); + + Box::pin(async move { + let response = client.call(http_request).in_current_span().await?; + let (parts, body) = response.into_parts(); + let body = body::to_bytes(body).await?; + Ok(ClickhouseResponse { + http_response: hyper::Response::from_parts(parts, body), + raw_byte_size: request.metadata.request_encoded_size(), + events_byte_size: request + .metadata + .into_events_estimated_json_encoded_byte_size(), + }) + }) + } +} + +fn set_uri_query( + uri: &Uri, + database: &str, + table: &str, + skip_unknown: bool, + date_time_best_effort: bool, +) -> crate::Result { + let query = url::form_urlencoded::Serializer::new(String::new()) + .append_pair( + "query", + format!( + "INSERT INTO \"{}\".\"{}\" FORMAT JSONEachRow", + database, + table.replace('\"', "\\\"") + ) + .as_str(), + ) + .finish(); + + let mut uri = uri.to_string(); + if !uri.ends_with('/') { + uri.push('/'); + } + + uri.push_str("?input_format_import_nested_json=1&"); + if skip_unknown { + uri.push_str("input_format_skip_unknown_fields=1&"); + } + if date_time_best_effort { + uri.push_str("date_time_input_format=best_effort&") + } + uri.push_str(query.as_str()); + + uri.parse::() + .context(UriParseSnafu) + .map_err(Into::into) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn encode_valid() { + let uri = set_uri_query( + &"http://localhost:80".parse().unwrap(), + "my_database", + "my_table", + false, + true, + ) + .unwrap(); + assert_eq!(uri.to_string(), "http://localhost:80/?input_format_import_nested_json=1&date_time_input_format=best_effort&query=INSERT+INTO+%22my_database%22.%22my_table%22+FORMAT+JSONEachRow"); + + let uri = set_uri_query( + &"http://localhost:80".parse().unwrap(), + "my_database", + "my_\"table\"", + false, + false, + ) + .unwrap(); + assert_eq!(uri.to_string(), "http://localhost:80/?input_format_import_nested_json=1&query=INSERT+INTO+%22my_database%22.%22my_%5C%22table%5C%22%22+FORMAT+JSONEachRow"); + } + + #[test] + fn encode_invalid() { + set_uri_query( + &"localhost:80".parse().unwrap(), + "my_database", + "my_table", + false, + false, + ) + .unwrap_err(); + } +} diff --git 
a/src/sinks/clickhouse/sink.rs b/src/sinks/clickhouse/sink.rs new file mode 100644 index 0000000000000..805cc50bcf4bc --- /dev/null +++ b/src/sinks/clickhouse/sink.rs @@ -0,0 +1,117 @@ +use bytes::Bytes; +use codecs::{encoding::Framer, JsonSerializerConfig, NewlineDelimitedEncoderConfig}; + +use super::service::{ClickhouseRequest, ClickhouseRetryLogic, ClickhouseService}; +use crate::{internal_events::SinkRequestBuildError, sinks::prelude::*}; + +pub struct ClickhouseSink { + batch_settings: BatcherSettings, + compression: Compression, + encoding: (Transformer, Encoder), + service: Svc, + protocol: &'static str, +} + +impl ClickhouseSink { + pub fn new( + batch_settings: BatcherSettings, + compression: Compression, + transformer: Transformer, + service: Svc, + protocol: &'static str, + ) -> Self { + Self { + batch_settings, + compression, + encoding: ( + transformer, + Encoder::::new( + NewlineDelimitedEncoderConfig::default().build().into(), + JsonSerializerConfig::default().build().into(), + ), + ), + service, + protocol, + } + } + + async fn run_inner(self: Box, input: BoxStream<'_, Event>) -> Result<(), ()> { + input + .batched(self.batch_settings.into_byte_size_config()) + .request_builder( + None, + ClickhouseRequestBuilder { + compression: self.compression, + encoding: self.encoding, + }, + ) + .filter_map(|request| async { + match request { + Err(error) => { + emit!(SinkRequestBuildError { error }); + None + } + Ok(req) => Some(req), + } + }) + .into_driver(self.service) + .protocol(self.protocol) + .run() + .await + } +} + +#[async_trait::async_trait] +impl StreamSink for ClickhouseSink { + async fn run( + self: Box, + input: futures_util::stream::BoxStream<'_, Event>, + ) -> Result<(), ()> { + self.run_inner(input).await + } +} + +struct ClickhouseRequestBuilder { + compression: Compression, + encoding: (Transformer, Encoder), +} + +impl RequestBuilder> for ClickhouseRequestBuilder { + type Metadata = EventFinalizers; + type Events = Vec; + type Encoder = (Transformer, Encoder); + type Payload = Bytes; + type Request = ClickhouseRequest; + type Error = std::io::Error; + + fn compression(&self) -> Compression { + self.compression + } + + fn encoder(&self) -> &Self::Encoder { + &self.encoding + } + + fn split_input( + &self, + mut events: Vec, + ) -> (Self::Metadata, RequestMetadataBuilder, Self::Events) { + let finalizers = events.take_finalizers(); + let builder = RequestMetadataBuilder::from_events(&events); + (finalizers, builder, events) + } + + fn build_request( + &self, + metadata: Self::Metadata, + request_metadata: RequestMetadata, + payload: EncodeResult, + ) -> Self::Request { + ClickhouseRequest { + body: payload.into_payload(), + compression: self.compression, + finalizers: metadata, + metadata: request_metadata, + } + } +} diff --git a/src/sinks/prelude.rs b/src/sinks/prelude.rs index ffef449c78df1..775b530c41002 100644 --- a/src/sinks/prelude.rs +++ b/src/sinks/prelude.rs @@ -17,7 +17,7 @@ pub use crate::{ BatchConfig, Compression, NoDefaultsBatchSettings, RequestBuilder, SinkBatchSettings, TowerRequestConfig, }, - Healthcheck, + Healthcheck, HealthcheckError, }, template::{Template, TemplateParseError}, tls::TlsConfig, From 93ef6c3e9241601253b48e27ee817e73474a89c6 Mon Sep 17 00:00:00 2001 From: Doug Smith Date: Fri, 30 Jun 2023 17:30:23 -0400 Subject: [PATCH 231/236] chore(docs): add instructions for regenerating component docs and licenses (#17828) --- docs/DEVELOPING.md | 4 ++++ docs/DOCUMENTING.md | 19 +++++++++++++++---- 2 files changed, 19 insertions(+), 4 
deletions(-) diff --git a/docs/DEVELOPING.md b/docs/DEVELOPING.md index 0fa32c68d30c6..2eb9fc1d63fab 100644 --- a/docs/DEVELOPING.md +++ b/docs/DEVELOPING.md @@ -123,6 +123,8 @@ Loosely, you'll need the following: - **To build Vector:** Have working Rustup, Protobuf tools, C++/C build tools (LLVM, GCC, or MSVC), Python, and Perl, `make` (the GNU one preferably), `bash`, `cmake`, `GNU coreutils`, and `autotools`. - **To run integration tests:** Have `docker` available, or a real live version of that service. (Use `AUTOSPAWN=false`) - **To run `make check-component-features`:** Have `remarshal` installed. +- **To run `make check-licenses` or `cargo vdev build licenses`:** Have `rust-license-tool` [installed](https://github.com/DataDog/rust-license-tool). +- **To run `cargo vdev build component-docs`:** Have `cue` [installed](https://cuelang.org/docs/install/). If you find yourself needing to run something inside the Docker environment described above, that's totally fine, they won't collide or hurt each other. In this case, you'd just run `make environment-generate`. @@ -156,6 +158,8 @@ cargo bench transforms::example # Format your code before pushing! make fmt cargo fmt +# Build component documentation for the website +cargo vdev build component-docs ``` If you run `make` you'll see a full list of all our tasks. Some of these will start Docker containers, sign commits, or even make releases. These are not common development commands and your mileage may vary. diff --git a/docs/DOCUMENTING.md b/docs/DOCUMENTING.md index 1b1dc230ba2f7..33546631e252b 100644 --- a/docs/DOCUMENTING.md +++ b/docs/DOCUMENTING.md @@ -9,12 +9,14 @@ documentation in tandem with code changes. 1. [Responsibilities](#responsibilities) 2. [Reference documentation](#reference-documentation) - 1. [Formatting](#formatting) - 2. [Validating](#validating) + 1. [Installing CUE](#installing-cue) + 2. [Generating from source code](#generating-from-source-code) + 3. [Formatting](#formatting) + 4. [Validating](#validating) 1. [Tips & tricks](#tips--tricks) 1. [Make small incremental changes](#make-small-incremental-changes) - 3. [Changelog](#changelog) - 4. [Release highlights](#release-highlights) + 5. [Changelog](#changelog) + 6. [Release highlights](#release-highlights) 1. [FAQ](#faq) 1. [What makes a release highlight noteworthy?](#what-makes-a-release-highlight-noteworthy) 2. [How is a release highlight different from a blog post?](#how-is-a-release-highlight-different-from-a-blog-post) @@ -53,6 +55,15 @@ version that Vector depends on. Currently Vector is using `v0.5.0`. Using a CUE version different than this may result in CUE check/build errors. We are aiming to improve the developer experience around external tool dependencies ([#15909](https://github.com/vectordotdev/vector/issues/15909)). +### Generating from source code + +Much of Vector's reference documentation is automatically compiled from source code (e.g., doc comments). 
+To regenerate this content, run: + +```bash +cargo vdev build component-docs +``` + ### Formatting Vector has some CUE-related CI checks that are run whenever changes are made to From 4786743dcaa73e16781e8b43ce0a1ce0315a55d1 Mon Sep 17 00:00:00 2001 From: Nathan Fox Date: Fri, 30 Jun 2023 19:13:02 -0400 Subject: [PATCH 232/236] fix: `aws_ec2_metadata` transform when using log namespacing (#17819) closes: https://github.com/vectordotdev/vector/issues/17193 This keeps the same runtime behavior (if the even is not an object, it is converted to an object), but fixes the schema definition calculation to not panic. The behavior might not be optimal since it may result in data loss. Users can remap before hand and convert to an object if needed, and longer term this functionality should be made available in VRL for more flexibility. --- src/transforms/aws_ec2_metadata.rs | 38 ++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/src/transforms/aws_ec2_metadata.rs b/src/transforms/aws_ec2_metadata.rs index f78373b7863af..b79fbafccf7a4 100644 --- a/src/transforms/aws_ec2_metadata.rs +++ b/src/transforms/aws_ec2_metadata.rs @@ -16,6 +16,7 @@ use tokio::time::{sleep, Duration, Instant}; use tracing::Instrument; use vector_config::configurable_component; use vector_core::config::LogNamespace; +use vrl::value::kind::Collection; use vrl::value::Kind; use crate::config::OutputId; @@ -274,6 +275,11 @@ impl TransformConfig for Ec2Metadata { .map(|(output, definition)| { let mut schema_definition = definition.clone(); + // If the event is not an object, it will be converted to an object in this transform + if !schema_definition.event_kind().contains_object() { + *schema_definition.event_kind_mut() = Kind::object(Collection::empty()); + } + for path in paths { schema_definition = schema_definition.with_field(path, Kind::bytes().or_undefined(), None); @@ -708,6 +714,38 @@ enum Ec2MetadataError { }, } +#[cfg(test)] +mod test { + use crate::config::schema::Definition; + use crate::config::{LogNamespace, OutputId, TransformConfig}; + use crate::transforms::aws_ec2_metadata::Ec2Metadata; + use enrichment::TableRegistry; + use lookup::OwnedTargetPath; + use vrl::owned_value_path; + use vrl::value::Kind; + + #[tokio::test] + async fn schema_def_with_string_input() { + let transform_config = Ec2Metadata { + namespace: Some(OwnedTargetPath::event(owned_value_path!("ec2", "metadata")).into()), + ..Default::default() + }; + + let input_definition = + Definition::new(Kind::bytes(), Kind::any_object(), [LogNamespace::Vector]); + + let mut outputs = transform_config.outputs( + TableRegistry::default(), + &[(OutputId::dummy(), input_definition)], + LogNamespace::Vector, + ); + assert_eq!(outputs.len(), 1); + let output = outputs.pop().unwrap(); + let actual_schema_def = output.schema_definitions(true)[&OutputId::dummy()].clone(); + assert!(actual_schema_def.event_kind().is_object()); + } +} + #[cfg(feature = "aws-ec2-metadata-integration-tests")] #[cfg(test)] mod integration_tests { From ee10b8cbae51b9c0bade8d8bd8273a8dbeb3bb58 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Fri, 30 Jun 2023 16:41:15 -0700 Subject: [PATCH 233/236] chore(ci): revert fix gardener issues comment workflow (#17829) Reverts vectordotdev/vector#17825 Just reverting until we can address https://github.com/vectordotdev/vector/actions/runs/5427730493/jobs/9871271740 to avoid notification noise. 
--- .github/workflows/gardener_issue_comment.yml | 57 ++++++++++---------- 1 file changed, 29 insertions(+), 28 deletions(-) diff --git a/.github/workflows/gardener_issue_comment.yml b/.github/workflows/gardener_issue_comment.yml index 18ee687a44b17..8ea89fb315145 100644 --- a/.github/workflows/gardener_issue_comment.yml +++ b/.github/workflows/gardener_issue_comment.yml @@ -26,44 +26,45 @@ jobs: status_field_id="PVTF_lADOAQFeYs4AAsTrzgAXRuU" # Status triage_option_id="2a08fafa" - # Query for project items for the given issue - project_items="$(gh api graphql -f query=' - query($item_id: ID!) { - node(id: $item_id) { - ... on Issue { - projectItems(first: 50) { - ... on ProjectV2ItemConnection { - nodes { - fieldValueByName(name: "Status") { - ... on ProjectV2ItemFieldSingleSelectValue { - name - } - } - ... on ProjectV2Item { - project { - ... on ProjectV2 { - id - } - } - } - } - } + # ensures that the issue is already on board but also seems to be the only way to fetch + # the item id + item_id="$(gh api graphql -f query=' + mutation($project_id: ID!, $content_id: ID!) { + addProjectV2ItemById(input: {projectId: $project_id, contentId: $content_id}) { + item { + id } } - } - }' -f item_id="$issue_id" + }' -f project_id="$project_id" -f content_id="$issue_id" -q '.data.addProjectV2ItemById.item.id' )" - # Extract the item in the Gardener project - current_status=$(echo $project_items | jq -r '.data.node.projectItems.nodes[] | select(.project.id == $project_id) | .fieldValueByName.name') + echo "item_id: $item_id" - if [ -z "$current_status" ] ; then + if [ -z "$item_id" ] ; then echo "Issue not found in Gardener board" exit 0 else - echo "Found issue on Gardener board. Current issue status is: '${current_status}'" + echo "Found issue on Gardener board" fi + current_status="$(gh api graphql -f query=' + query($item_id: ID!) { + node(id: $item_id) { + ... on ProjectV2Item { + fieldValueByName(name: "Status") { + ... on ProjectV2ItemFieldSingleSelectValue { + name + } + } + } + } + }' -f item_id="$item_id" + )" + + current_status=$(echo $current_status | jq -c -r '.["data"]["node"]["fieldValueByName"]["name"]') + + echo "Current issue status is: '${current_status}'" + if [ "$current_status" = "Blocked / Waiting" ] ; then echo "Moving issue from 'Blocked / Waiting' to 'Triage'" gh api graphql -f query=' From 00cc584aa43d6e975a118667badd20be4030bb84 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Wed, 5 Jul 2023 10:52:37 -0700 Subject: [PATCH 234/236] chore(ci): Set HOMEBREW_NO_INSTALL_FROM_API in CI (#17867) We seem to be hitting https://github.com/Homebrew/homebrew-cask/issues/150323 which recommends this workaround. Signed-off-by: Jesse Szwedko Signed-off-by: Jesse Szwedko --- scripts/environment/bootstrap-macos-10.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/environment/bootstrap-macos-10.sh b/scripts/environment/bootstrap-macos-10.sh index 6db8102f683bc..fb105c2756b81 100755 --- a/scripts/environment/bootstrap-macos-10.sh +++ b/scripts/environment/bootstrap-macos-10.sh @@ -1,6 +1,9 @@ #! 
/usr/bin/env bash set -e -o verbose +# https://github.com/Homebrew/homebrew-cask/issues/150323 +unset HOMEBREW_NO_INSTALL_FROM_API + brew update brew install ruby@2.7 coreutils cue-lang/tap/cue protobuf From 4f67695942c7f44f807cc92a43c6d6456fcebd92 Mon Sep 17 00:00:00 2001 From: neuronull Date: Wed, 5 Jul 2023 12:29:41 -0600 Subject: [PATCH 235/236] fix(ci): add missing env var (#17872) --- .github/workflows/integration-comment.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/integration-comment.yml b/.github/workflows/integration-comment.yml index a35f994f82f25..d915a956e2bb8 100644 --- a/.github/workflows/integration-comment.yml +++ b/.github/workflows/integration-comment.yml @@ -31,6 +31,7 @@ env: AWS_SECRET_ACCESS_KEY: "dummy" AXIOM_TOKEN: ${{ secrets.AXIOM_TOKEN }} TEST_APPSIGNAL_PUSH_API_KEY: ${{ secrets.TEST_APPSIGNAL_PUSH_API_KEY }} + TEST_DATADOG_API_KEY: ${{ secrets.CI_TEST_DATADOG_API_KEY }} CONTAINER_TOOL: "docker" DD_ENV: "ci" DD_API_KEY: ${{ secrets.DD_API_KEY }} From 0f13b22a4cebbba000444bdb45f02bc820730a13 Mon Sep 17 00:00:00 2001 From: Jesse Szwedko Date: Wed, 28 Jun 2023 14:49:53 -0700 Subject: [PATCH 236/236] chore(releasing): Prepare v0.31.0 release Signed-off-by: Jesse Szwedko --- distribution/install.sh | 2 +- .../2023-07-04-0-31-0-upgrade-guide.md | 31 - .../2023-07-05-0-31-0-upgrade-guide.md | 14 +- website/content/en/releases/0.31.0.md | 4 + .../administration/interfaces/kubectl.cue | 2 +- website/cue/reference/releases/0.31.0.cue | 550 ++++++++++++++++++ website/cue/reference/versions.cue | 1 + 7 files changed, 570 insertions(+), 34 deletions(-) delete mode 100644 website/content/en/highlights/2023-07-04-0-31-0-upgrade-guide.md create mode 100644 website/content/en/releases/0.31.0.md create mode 100644 website/cue/reference/releases/0.31.0.cue diff --git a/distribution/install.sh b/distribution/install.sh index e2903c7b11e7e..4fbfb8f10b059 100755 --- a/distribution/install.sh +++ b/distribution/install.sh @@ -12,7 +12,7 @@ set -u # If PACKAGE_ROOT is unset or empty, default it. PACKAGE_ROOT="${PACKAGE_ROOT:-"https://packages.timber.io/vector"}" -VECTOR_VERSION="0.30.0" +VECTOR_VERSION="0.31.0" _divider="--------------------------------------------------------------------------------" _prompt=">>>" _indent=" " diff --git a/website/content/en/highlights/2023-07-04-0-31-0-upgrade-guide.md b/website/content/en/highlights/2023-07-04-0-31-0-upgrade-guide.md deleted file mode 100644 index 51eada592553a..0000000000000 --- a/website/content/en/highlights/2023-07-04-0-31-0-upgrade-guide.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -date: "2023-07-04" -title: "0.31 Upgrade Guide" -description: "An upgrade guide that addresses breaking changes in 0.31.0" -authors: ["stephenwakely"] -release: "0.31.0" -hide_on_release_notes: false -badges: - type: breaking change ---- - -Vector's 0.31.0 release includes **breaking changes**: - -1. [`component_received_event_bytes_total` and `component_sent_event_bytes_total` consistently use estimated JSON size of the event](#event_json_size) - -We cover them below to help you upgrade quickly: - -## Upgrade guide - -### Breaking changes - -#### `component_received_event_bytes_total` and `component_sent_event_bytes_total` consistently use estimated JSON size of the event {#event_json_size} - -Prior to this Version, metrics emitted by Vector were inconsistently measuring -the byte size of the events that were being sent and received. 
These metrics -have been updated for all components so they always emit an estimate of the size -of the event should it be serialized to JSON. - -Measuring the events like this allows a consistent measurement to be applied -across all components regardless of how the source or sink serializes the event -when connecting to the external service. diff --git a/website/content/en/highlights/2023-07-05-0-31-0-upgrade-guide.md b/website/content/en/highlights/2023-07-05-0-31-0-upgrade-guide.md index fe42e31349445..bc77ce6ab0a2f 100644 --- a/website/content/en/highlights/2023-07-05-0-31-0-upgrade-guide.md +++ b/website/content/en/highlights/2023-07-05-0-31-0-upgrade-guide.md @@ -2,7 +2,7 @@ date: "2023-07-05" title: "0.31 Upgrade Guide" description: "An upgrade guide that addresses breaking changes in 0.31.0" -authors: ["tobz"] +authors: ["stephenwakely", "tobz"] release: "0.31.0" hide_on_release_notes: false badges: @@ -12,6 +12,7 @@ badges: Vector's 0.31.0 release includes **breaking changes**: 1. [Removal of various deprecated internal metrics](#deprecated-internal-metrics) +1. [`component_received_event_bytes_total` and `component_sent_event_bytes_total` consistently use estimated JSON size of the event](#event_json_size) We cover them below to help you upgrade quickly: @@ -49,3 +50,14 @@ A small note is that a small number of components still emit some of these metri additional tags and information that is disallowed by the Component Specification. They will be removed in a future version once we can rectify those discrepancies, but they are effectively removed as of this release: you cannot depend on them still existing. + +#### `component_received_event_bytes_total` and `component_sent_event_bytes_total` consistently use estimated JSON size of the event {#event_json_size} + +Prior to this Version, metrics emitted by Vector were inconsistently measuring +the byte size of the events that were being sent and received. These metrics +have been updated for all components so they always emit an estimate of the size +of the event should it be serialized to JSON. + +Measuring the events like this allows a consistent measurement to be applied +across all components regardless of how the source or sink serializes the event +when connecting to the external service. 
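To make the new measurement concrete, here is a small, hypothetical sketch (not Vector's actual implementation — the event shape and field names are invented) contrasting an event's in-memory size with the estimated JSON size that these metrics now report:

```rust
// Illustration only: the byte-size metrics now estimate the size of the event as if
// it were serialized to JSON, rather than reporting an in-memory size, which is an
// implementation detail that varies with the underlying data structures.
use serde_json::json;

fn main() {
    let event = json!({
        "message": "GET /index.html HTTP/1.1 200",
        "host": "alpha",
        "status": "warning",
    });

    // Estimated JSON size: the length of the event's JSON encoding.
    let estimated_json_size = serde_json::to_string(&event).map(|s| s.len()).unwrap_or(0);

    // Shallow in-memory size of the top-level value; the real footprint also includes
    // heap allocations for keys and strings, which is why it is not a stable measure.
    let shallow_in_memory_size = std::mem::size_of_val(&event);

    println!("estimated JSON size: {estimated_json_size} bytes");
    println!("shallow in-memory size: {shallow_in_memory_size} bytes");
}
```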
diff --git a/website/content/en/releases/0.31.0.md b/website/content/en/releases/0.31.0.md new file mode 100644 index 0000000000000..0b72126ceabbe --- /dev/null +++ b/website/content/en/releases/0.31.0.md @@ -0,0 +1,4 @@ +--- +title: Vector v0.31.0 release notes +weight: 21 +--- diff --git a/website/cue/reference/administration/interfaces/kubectl.cue b/website/cue/reference/administration/interfaces/kubectl.cue index 8eeddaf8b6b06..3524b0d09fea6 100644 --- a/website/cue/reference/administration/interfaces/kubectl.cue +++ b/website/cue/reference/administration/interfaces/kubectl.cue @@ -19,7 +19,7 @@ administration: interfaces: kubectl: { role_implementations: [Name=string]: { commands: { _deployment_variant: string - _vector_version: "0.30" + _vector_version: "0.31" _namespace: string | *"vector" _controller_resource_type: string _controller_resource_name: string | *_deployment_variant diff --git a/website/cue/reference/releases/0.31.0.cue b/website/cue/reference/releases/0.31.0.cue new file mode 100644 index 0000000000000..6637ec37afa53 --- /dev/null +++ b/website/cue/reference/releases/0.31.0.cue @@ -0,0 +1,550 @@ +package metadata + +releases: "0.31.0": { + date: "2023-07-05" + codename: "" + + description: """ + The Vector team is pleased to announce version 0.31.0! + + Be sure to check out the [upgrade guide](/highlights/2023-07-05-0-31-0-upgrade-guide) for + breaking changes in this release. + + In addition to the usual smaller enhancements and bug fixes, this release includes an opt-in + beta of a new log event data model that we think will make it easier to process logs by + moving event metadata out of the log event itself. We are looking for feedback on this new + feature before beginning towards making it the default and eventually removing the old log + event data model. + + By way of example, an example event from the `datadog_agent` source currently looks like: + + ```json + { + "ddsource": "vector", + "ddtags": "env:prod", + "hostname": "alpha", + "foo": "foo field", + "service": "cernan", + "source_type": "datadog_agent", + "bar": "bar field", + "status": "warning", + "timestamp": "1970-02-14T20:44:57.570Z" + } + ``` + + Will now look like: + + ```json + { + "foo": "foo field", + "bar": "bar field" + } + ``` + + (just the event itself) + + with additional buckets for source added metadata: + + + ```json + { + "ddsource": "vector", + "ddtags": "env:prod", + "hostname": "alpha", + "service": "cernan", + "status": "warning", + "timestamp": "1970-02-14T20:44:57.570Z" + } + ``` + + accessible via `%.`, and Vector added metadata: + + + ```json + { + "source_type": "datadog_agent", + "ingest_timestamp": "1970-02-14T20:44:58.236Z" + } + ``` + + accessible via `%vector.`. + + We think this new organization will be easier to reason about for users as well as avoid key + conflicts between event fields and metadata. + + You can opt into this feature by setting `schema.log_namespace` as a global setting or the + `log_namespace` option now available on each source itself. See [the blog + post](/blog/log-namespacing) for an expanded explanation and details. Let us know what you think [on this issue](https://github.com/vectordotdev/vector/issues/17796). + """ + + known_issues: [] + + changelog: [ + { + type: "fix" + scopes: ["fluent source"] + description: """ + The `fluent` source now correctly sends back message acknowledgements in msgpack + rather than JSON. Previously fluentbit would fail to process them. 
+ """ + contributors: ["ChezBunch"] + pr_numbers: [17407] + }, + { + type: "enhancement" + scopes: ["aws_s3 source"] + description: """ + The `aws_s3` source now support bucket notifications in SQS that originated as SNS + messages. It still does not support receiving SNS messages directly. + """ + contributors: ["sbalmos"] + pr_numbers: [17352] + }, + { + type: "enhancement" + scopes: ["vrl"] + description: """ + A `from_unix_timestamp` function was added to VRL to decode timestamp values from + unix timestamps. This deprecates the `to_timestamp` function, which will be removed + in a future release. + """ + pr_numbers: [17793] + }, + { + type: "enhancement" + scopes: ["vrl"] + description: """ + The `parse_nginx_log` function now supports `ingress_upstreaminfo` as a format. + """ + pr_numbers: [17793] + }, + { + type: "enhancement" + scopes: ["vrl"] + description: """ + The `format_timestamp` function now supports an optional `timezone` argument to + control the timezone of the encoded timestamp. + """ + pr_numbers: [17793] + }, + { + type: "fix" + scopes: ["vrl"] + description: """ + VRL now supports the `\\0` null byte escape sequence in strings. + """ + pr_numbers: [17793] + }, + { + type: "fix" + scopes: ["statsd sink"] + description: """ + The `statsd` sink now correctly encodes all counters as incremental, per the spec. + """ + pr_numbers: [16199] + }, + { + type: "chore" + scopes: ["observability"] + description: """ + Several deprecated internal metrics were removed: + + - `events_in_total` + - `events_out_total` + - `processed_bytes_total` + - `processed_events_total` + - `processing_errors_total` + - `events_failed_total` + - `events_discarded_total` + + See [the upgrade guide](/highlights/2023-07-05-0-31-0-upgrade-guide#deprecated-internal-metrics) for more details. + """ + breaking: true + pr_numbers: [17516, 17542] + }, + { + type: "chore" + scopes: ["observability"] + description: """ + The `component_received_event_bytes_total` and `component_sent_event_bytes_total` + internal metrics have been updated to use a new measure, "estimated JSON size", that + is an estimate of the size of the event were it encoded as JSON rather than the + "in-memory size" of the event, which is an implementation detail. See [the upgrade + guide](/highlights/2023-07-05-0-31-0-upgrade-guide#event_json_size) for more + details. + """ + breaking: true + pr_numbers: [17516, 17542] + }, + { + type: "enhancement" + scopes: ["shutdown"] + description: """ + Vector's graceful shutdown time limit is now configurable (via + `--graceful-shutdown-limit-secs`) and able to be disabled (via + `--no-graceful-shutdown-limit`). See the [CLI + docs](docs/reference/cli/) for more. + """ + pr_numbers: [17479] + }, + { + type: "enhancement" + scopes: ["sinks"] + description: """ + Support for `zstd` compression was added to sinks support compression. + """ + contributors: ["akoshchiy"] + pr_numbers: [17371] + }, + { + type: "enhancement" + scopes: ["prometheus_remote_write sink"] + description: """ + The `prometheus_remote_write` sink now supports `zstd` and `gzip` compression in + addition to `snappy` (the default). + """ + contributors: ["zamazan4ik"] + pr_numbers: [17334] + }, + { + type: "enhancement" + scopes: ["journald source"] + description: """ + The `journald` source now supports a `journal_namespace` option to restrict the namespace of the units that the source consumes logs from. 
+ """ + pr_numbers: [17648] + }, + { + type: "fix" + scopes: ["buffers"] + description: """ + A disk buffer deadlock that occurred on start-up after certain crash conditions was + fixed. + """ + pr_numbers: [17657] + }, + { + type: "enhancement" + scopes: ["codecs"] + description: """ + The `gelf`, `native_json`, `syslog`, and `json` decoders (configurable as + `decoding.codec` on sources) now have corresponding options for lossy UTF-8 + decoding via `decoding..lossy = true|false`. This can be used to + accept invalid UTF-8 where invalid characters are replaced before decoded. + """ + pr_numbers: [17628, 17680] + }, + { + type: "fix" + scopes: ["http_client source"] + description: """ + The `http_client` no longer corrupts binary data by always trying to interpret as UTF-8 bytes. Instead options were added to encoders for lossy UTF-8 decoding (see above entry). + """ + pr_numbers: [17655] + }, + { + type: "enhancement" + scopes: ["aws_kinesis_firehose sink", "aws_kinesis_streams sink"] + description: """ + The `aws_kinesis_firehose` and `aws_kinesis_streams` sinks are now able to retry requests + with partial failures by setting `request_retry_partial` to true. The default is + `false` to avoid writing duplicate data if proper event idempotency is not in place. + """ + contributors: ["dengmingtong"] + pr_numbers: [17535] + }, + { + type: "fix" + scopes: ["http provider"] + description: """ + The `Proxy-Authorization` header is now added to to HTTP requests from components + that support HTTP proxies when authentication is used. + """ + contributors: ["syedriko"] + pr_numbers: [17363] + }, + { + type: "fix" + scopes: ["shutdown"] + description: """ + Vector now exits non-zero if the graceful shutdown time limit expires before Vector + finishes shutting down. + """ + pr_numbers: [17676] + }, + { + type: "fix" + scopes: ["transforms", "sinks", "observability"] + description: """ + The following components now log template render errors at the warning level rather + than error and does not increment `component_errors_total`. This fixes a regression + in v0.30.0 for the `loki` sink. + + - `loki` sink + - `papertrail` sink + - `splunk_hec_logs` sink + - `splunk_hec_metrics` sink + - `throttle` transform + - `log_to_metric` transform + """ + pr_numbers: [17746] + }, + { + type: "enhancement" + scopes: ["observability"] + description: """ + The `component_sent_event_bytes_total` and `component_sent_event_total` metrics can + now optionally have a `service` and `source` tag added to them, driven from event + data, from the added [`telemetry` global config + options](docs/reference/configuration/global-options/#telemetry). This can be used + to break down processing volume by service and source. + """ + pr_numbers: [17549] + }, + { + type: "enhancement" + scopes: ["observability"] + description: """ + The `internal_metrics` and `internal_logs` sources now shutdown last in order to + capture as much telemetry as possible during Vector shutdown. + """ + pr_numbers: [17741] + }, + { + type: "fix" + scopes: ["datadog_metrics sink"] + description: """ + The `datadog_metrics` sink now incrementally encodes sketches. This avoids issues + users have seen with sketch payloads exceeding the limits and being dropped. + """ + pr_numbers: [17764] + }, + { + type: "fix" + scopes: ["datadog_agent source"] + description: """ + The `datadog_agent` reporting of events and bytes received was fixed so it no longer + double counted incoming events. 
+ """ + pr_numbers: [17720] + }, + { + type: "fix" + scopes: ["config"] + description: """ + `log_schema` global configuration fields can now appear in a different file than + defined sources. + """ + contributors: ["Hexta"] + pr_numbers: [17759] + }, + { + type: "fix" + scopes: ["file source"] + description: """ + Vector now supports running greater than 512 sources. Previously it would lock up if + more than 512 `file` sources were defined. + """ + contributors: ["honganan"] + pr_numbers: [17717] + }, + { + type: "fix" + scopes: ["observability"] + description: """ + Internal metrics for the Adaptive Concurrency Request module are now correctly + tagged with component metadata like other sink metrics (`component_kind`, + `component_id`, `component_type`). + """ + pr_numbers: [17765] + }, + ] + + commits: [ + {sha: "2ed8ec77d6effb6c373f56209aa52d9f6158f571", date: "2023-05-18 04:49:06 UTC", description: "bump reqwest from 0.11.17 to 0.11.18", pr_number: 17420, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 22, deletions_count: 8}, + {sha: "e7fa8d373b74117c4d0d90902c3124e620c3c6c3", date: "2023-05-18 13:08:05 UTC", description: "bump rdkafka from 0.30.0 to 0.31.0", pr_number: 17428, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "ae656c7124b9c148e7a678967f58edc2a32501e5", date: "2023-05-19 05:04:53 UTC", description: "bump proc-macro2 from 1.0.57 to 1.0.58", pr_number: 17426, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 65, deletions_count: 65}, + {sha: "c7d7cf8e36b9de6de7cd963e472d33b792c24413", date: "2023-05-19 14:51:58 UTC", description: "use get for page request", pr_number: 17373, scopes: ["databend sink"], type: "fix", breaking_change: false, author: "everpcpc", files_count: 1, insertions_count: 9, deletions_count: 14}, + {sha: "d1949921a81181e2eeb1780d7e081d767f758f5e", date: "2023-05-19 09:55:39 UTC", description: "fix ack message format", pr_number: 17407, scopes: ["fluent source"], type: "fix", breaking_change: false, author: "Benoît GARNIER", files_count: 1, insertions_count: 16, deletions_count: 10}, + {sha: "187f142ef5c28dec8e9b1ffbdfe0196acbe45804", date: "2023-05-19 02:00:47 UTC", description: "update fluentd link", pr_number: 17436, scopes: ["external docs"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "54d9c99492ec14924994a4857961aaafe3200f9b", date: "2023-05-20 08:46:28 UTC", description: "Add info about Vector Operator to Kubernetes instalation page", pr_number: 17432, scopes: ["docs"], type: "chore", breaking_change: false, author: "Vladimir", files_count: 1, insertions_count: 7, deletions_count: 1}, + {sha: "a8b7899bea771e6f2ca2e7c78c5a1c578f03d78f", date: "2023-05-20 00:00:07 UTC", description: "bump lapin from 2.1.1 to 2.1.2", pr_number: 17439, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "ac0c7e82fc5877a58a60da872c40ad9b63143953", date: "2023-05-20 00:03:07 UTC", description: "bump security-framework from 2.9.0 to 2.9.1", pr_number: 17441, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "91ba052ba59d920761a02f7999c4b5d8b39d1766", date: 
"2023-05-20 08:27:29 UTC", description: "bump toml from 0.7.3 to 0.7.4", pr_number: 17440, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 6, insertions_count: 21, deletions_count: 21}, + {sha: "b6394228d53508f22c6a65c69961baff19457c05", date: "2023-05-20 09:22:44 UTC", description: "bump lapin from 2.1.2 to 2.2.0", pr_number: 17443, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "05bf262536031d199c06d980f47be317c97520ea", date: "2023-05-20 09:43:25 UTC", description: "bump clap_complete from 4.2.3 to 4.3.0", pr_number: 17447, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "618379a27583f6233a76c5b788616816b74bee03", date: "2023-05-20 10:36:37 UTC", description: "bump lapin from 2.2.0 to 2.2.1", pr_number: 17448, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "060399a4bbef4280d1cea7c04304ed1308504ca0", date: "2023-05-22 23:37:55 UTC", description: "Move most CI checks to merge queue", pr_number: 17340, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 21, insertions_count: 2308, deletions_count: 1101}, + {sha: "8e40b6850a57f874476f071d4ec98d699a99a65e", date: "2023-05-23 00:37:49 UTC", description: "temporarily disable flakey `aws_s3` integration test case `handles_errored_status` ", pr_number: 17455, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 3, deletions_count: 0}, + {sha: "7554d9c8cc7b9b7134c7879dc941f8f55bc837e2", date: "2023-05-23 06:56:53 UTC", description: "bump bstr from 1.4.0 to 1.5.0", pr_number: 17453, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 5, deletions_count: 5}, + {sha: "95cbba9116f12e1aa3665f89050132a28f9a0327", date: "2023-05-23 07:26:37 UTC", description: "bump base64 from 0.21.0 to 0.21.1", pr_number: 17451, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 14, deletions_count: 14}, + {sha: "85703e792fe0ff70a466380823cf2d4b14b21603", date: "2023-05-23 01:07:21 UTC", description: "Bump PR limit for Dependabot to 100", pr_number: 17459, scopes: ["deps"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "299fd6ab53b1e818d09ae38f4321c20bdce4f30e", date: "2023-05-23 01:22:01 UTC", description: "Update fs_extra to 1.3.0", pr_number: 17458, scopes: ["deps"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "1f54415cb3fd4dc8f3f1b5989aa8d051cbe1faa5", date: "2023-05-23 01:47:25 UTC", description: "Bump lalrpop to 0.19.12", pr_number: 17457, scopes: ["deps"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 5, deletions_count: 5}, + {sha: "547783d17e8d2d3d351213a034e8d38fdcaa3047", date: "2023-05-23 02:11:46 UTC", description: "Clarify when component received and sent bytes events should be emitted", pr_number: 17464, scopes: ["docs"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 12, deletions_count: 9}, + {sha: 
"78bbfbc0205d97b401b5ba3084fe71e2bfdd7f33", date: "2023-05-23 03:49:14 UTC", description: "Bump version to 0.31.0", pr_number: 17466, scopes: [], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "36998428099da9b3ce4bcf0fd6f8787be1920363", date: "2023-05-23 05:43:33 UTC", description: "fix failure notify job conditional in publish workflow", pr_number: 17468, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 4, deletions_count: 4}, + {sha: "f54787190119255c1f97b2fe603ea5e65355b1cd", date: "2023-05-23 05:09:59 UTC", description: "Bump k8s manifests to 0.22.0", pr_number: 17467, scopes: ["kubernetes"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 18, insertions_count: 22, deletions_count: 22}, + {sha: "897e45d5aa3d9ede6aa9115dae41a90b5a200ffa", date: "2023-05-23 23:06:22 UTC", description: "bump regex from 1.8.1 to 1.8.2", pr_number: 17469, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 5, insertions_count: 9, deletions_count: 9}, + {sha: "9aaf864254bb05a92504533cd3d072341dbcb7e9", date: "2023-05-24 03:13:09 UTC", description: "bump data-encoding from 2.3.3 to 2.4.0", pr_number: 17452, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "bca45eb32bff27429a6beb3cf1d7b241d6de8c70", date: "2023-05-24 03:14:31 UTC", description: "bump myrotvorets/set-commit-status-action from 1.1.6 to 1.1.7", pr_number: 17460, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 11, insertions_count: 27, deletions_count: 27}, + {sha: "c425006f299c7a5f91509f7bdb18963f4da0748f", date: "2023-05-24 03:15:58 UTC", description: "bump xt0rted/pull-request-comment-branch from 1 to 2", pr_number: 17461, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 12, insertions_count: 18, deletions_count: 18}, + {sha: "9f6f6ecde0db3ffdd7b904647f490511433836b5", date: "2023-05-24 02:08:11 UTC", description: "minor fixes to workflows post merge queue enabling ", pr_number: 17462, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 4, insertions_count: 14, deletions_count: 12}, + {sha: "c1262cd162e04550b69913877d6b97037aceaea4", date: "2023-05-24 04:18:45 UTC", description: "Update metadata to match the editorial review for the schema.", pr_number: 17475, scopes: ["aws_s3 sink"], type: "chore", breaking_change: false, author: "Ari", files_count: 5, insertions_count: 9, deletions_count: 1}, + {sha: "9235fc249f4a0aa34d1119ed7dd334e23e5c3674", date: "2023-05-25 03:49:32 UTC", description: "bump proptest from 1.1.0 to 1.2.0", pr_number: 17476, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 8, deletions_count: 15}, + {sha: "ebf958b1355b4b729e7c99232bc40e2f7e809abf", date: "2023-05-25 03:57:35 UTC", description: "bump opendal from 0.34.0 to 0.35.0", pr_number: 17471, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "9a44e6e8763c5d2bc91de1c24b14662d10d0b434", date: "2023-05-24 22:51:54 UTC", description: "Update the NOTICE file", pr_number: 17430, scopes: [], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 1, 
insertions_count: 3, deletions_count: 0}, + {sha: "58d7f3dfb0b57445db931604c6f72d93015da505", date: "2023-05-24 23:39:50 UTC", description: "temporarily disable comment_trigger workflow", pr_number: 17480, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 5, deletions_count: 3}, + {sha: "541bb0087eb95b8d67c98547240c8104c5b2a69f", date: "2023-05-25 07:03:53 UTC", description: "Extend library functionality for secret scanning", pr_number: 17483, scopes: ["enterprise"], type: "chore", breaking_change: false, author: "Will Wang", files_count: 3, insertions_count: 23, deletions_count: 18}, + {sha: "78fb4694c26d061314e8a01236a67633d8035d5c", date: "2023-05-25 05:04:04 UTC", description: "Fix architecture detection for ARMv7", pr_number: 17484, scopes: ["distribution"], type: "fix", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 4, deletions_count: 3}, + {sha: "426d6602d22193940ac6e495fc5c175aa3bc8f90", date: "2023-05-25 22:17:36 UTC", description: "update `vrl` to `0.4.0`", pr_number: 17378, scopes: [], type: "chore", breaking_change: false, author: "Nathan Fox", files_count: 44, insertions_count: 101, deletions_count: 269}, + {sha: "670bdea00ab7a13921aa3194667068b27f58e35a", date: "2023-05-26 04:26:55 UTC", description: "set source fields to mean service", pr_number: 17470, scopes: ["observability"], type: "chore", breaking_change: false, author: "Stephen Wakely", files_count: 6, insertions_count: 58, deletions_count: 15}, + {sha: "077a294d10412552e80c41429f23bd6a4f47724b", date: "2023-05-26 04:13:59 UTC", description: "Bump async-graphql from 5.0.8 to 5.0.9", pr_number: 17486, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 10, deletions_count: 10}, + {sha: "79f7dfb4d4633badf8ee89f0e940fa44f5bd59aa", date: "2023-05-26 04:14:38 UTC", description: "bump memmap2 from 0.6.1 to 0.6.2", pr_number: 17482, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "84f0adac7a8e6306e12eaf13dc8c28f23e33f867", date: "2023-05-26 04:15:58 UTC", description: "bump criterion from 0.4.0 to 0.5.0", pr_number: 17477, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 7, insertions_count: 13, deletions_count: 46}, + {sha: "7699f4ded19e520258adddd4c628a7a309c52c4e", date: "2023-05-26 01:33:59 UTC", description: "update comment_trigger note about concurrency groups", pr_number: 17491, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 5, deletions_count: 3}, + {sha: "ac81fc1318b229e2b9c6bbcd080af7438afde85a", date: "2023-05-26 08:18:38 UTC", description: "Bump async-graphql-warp from 5.0.8 to 5.0.9", pr_number: 17489, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "b28d915cb6a48da836bb4736c027f1ca5d623fe2", date: "2023-05-26 05:31:23 UTC", description: "remove custom async sleep impl", pr_number: 17493, scopes: [], type: "chore", breaking_change: false, author: "Nathan Fox", files_count: 1, insertions_count: 3, deletions_count: 12}, + {sha: "2a76cac4d327eac537996d3409a64633c96f5ac8", date: "2023-05-26 06:00:07 UTC", description: "refactor `statsd` sink to stream-based style", pr_number: 16199, scopes: ["statsd sink"], type: "chore", breaking_change: false, 
author: "Toby Lawrence", files_count: 31, insertions_count: 2346, deletions_count: 786}, + {sha: "5d90cff55c04701692dfe2b92416c3cf4ded5a4d", date: "2023-05-26 10:01:46 UTC", description: "bump regex from 1.8.2 to 1.8.3", pr_number: 17494, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 5, insertions_count: 6, deletions_count: 6}, + {sha: "cc307460df2b45af6f33311d493c6bd7f9d44da5", date: "2023-05-26 23:02:43 UTC", description: "Bump quote from 1.0.27 to 1.0.28", pr_number: 17496, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 67, deletions_count: 67}, + {sha: "f261781b5ce4389fb23017a2d4892c7f16753ad9", date: "2023-05-27 03:03:25 UTC", description: "Bump base64 from 0.21.1 to 0.21.2", pr_number: 17488, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 14, deletions_count: 14}, + {sha: "2ad5b478f8948d0c3d92197f90100148cebda237", date: "2023-05-27 03:03:51 UTC", description: "bump aws-sigv4 from 0.55.1 to 0.55.3", pr_number: 17481, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 10, deletions_count: 10}, + {sha: "4ce3278ba5c2b92391818ff85c410a01f6b71cbf", date: "2023-05-27 04:47:28 UTC", description: "Bump proc-macro2 from 1.0.58 to 1.0.59", pr_number: 17495, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 66, deletions_count: 66}, + {sha: "a551f33da2b752229bd8139c72af80ce8b149638", date: "2023-05-27 11:12:45 UTC", description: "RFC for Data Volume Insights", pr_number: 17322, scopes: [], type: "chore", breaking_change: false, author: "Stephen Wakely", files_count: 1, insertions_count: 240, deletions_count: 0}, + {sha: "98c54ad3a371ac710151367a953252f9eb293548", date: "2023-05-27 06:51:49 UTC", description: "remove deprecated internal metrics + massive cleanup to vector top and graphql API", pr_number: 17516, scopes: ["observability"], type: "chore", breaking_change: false, author: "Toby Lawrence", files_count: 121, insertions_count: 1134, deletions_count: 2147}, + {sha: "bf372fd7cdef40704205e5fb5bf10bc50e002d94", date: "2023-05-30 02:03:43 UTC", description: "fix a few logic bugs and more strict comment parsing", pr_number: 17502, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 6, insertions_count: 39, deletions_count: 61}, + {sha: "cc703da814928b41e0d9c0d7d211181f4aa5758a", date: "2023-05-30 06:10:19 UTC", description: "Bump tokio from 1.28.1 to 1.28.2", pr_number: 17525, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 9, insertions_count: 12, deletions_count: 12}, + {sha: "2388c2f492a4952e48f1c1f8469045378ec60739", date: "2023-05-30 12:11:22 UTC", description: "Bump quanta from 0.11.0 to 0.11.1", pr_number: 17524, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "da7bc951c450c1274fa37abb2d19b83dd3f965ab", date: "2023-05-30 12:12:17 UTC", description: "Bump criterion from 0.5.0 to 0.5.1", pr_number: 17500, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 4, deletions_count: 4}, + {sha: "aa014528ca83bd3f1d17604d8c138ac2d0484074", date: "2023-05-31 00:17:29 UTC", description: "Drop VRL license exceptions", pr_number: 
17529, scopes: ["ci"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 1, insertions_count: 0, deletions_count: 9}, + {sha: "078de661e7146a1924c0c31fed65b8b0ccbb7316", date: "2023-05-31 06:05:02 UTC", description: "Bump openssl from 0.10.52 to 0.10.53", pr_number: 17534, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 6, deletions_count: 6}, + {sha: "1565985746868265a1582a1b33b4eb56cc046c26", date: "2023-05-31 06:06:30 UTC", description: "Bump indicatif from 0.17.3 to 0.17.4", pr_number: 17532, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 6, deletions_count: 11}, + {sha: "8e113addc48328f3918e6abc7623284d93d4030b", date: "2023-05-31 06:07:26 UTC", description: "Bump once_cell from 1.17.1 to 1.17.2", pr_number: 17531, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "5a2fea10da7eaa04b7e51af84cdea87ab6e8326b", date: "2023-05-31 06:09:28 UTC", description: "Bump log from 0.4.17 to 0.4.18", pr_number: 17526, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 6}, + {sha: "ecb707a633020bca8c805d5764b85302b74ca477", date: "2023-05-31 08:08:20 UTC", description: "Bump graphql_client from 0.12.0 to 0.13.0", pr_number: 17541, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 7, deletions_count: 7}, + {sha: "b0ed167d1ae22b8f0a7a762ad50750c912f0833b", date: "2023-05-31 05:04:00 UTC", description: "remove more deprecated internal metrics", pr_number: 17542, scopes: ["observability"], type: "chore", breaking_change: false, author: "Toby Lawrence", files_count: 111, insertions_count: 216, deletions_count: 649}, + {sha: "3b87e00f3a62be93f55a89df676b47a8fad22201", date: "2023-05-31 05:02:15 UTC", description: "add missing logic to mark required checks failed", pr_number: 17543, scopes: ["ci"], type: "fix", breaking_change: false, author: "neuronull", files_count: 2, insertions_count: 13, deletions_count: 2}, + {sha: "e2c025591c572efdd04728fac301b2e025596516", date: "2023-05-31 06:14:59 UTC", description: "post failed status to PR and isolate branch checkout on comment trigger", pr_number: 17544, scopes: ["ci"], type: "fix", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 7, deletions_count: 3}, + {sha: "dbd7151aa4128638765e360f3f0f4e6582735041", date: "2023-05-31 12:57:35 UTC", description: "Bump opendal from 0.35.0 to 0.36.0", pr_number: 17540, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "3b2a2be1b075344a92294c1248b09844f895ad72", date: "2023-06-01 05:18:38 UTC", description: "ensure `sent_event` and `received_event` metrics are estimated json size", pr_number: 17465, scopes: ["observability"], type: "chore", breaking_change: false, author: "Stephen Wakely", files_count: 87, insertions_count: 807, deletions_count: 449}, + {sha: "247bb807cae195c5c987a43e3c4e6ab6b885a94b", date: "2023-05-31 23:49:54 UTC", description: "fix reference to supported aarch64 architecture", pr_number: 17553, scopes: ["external docs"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: 
"0dfa09c4a9b7e753802a4fa0700557752e2fc945", date: "2023-06-01 01:25:38 UTC", description: "Bump chrono to 0.4.26", pr_number: 17537, scopes: ["deps"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 3, insertions_count: 12, deletions_count: 5}, + {sha: "349c7183067f0aa91b05914f34a68ee899fea88b", date: "2023-06-01 03:33:08 UTC", description: "Remove links to roadmap", pr_number: 17554, scopes: [], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 3, insertions_count: 0, deletions_count: 9}, + {sha: "bcc5b6c5c883e16bd959b610890f67ffc0405860", date: "2023-06-01 09:23:24 UTC", description: "Bump csv from 1.2.1 to 1.2.2", pr_number: 17555, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "7a4f1f77470fbc804299e2c1be867b193052d275", date: "2023-06-02 04:02:27 UTC", description: "correct emitted metrics", pr_number: 17562, scopes: ["observability"], type: "fix", breaking_change: false, author: "Stephen Wakely", files_count: 2, insertions_count: 8, deletions_count: 3}, + {sha: "23ed0e3adbffdd770a257635c3d6720a3bf072e7", date: "2023-06-02 05:49:12 UTC", description: "make shutdown duration configurable", pr_number: 17479, scopes: ["configurable shutdown duration"], type: "feat", breaking_change: false, author: "Dominic Burkart", files_count: 12, insertions_count: 130, deletions_count: 53}, + {sha: "f523f70d12053bd8d1d5ceee41c7c843780ded84", date: "2023-06-02 00:51:53 UTC", description: "Update field labels for commonly used sources and transforms ", pr_number: 17517, scopes: ["config"], type: "chore", breaking_change: false, author: "May Lee", files_count: 20, insertions_count: 41, deletions_count: 11}, + {sha: "ced219e70405c9ed9012444cc04efad8f91d3590", date: "2023-06-02 12:22:59 UTC", description: "zstd compression support", pr_number: 17371, scopes: ["compression"], type: "enhancement", breaking_change: false, author: "Andrey Koshchiy", files_count: 29, insertions_count: 455, deletions_count: 121}, + {sha: "e1ddd0e99c0290a645a484c45cc42a391803c6c0", date: "2023-06-02 04:31:32 UTC", description: "Update field labels for sinks", pr_number: 17560, scopes: ["config"], type: "chore", breaking_change: false, author: "May Lee", files_count: 8, insertions_count: 15, deletions_count: 0}, + {sha: "8a741d55b8bfe361d6c5449cab4fd3728e1dae8d", date: "2023-06-02 02:42:54 UTC", description: "Bump aws-actions/configure-aws-credentials from 2.0.0 to 2.1.0", pr_number: 17565, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 6, deletions_count: 6}, + {sha: "d7df52055152d9f85a6e48082d385e84c45f1501", date: "2023-06-03 02:15:58 UTC", description: "adapt int test to use breaking change of dep", pr_number: 17583, scopes: ["http_client source"], type: "fix", breaking_change: false, author: "neuronull", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "4af5e6d8886cfc326209f8d6aa65d27f86f6e579", date: "2023-06-03 03:07:14 UTC", description: "Bump openssl from 0.10.53 to 0.10.54", pr_number: 17573, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 4, deletions_count: 4}, + {sha: "8823561a8ad544b4acd29273b466b1a5bd606cc2", date: "2023-06-03 05:48:01 UTC", description: "Codify the use of abbreviate time units in config option names", pr_number: 17582, scopes: [], type: "chore", breaking_change: false, author: "Jesse 
Szwedko", files_count: 1, insertions_count: 2, deletions_count: 1}, + {sha: "134578db2165b4b522013d0e7d6ac974f9e4e744", date: "2023-06-03 05:48:10 UTC", description: "Codify flag naming including sentinel values", pr_number: 17569, scopes: [], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 16, deletions_count: 0}, + {sha: "6e45477ddc27147887346c8d09dd077225ea2ef3", date: "2023-06-03 06:06:18 UTC", description: "Update field labels for the rest of the sources and transforms fields", pr_number: 17564, scopes: ["config"], type: "chore", breaking_change: false, author: "May Lee", files_count: 33, insertions_count: 46, deletions_count: 22}, + {sha: "1c1beb8123e1b0c82537ae3c2e26235bc6c0c43b", date: "2023-06-03 10:10:13 UTC", description: "Bump mock_instant from 0.3.0 to 0.3.1", pr_number: 17574, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "854980945e685485388bda2dd8f9cd9ad040029e", date: "2023-06-03 10:53:45 UTC", description: "Bump clap_complete from 4.3.0 to 4.3.1", pr_number: 17586, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "3395cfdb90b165653dda7e9014057aac1dba2d28", date: "2023-06-03 05:19:13 UTC", description: "bump pulsar from 5.1.1 to 6.0.0", pr_number: 17587, scopes: ["deps"], type: "chore", breaking_change: false, author: "neuronull", files_count: 4, insertions_count: 26, deletions_count: 14}, + {sha: "25e7699bb505e1856d04634ed6571eb22631b140", date: "2023-06-03 13:32:07 UTC", description: "use json size of unencoded event", pr_number: 17572, scopes: ["loki sink"], type: "fix", breaking_change: false, author: "Stephen Wakely", files_count: 2, insertions_count: 5, deletions_count: 17}, + {sha: "fa8a55385dd391aa2429c3f2e9821198c364c6a0", date: "2023-06-05 02:21:55 UTC", description: "int test yaml file detection", pr_number: 17590, scopes: ["ci"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 38, deletions_count: 0}, + {sha: "a164952a145109d95c465645bf08b387a61e408a", date: "2023-06-06 03:10:16 UTC", description: "Bump indicatif from 0.17.4 to 0.17.5", pr_number: 17597, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "da939ca645e49cd02cbd739cddcdfe00dcb88a55", date: "2023-06-06 00:27:39 UTC", description: "add sink prelude", pr_number: 17595, scopes: [], type: "chore", breaking_change: false, author: "Stephen Wakely", files_count: 29, insertions_count: 97, deletions_count: 239}, + {sha: "6b34868e285a4608914405b7701ae1ee82deb536", date: "2023-06-06 01:11:04 UTC", description: " move blocked/waiting gardener issues to triage on comment", pr_number: 17588, scopes: ["dev"], type: "enhancement", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 89, deletions_count: 0}, + {sha: "dc6bef2a2e6c47e145c776b4fd91042b112a0890", date: "2023-06-06 07:23:59 UTC", description: "Bump once_cell from 1.17.2 to 1.18.0", pr_number: 17596, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 6, insertions_count: 7, deletions_count: 7}, + {sha: "8e042590117989394f8bc246dc6d7de61d00123a", date: "2023-06-06 07:24:54 UTC", description: "Bump percent-encoding from 2.2.0 to 2.3.0", pr_number: 17602, scopes: ["deps"], type: "chore", 
breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "7a55210ed814e0c47618905a299eba0d896a0646", date: "2023-06-06 07:50:36 UTC", description: "Bump cached from 0.43.0 to 0.44.0", pr_number: 17599, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 5, deletions_count: 13}, + {sha: "657758db74496ec9adede09fc8f132bd8bed3bc3", date: "2023-06-06 08:54:46 UTC", description: "Bump regex from 1.8.3 to 1.8.4", pr_number: 17601, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 5, insertions_count: 6, deletions_count: 6}, + {sha: "9395eba89ed10488914ac042aabba068356bb84b", date: "2023-06-06 20:59:56 UTC", description: "use correct secret for gardener board comment", pr_number: 17605, scopes: ["ci"], type: "fix", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "baa04e59d9b234c4e71f8545a6ad8fdb2517f805", date: "2023-06-06 21:05:53 UTC", description: "checkout a greater depth in regression workflow", pr_number: 17604, scopes: ["ci"], type: "fix", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 50, deletions_count: 1}, + {sha: "154e39382f4e80998814a693f9d6bb5c89ebebf7", date: "2023-06-07 03:10:22 UTC", description: "Bump hashbrown from 0.13.2 to 0.14.0", pr_number: 17609, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 12, deletions_count: 3}, + {sha: "d956092efdcc4ccea718365d9e9ef7bd537563a8", date: "2023-06-07 03:11:46 UTC", description: "Bump url from 2.3.1 to 2.4.0", pr_number: 17608, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 12, deletions_count: 12}, + {sha: "a9324892a289e94214707f1e09ea2931ae27d5e3", date: "2023-06-07 03:58:40 UTC", description: "Bump xml-rs from 0.8.4 to 0.8.14", pr_number: 17607, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "b5bd85f87e39389a2ea3bb9a3d588fcbdfd0e29d", date: "2023-06-07 08:04:28 UTC", description: "Bump opendal from 0.36.0 to 0.37.0", pr_number: 17614, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 4, deletions_count: 4}, + {sha: "bd880f55d2d8605733297acb4f96a8100a60dad4", date: "2023-06-07 08:22:12 UTC", description: "Bump getrandom from 0.2.9 to 0.2.10", pr_number: 17613, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 14, deletions_count: 14}, + {sha: "b400acced6bd61d5927ab75bb82643b5927c0cbd", date: "2023-06-07 02:34:00 UTC", description: "fix copy-paste issue in component spec", pr_number: 17616, scopes: ["docs"], type: "fix", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "c55c9ecbf904d9166c88af65a9a3f76f18289f58", date: "2023-06-07 19:45:36 UTC", description: "Bump tempfile from 3.5.0 to 3.6.0", pr_number: 17617, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 25, deletions_count: 23}, + {sha: "6c4856595410ee77d52d62ceb2cd808b1cdff04e", date: "2023-06-07 22:33:35 UTC", description: "Upgrade rust to 1.70.0", pr_number: 17585, scopes: ["deps"], type: "chore", breaking_change: 
false, author: "Bruce Guenter", files_count: 4, insertions_count: 6, deletions_count: 11}, + {sha: "460bbc7b9e532f93ac015ff871535c16135e4793", date: "2023-06-07 22:37:23 UTC", description: "Bump wiremock from 0.5.18 to 0.5.19", pr_number: 17618, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "579108353e50546081b830d4e5788be7bb76a892", date: "2023-06-08 01:53:30 UTC", description: "change command to find baseline sha from issue comment trigger", pr_number: 17622, scopes: ["ci"], type: "fix", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "3005141f2097169a05af418e5f80765468645700", date: "2023-06-08 02:55:32 UTC", description: "Bump docker/setup-qemu-action from 2.1.0 to 2.2.0", pr_number: 17623, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 2, deletions_count: 2}, + {sha: "a54a12faae72ee64f4ba842746837a4787af5dc2", date: "2023-06-08 08:56:13 UTC", description: "Bump docker/metadata-action from 4.4.0 to 4.5.0", pr_number: 17624, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "15bc42a21bed188819da4d12e38d108f2e840202", date: "2023-06-08 08:56:43 UTC", description: "Bump docker/setup-buildx-action from 2.5.0 to 2.6.0", pr_number: 17625, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 4, deletions_count: 4}, + {sha: "10cfd0aec905c605248ad9d36abb312d4bfc1a5b", date: "2023-06-08 09:26:19 UTC", description: "Bump libc from 0.2.144 to 0.2.146", pr_number: 17615, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "29315428b2c93ae0a5682ddb1fb25137b5eb3931", date: "2023-06-08 19:24:37 UTC", description: "Bump async-graphql from 5.0.9 to 5.0.10", pr_number: 17619, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 10, deletions_count: 10}, + {sha: "f1e1ae36ec4f244a03cbc7084cde64ea2d9631fa", date: "2023-06-08 22:23:07 UTC", description: "reg workflow alt approach to getting baseline sha", pr_number: 17645, scopes: ["ci"], type: "fix", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 3, deletions_count: 25}, + {sha: "e35150e8b376db1f19b60b828233eb47393bb2dd", date: "2023-06-09 04:23:41 UTC", description: "Bump serde from 1.0.163 to 1.0.164", pr_number: 17632, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 8, insertions_count: 11, deletions_count: 11}, + {sha: "593ea1bc89303f2f2344cca58d7c1aa5de939084", date: "2023-06-09 04:23:45 UTC", description: "Bump memmap2 from 0.6.2 to 0.7.0", pr_number: 17641, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "b3885f693ebbdddd338b72bfd594e164d4fa361d", date: "2023-06-09 04:26:45 UTC", description: "Bump async-graphql-warp from 5.0.9 to 5.0.10", pr_number: 17642, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "f20eb2ff554c0163ea4955c9a5ad1ef0acd9f492", date: "2023-06-09 04:40:57 UTC", description: "Bump proc-macro2 from 1.0.59 
to 1.0.60", pr_number: 17643, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 66, deletions_count: 66}, + {sha: "2638cca6cbf5103f71944383255b3e335d7f5790", date: "2023-06-08 23:34:18 UTC", description: "use correct ID for Triage in Gardener Board", pr_number: 17647, scopes: ["ci"], type: "fix", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "380d7adb72a02e8da0af35fd3d80ecb1d8b0b541", date: "2023-06-09 09:16:28 UTC", description: "add more compression algorithms to Prometheus Remote Write", pr_number: 17334, scopes: ["prometheus"], type: "feat", breaking_change: false, author: "Alexander Zaitsev", files_count: 4, insertions_count: 77, deletions_count: 6}, + {sha: "a324a07ba1b62baac08d74b287595846b787b887", date: "2023-06-09 05:43:44 UTC", description: "add journal_namespace option", pr_number: 17648, scopes: ["journald source"], type: "feat", breaking_change: false, author: "Doug Smith", files_count: 2, insertions_count: 71, deletions_count: 6}, + {sha: "0dc450fac14ac0236ca48466fd4fe42630d421ed", date: "2023-06-09 05:44:35 UTC", description: "mark VectorSink::from_event_sink as deprecated", pr_number: 17649, scopes: ["sinks"], type: "chore", breaking_change: false, author: "Doug Smith", files_count: 20, insertions_count: 26, deletions_count: 0}, + {sha: "45a28f88a910c8492872773cc2e86045c8e2f4b6", date: "2023-06-10 05:28:33 UTC", description: "avoid importing vector-common in enrichment module", pr_number: 17653, scopes: ["enrichment"], type: "chore", breaking_change: false, author: "Jérémie Drouet", files_count: 4, insertions_count: 7, deletions_count: 6}, + {sha: "bf7d79623c0b575dd0bb6f851cc12c15cea5eb5f", date: "2023-06-10 03:12:55 UTC", description: "Add lossy option to JSON deserializer", pr_number: 17628, scopes: ["codecs"], type: "feat", breaking_change: false, author: "Doug Smith", files_count: 28, insertions_count: 1131, deletions_count: 686}, + {sha: "cb9a3a548877b222afb14159393b8bc7bc3f8518", date: "2023-06-10 02:20:50 UTC", description: "Bump docker/build-push-action from 4.0.0 to 4.1.0", pr_number: 17656, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "e1b335748ef3b1345db9f5b9af11b5df2f24868a", date: "2023-06-14 01:02:27 UTC", description: "Bump log from 0.4.18 to 0.4.19", pr_number: 17662, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "37a662a9c2e388dc1699f90288c5d856381d15d4", date: "2023-06-13 21:26:02 UTC", description: "deadlock when seeking after entire write fails to be flushed", pr_number: 17657, scopes: ["buffers"], type: "fix", breaking_change: false, author: "Toby Lawrence", files_count: 4, insertions_count: 224, deletions_count: 7}, + {sha: "19c4d4f72a4c08fdf51299bd7b3b906f8f8d08c1", date: "2023-06-14 03:40:45 UTC", description: "Bump wasm-bindgen from 0.2.86 to 0.2.87", pr_number: 17672, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 10, deletions_count: 10}, + {sha: "ab1169bd40ff7f1fa8cf1e77d24cd779112b2178", date: "2023-06-14 01:22:08 UTC", description: "Add apt retries to cross builds", pr_number: 17683, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 2, deletions_count: 0}, + {sha: 
"2dfa8509bcdb4220d32e3d91f7fdd61c081db5ea", date: "2023-06-14 06:42:33 UTC", description: "add lossy option to `gelf`, `native_json`, and `syslog` deserializers", pr_number: 17680, scopes: ["codecs"], type: "feat", breaking_change: false, author: "Doug Smith", files_count: 30, insertions_count: 1185, deletions_count: 165}, + {sha: "ac68a7b8d8238f4d64d5f3850e15dc9931e39349", date: "2023-06-15 01:17:32 UTC", description: "Bump rdkafka from 0.31.0 to 0.32.2", pr_number: 17664, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 5, deletions_count: 5}, + {sha: "8d98bb8c4f4a4dd44e433caf8846aee4df1eec2b", date: "2023-06-15 01:21:03 UTC", description: "Bump pulsar from 6.0.0 to 6.0.1", pr_number: 17673, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "714ccf8e77426b916ab88121c45a611106ebd6fe", date: "2023-06-15 00:21:21 UTC", description: "Bump crossbeam-utils from 0.8.15 to 0.8.16", pr_number: 17674, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 5, deletions_count: 5}, + {sha: "c97d619d47b1171d592dcf55692b5caa01e97992", date: "2023-06-15 01:39:40 UTC", description: "Bump uuid from 1.3.3 to 1.3.4", pr_number: 17682, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "80069871df7d0809411053435486c604b7b8c15d", date: "2023-06-15 01:40:20 UTC", description: "Bump docker/setup-buildx-action from 2.6.0 to 2.7.0", pr_number: 17685, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 4, deletions_count: 4}, + {sha: "71273dfc64206dd66290426fe7d65a68afb13d51", date: "2023-06-15 01:41:19 UTC", description: "Bump docker/metadata-action from 4.5.0 to 4.6.0", pr_number: 17686, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "bce5e65d9562983f0094f1b7359775cf17043285", date: "2023-06-15 01:41:49 UTC", description: "Bump docker/build-push-action from 4.1.0 to 4.1.1", pr_number: 17687, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "41ee39414ea3210c841659f1f41b3295ad8bfd23", date: "2023-06-14 22:01:17 UTC", description: "Drop use of `hashlink` crate", pr_number: 17678, scopes: ["deps"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 3, insertions_count: 5, deletions_count: 15}, + {sha: "59e2cbff7bce014209813369d2a33a25ac193bb3", date: "2023-06-15 00:20:47 UTC", description: "remove utf8 lossy conversion", pr_number: 17655, scopes: ["http_client source"], type: "fix", breaking_change: false, author: "Doug Smith", files_count: 1, insertions_count: 1, deletions_count: 2}, + {sha: "ee480cd08a5451bc3f0b83a2b037ba131e38d4b9", date: "2023-06-15 01:00:28 UTC", description: "Dropped error field from StreamClosed Error", pr_number: 17693, scopes: [], type: "chore", breaking_change: false, author: "Spencer Gilbert", files_count: 37, insertions_count: 74, deletions_count: 84}, + {sha: "9c4539436ecbbf48dc0dd454ea25230d539b2c9b", date: "2023-06-15 07:19:03 UTC", description: "consolidate enum types", pr_number: 17688, scopes: ["codecs"], type: "chore", breaking_change: false, author: "Doug Smith", 
files_count: 18, insertions_count: 144, deletions_count: 296}, + {sha: "2263756d0a39cb99d62a826ff0993f461ae80937", date: "2023-06-16 00:04:32 UTC", description: "Update to Alpine 3.18", pr_number: 17695, scopes: ["deps", "releasing"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 3, insertions_count: 5, deletions_count: 3}, + {sha: "079d895ebffeb62cf51cb11144b17fd481292510", date: "2023-06-16 05:55:48 UTC", description: "Add docker config to dependabot", pr_number: 17696, scopes: [], type: "chore", breaking_change: false, author: "Spencer Gilbert", files_count: 1, insertions_count: 10, deletions_count: 0}, + {sha: "960635387235ea270d748038a3a0ddd615813f29", date: "2023-06-16 04:00:58 UTC", description: "Make config schema output ordered", pr_number: 17694, scopes: ["config"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 6, insertions_count: 9, deletions_count: 17}, + {sha: "2ad964d43b9a47808104eced885cebf6541f4a72", date: "2023-06-16 23:48:28 UTC", description: "Additional notes on proposing new integrations", pr_number: 17658, scopes: [], type: "docs", breaking_change: false, author: "Spencer Gilbert", files_count: 1, insertions_count: 28, deletions_count: 6}, + {sha: "bebac21cb699be64d1b009d3619d5af5c5be20ec", date: "2023-06-17 12:32:09 UTC", description: "implement full retry of partial failures in firehose/streams", pr_number: 17535, scopes: ["kinesis sinks"], type: "feat", breaking_change: false, author: "dengmingtong", files_count: 13, insertions_count: 103, deletions_count: 23}, + {sha: "c21f892e574579e323742da009f15a39c43555af", date: "2023-06-17 08:03:47 UTC", description: "validate s3 sink flushes", pr_number: 17667, scopes: ["flush on shutdown"], type: "chore", breaking_change: false, author: "Dominic Burkart", files_count: 1, insertions_count: 74, deletions_count: 0}, + {sha: "d122d32b8c83133b753c9e31d19be6c6609fb9a5", date: "2023-06-20 03:43:56 UTC", description: "Bump sha2 from 0.10.6 to 0.10.7", pr_number: 17698, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 24, deletions_count: 24}, + {sha: "cd6d1540bf74d13ad6bc9c90fc3fe2affb11e6dc", date: "2023-06-21 00:06:24 UTC", description: "Bump notify from 6.0.0 to 6.0.1", pr_number: 17700, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "53e178570b5b87bc2124f4299865cbb00916fe20", date: "2023-06-21 01:55:02 UTC", description: "Bump gloo-utils from 0.1.6 to 0.1.7", pr_number: 17707, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 2, deletions_count: 2}, + {sha: "9cd54043fab1e82722adaeeaee290d7084074439", date: "2023-06-21 01:48:48 UTC", description: "Convert top-level sinks enum to typetag", pr_number: 17710, scopes: ["config"], type: "chore", breaking_change: false, author: "Bruce Guenter", files_count: 74, insertions_count: 270, deletions_count: 540}, + {sha: "6705bdde058b1a532eda9398c9610dff46bb783b", date: "2023-06-21 05:30:41 UTC", description: "Vector does not put the Proxy-Authorization header on the wire (#17353)", pr_number: 17363, scopes: ["auth"], type: "fix", breaking_change: false, author: "Sergey Yedrikov", files_count: 2, insertions_count: 38, deletions_count: 18}, + {sha: "12bc4a7d116273cda322fccf41b4e3ea6c333be3", date: "2023-06-21 04:17:53 UTC", description: "Bump aws-actions/configure-aws-credentials from 2.1.0 to 
2.2.0", pr_number: 17697, scopes: ["ci"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 6, deletions_count: 6}, + {sha: "dd2527dcea295f4f9f6eb617306a822892e08a59", date: "2023-06-22 07:33:19 UTC", description: "Bump openssl from 0.10.54 to 0.10.55", pr_number: 17716, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 6, deletions_count: 6}, + {sha: "e8e7e0448f51ed9646c484123fd4953442545c86", date: "2023-06-22 00:00:00 UTC", description: "Retry `make check-component-docs` check", pr_number: 17718, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 5, deletions_count: 1}, + {sha: "ddebde97bac79eaecb7feb286bfe5a25591e7d13", date: "2023-06-22 03:29:50 UTC", description: "Upgrade Ruby version to 3.1.4", pr_number: 17722, scopes: ["deps"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 5, insertions_count: 6, deletions_count: 12}, + {sha: "bc6925592f8d954212efb99f2f17bcac8a454169", date: "2023-06-22 06:41:01 UTC", description: "reduce billable time of Test Suite", pr_number: 17714, scopes: ["ci"], type: "enhancement", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 36, deletions_count: 51}, + {sha: "25131efdbe855a8f4d2491bd68fb76c58f7f8ad4", date: "2023-06-22 23:54:09 UTC", description: "Bump serde_json from 1.0.96 to 1.0.97", pr_number: 17701, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 6, insertions_count: 7, deletions_count: 7}, + {sha: "e5e6b9635cf3fd13676d845f184ef3a04167ceef", date: "2023-06-22 23:54:27 UTC", description: "Bump tower-http from 0.4.0 to 0.4.1", pr_number: 17711, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 38, deletions_count: 32}, + {sha: "c96e3be34c239e94a366f9ced8e0e8b69570a562", date: "2023-06-22 22:55:03 UTC", description: "Bump mongodb from 2.5.0 to 2.6.0", pr_number: 17726, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "08099a8b567663416d907600e2f9c678482af272", date: "2023-06-23 01:28:52 UTC", description: "Have `tower_limit` use configured log level", pr_number: 17715, scopes: ["observability"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "a08443c890cc0e3223e4d17c71eb267f0305d50c", date: "2023-06-23 04:17:06 UTC", description: "Add @dsmith3197 to CODEOWNERS", pr_number: 17729, scopes: ["dev"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 2, insertions_count: 19, deletions_count: 18}, + {sha: "9a899c5d7c40a271b17eafec2f840c1bfd082b04", date: "2023-06-23 04:39:49 UTC", description: "Add additional warning around APM stats for `peer.service`", pr_number: 17733, scopes: ["datadog_traces sink"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 12, deletions_count: 1}, + {sha: "326ad0861215f22c83f681e725abb88b33107e2e", date: "2023-06-23 23:24:38 UTC", description: "Bump infer from 0.13.0 to 0.14.0", pr_number: 17737, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 4, deletions_count: 4}, + {sha: "cc52c0ea99e03f451c24c165b24430c045ff365d", date: "2023-06-24 04:00:39 UTC", 
description: "set exit flag to non-zero when shutdown times out", pr_number: 17676, scopes: ["error code when shutdown fails"], type: "feat", breaking_change: false, author: "Dominic Burkart", files_count: 3, insertions_count: 59, deletions_count: 14}, + {sha: "ff6a1b4f06b1e32f3192f2bc391e8ab59f466993", date: "2023-06-23 22:13:14 UTC", description: "Remove upload of config schema", pr_number: 17740, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 0, deletions_count: 6}, + {sha: "44be37843c0599abb64073fe737ce146e30b3aa5", date: "2023-06-24 02:57:59 UTC", description: "add metadata support to `set_semantic_meaning`", pr_number: 17730, scopes: ["schemas"], type: "feat", breaking_change: false, author: "Nathan Fox", files_count: 2, insertions_count: 23, deletions_count: 18}, + {sha: "7a0dec13537211b4a7e460cdf57b079709649b5f", date: "2023-06-24 02:55:43 UTC", description: "Move CONTRIBUTING.md to top-level", pr_number: 17744, scopes: ["docs"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 2, insertions_count: 256, deletions_count: 257}, + {sha: "7d10fc97f32c053f9336d1d69d530f39ef258268", date: "2023-06-24 04:54:20 UTC", description: "Clarify `bytes` framing for streams", pr_number: 17745, scopes: ["docs"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "92a36e0119e0e1f50b8bfcdcaf1c536018b69d5f", date: "2023-06-24 05:59:45 UTC", description: "refactor logic for int test file path changes detection", pr_number: 17725, scopes: ["ci"], type: "enhancement", breaking_change: false, author: "neuronull", files_count: 38, insertions_count: 413, deletions_count: 252}, + {sha: "4ebc3e1171cba4f00023f0ef860a6b66c98763a9", date: "2023-06-24 05:05:16 UTC", description: "Drop non-fatal template render errors to warnings", pr_number: 17746, scopes: ["loki sink", "observability"], type: "fix", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 21, deletions_count: 13}, + {sha: "c35ebd167b029eb0fb6c180301e8ff911f938f9f", date: "2023-06-24 06:27:25 UTC", description: "add domain label for vdev", pr_number: 17748, scopes: ["administration"], type: "chore", breaking_change: false, author: "neuronull", files_count: 1, insertions_count: 3, deletions_count: 0}, + {sha: "6e1878b1c151a19d7a99fd6c8c8a847cc69db3c8", date: "2023-06-27 00:31:06 UTC", description: "Bump itertools from 0.10.5 to 0.11.0", pr_number: 17736, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 3, insertions_count: 23, deletions_count: 14}, + {sha: "6a6b42bedbd27dec0c91e274698785cc73f805df", date: "2023-06-26 22:38:47 UTC", description: "Upgrade aws-smithy and aws-sdk crates", pr_number: 17731, scopes: [], type: "chore", breaking_change: false, author: "Spencer Gilbert", files_count: 27, insertions_count: 487, deletions_count: 468}, + {sha: "dcf7f9ae538c821eb7b3baf494d3e8938083832c", date: "2023-06-27 04:03:39 UTC", description: "emit `component_sent` events by `source` and `service`", pr_number: 17549, scopes: ["observability"], type: "chore", breaking_change: false, author: "Stephen Wakely", files_count: 77, insertions_count: 1387, deletions_count: 501}, + {sha: "94e3f1542be0c4ba93f554803973c9e26e7dc566", date: "2023-06-27 06:53:51 UTC", description: "remove aggregator beta warning", pr_number: 17750, scopes: [], type: "docs", breaking_change: false, author: "gadisn", files_count: 1, 
insertions_count: 0, deletions_count: 7}, + {sha: "63ba2a95d972bbba11cd9a1f913f2606bb2ba20b", date: "2023-06-26 23:53:17 UTC", description: "Bump proc-macro2 from 1.0.60 to 1.0.63", pr_number: 17757, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 1, insertions_count: 67, deletions_count: 67}, + {sha: "a53c7a2153960038b8e68e13d6beede09eb1a69a", date: "2023-06-26 23:48:37 UTC", description: "Add warning about Windows support", pr_number: 17762, scopes: ["kubernetes_logs source"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 3, deletions_count: 1}, + {sha: "e164b36436b85a332b5a3b4c492caab6b53578d3", date: "2023-06-27 06:53:13 UTC", description: "Bump serde_yaml from 0.9.21 to 0.9.22", pr_number: 17756, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 4, insertions_count: 46, deletions_count: 29}, + {sha: "96e68f76efe2208a8899b3f8961125ba5424a9ba", date: "2023-07-01 06:25:20 UTC", description: "Bump lru from 0.10.0 to 0.10.1", pr_number: 17810, scopes: ["deps"], type: "chore", breaking_change: false, author: "dependabot[bot]", files_count: 2, insertions_count: 3, deletions_count: 3}, + {sha: "708b7f6088c14180945d80e2a8f13ed471ded77a", date: "2023-07-01 00:17:31 UTC", description: "Add schedule to component features workflow conditional check", pr_number: 17816, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "fe730adee64c45bc9a0737838a8aaa2bd8ef61d8", date: "2023-07-01 01:38:51 UTC", description: "Bump up OSX runners for release builds", pr_number: 17823, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 1, deletions_count: 1}, + {sha: "47c3da1f21d3cc3d4af09d321ae3754972e0a150", date: "2023-07-01 06:35:02 UTC", description: "fix gardener issues comment workflow", pr_number: 17825, scopes: ["ci"], type: "chore", breaking_change: false, author: "Doug Smith", files_count: 1, insertions_count: 28, deletions_count: 29}, + {sha: "77ac63c5bd87309b1ddd54e55b933072b40e34ea", date: "2023-07-01 07:00:11 UTC", description: "refactor to new style", pr_number: 17723, scopes: ["clickhouse sink"], type: "chore", breaking_change: false, author: "Doug Smith", files_count: 9, insertions_count: 474, deletions_count: 299}, + {sha: "93ef6c3e9241601253b48e27ee817e73474a89c6", date: "2023-07-01 07:30:23 UTC", description: "add instructions for regenerating component docs and licenses", pr_number: 17828, scopes: ["docs"], type: "chore", breaking_change: false, author: "Doug Smith", files_count: 2, insertions_count: 19, deletions_count: 4}, + {sha: "4786743dcaa73e16781e8b43ce0a1ce0315a55d1", date: "2023-07-01 09:13:02 UTC", description: "`aws_ec2_metadata` transform when using log namespacing", pr_number: 17819, scopes: ["aws_ec2_metadata transform"], type: "fix", breaking_change: false, author: "Nathan Fox", files_count: 1, insertions_count: 38, deletions_count: 0}, + {sha: "ee10b8cbae51b9c0bade8d8bd8273a8dbeb3bb58", date: "2023-07-01 06:41:15 UTC", description: "revert fix gardener issues comment workflow", pr_number: 17829, scopes: ["ci"], type: "chore", breaking_change: false, author: "Jesse Szwedko", files_count: 1, insertions_count: 29, deletions_count: 28}, + ] +} diff --git a/website/cue/reference/versions.cue b/website/cue/reference/versions.cue index 0e7a2168f0805..4084da0652d6e 100644 --- 
a/website/cue/reference/versions.cue +++ b/website/cue/reference/versions.cue @@ -2,6 +2,7 @@ package metadata // This has to be maintained manually because there's currently no way to sort versions programmatically versions: [string, ...string] & [ + "0.31.0", "0.30.0", "0.29.1", "0.29.0",