[CONSUL-546] Scripts Clean-up (#92)
joselo85 committed Dec 21, 2022
1 parent 6af2f31 commit b9bae6f
Showing 6 changed files with 146 additions and 184 deletions.
2 changes: 1 addition & 1 deletion WINDOWS-TEST.md
@@ -37,4 +37,4 @@ To run a single test case, the name should be specified. For instance, to run th
go test -v -timeout=30m -tags integration ./test/integration/connect/envoy -run="TestEnvoy/case-badauthz" -win=true
```

> :warning: Note that the flag `-win=true` must be specified as shown in the commands above. This flag is required to indicate that the tests will be executed in a Windows environment.
> :warning: Note that the flag `-win=true` must be specified as shown in the commands above. This flag is required to indicate that the tests will be executed in a Windows environment. When the Envoy integration tests are executed, the **End of Line Sequence** of every related file and/or script is automatically changed from **LF** to **CRLF**.
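Because the conversion happens in place, the working tree is left with **CRLF** files after a test run. A minimal clean-up sketch (not part of the repository's tooling; it assumes the affected files live under `test/integration/connect/envoy` and that you have no other local edits there you want to keep):

```bash
# Sketch: show which files the CRLF conversion touched.
git diff --stat -- test/integration/connect/envoy

# Sketch: discard the line-ending changes and restore the checked-in LF versions.
# WARNING: this also discards any other local edits under this path.
git checkout -- test/integration/connect/envoy
```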
3 changes: 2 additions & 1 deletion test/integration/connect/envoy/WindowsTroubleshooting.md
@@ -42,7 +42,8 @@ Where **TEST CASE** is the individual test case we want to execute (e.g. case-ba
> [!TIP]
> When executing the integration tests using **PowerShell** you may need to set the ENVOY_VERSION value manually on line 20 of the [run-tests.windows.sh](run-tests.windows.sh) file.
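As an illustration only (not a documented step), that assignment can also be patched from a Git Bash prompt before starting the tests from PowerShell, assuming line 20 of the script is a plain `ENVOY_VERSION=...` assignment and using a placeholder version number:

```bash
# Sketch: overwrite the ENVOY_VERSION assignment on line 20 of the Windows test runner.
# Assumes that line currently has the form ENVOY_VERSION=<value>; adjust the version as needed.
sed -i '20s/^ENVOY_VERSION=.*/ENVOY_VERSION="1.24.0"/' test/integration/connect/envoy/run-tests.windows.sh
```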
## Troubleshooting
> [!WARNING]
> When executing the integration tests for Windows environments, the **End of Line Sequence** of every related file and/or script will be changed from **LF** to **CRLF**.
### About Envoy Integration Tests on Windows

@@ -1,74 +1,74 @@
#!/usr/bin/env bats

load helpers

@test "s1 proxy admin is up on :19000" {
retry_default curl -f -s localhost:19000/stats -o /dev/null
}

@test "s2 proxy admin is up on :19001" {
retry_default curl -f -s localhost:19001/stats -o /dev/null
}

@test "s3-v1 proxy admin is up on :19002" {
retry_default curl -f -s localhost:19002/stats -o /dev/null
}

@test "s3-v2 proxy admin is up on :19003" {
retry_default curl -f -s localhost:19003/stats -o /dev/null
}

@test "s3 proxy admin is up on :19004" {
retry_default curl -f -s localhost:19004/stats -o /dev/null
}

@test "s1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21000 s1
}

@test "s2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21001 s2
}

@test "s3-v1 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21002 s3
}

@test "s3-v2 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21003 s3
}

@test "s3 proxy listener should be up and have right cert" {
assert_proxy_presents_cert_uri localhost:21004 s3
}

@test "s2 proxies should be healthy" {
assert_service_has_healthy_instances s2 1
}

@test "s3 proxies should be healthy" {
assert_service_has_healthy_instances s3 3
}

# Note: when failover is configured the cluster is named for the original
# service not any destination related to failover.
@test "s1 upstream should have healthy endpoints for s2 and s3 together" {
assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~s2.default.primary HEALTHY 1
}

@test "s1 upstream should be able to connect to s2 via upstream s2 to start" {
assert_expected_fortio_name s2
}

@test "terminate instance of s2 envoy which should trigger failover to s3 when tcp check fails" {
kill_envoy s2
}

@test "s1 upstream should have healthy endpoints for s3-v1 and unhealthy endpoints for s2" {
assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~v1.s3.default.primary HEALTHY 1
assert_upstream_has_endpoints_in_status 127.0.0.1:19000 failover-target~s2.default.primary UNHEALTHY 1
}

@test "s1 upstream should be able to connect to s3-v1 now" {
assert_expected_fortio_name s3-v1
}
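The `assert_upstream_has_endpoints_in_status` checks above take the s1 sidecar's Envoy admin address (`127.0.0.1:19000`) as their first argument. When debugging a failing failover case by hand, the same endpoint health can be read straight from Envoy's admin `/clusters` endpoint; a sketch (the grep filters are only illustrative):

```bash
# Sketch: list the failover-target clusters known to the s1 sidecar and the
# health flags Envoy currently reports for their endpoints.
curl -s localhost:19000/clusters | grep failover-target | grep health_flags
```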
48 changes: 24 additions & 24 deletions test/integration/connect/envoy/case-grpc/service_s1.hcl
@@ -1,25 +1,25 @@
services {
  name = "s1"
  port = 8079
  connect {
    sidecar_service {
      proxy {
        upstreams = [
          {
            destination_name = "s2"
            local_bind_port = 5000
            config {
              protocol = "grpc"
            }
          }
        ]
        config {
          protocol = "grpc"
          envoy_dogstatsd_url = "udp://127.0.0.1:8125"
          envoy_stats_tags = ["foo=bar"]
          envoy_stats_flush_interval = "5s"
        }
      }
    }
  }
}
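The sidecar configuration above points Envoy's DogStatsD sink at `udp://127.0.0.1:8125`, tags every metric with `foo=bar`, and flushes every 5 seconds. To eyeball what the proxy actually emits during a run, you can listen on that UDP port yourself; a sketch, not part of the test harness (netcat flag syntax varies between BSD and traditional variants):

```bash
# Sketch: print the DogStatsD datagrams Envoy flushes to 127.0.0.1:8125 every 5s.
# BSD netcat shown; traditional netcat needs `nc -u -l -p 8125` instead.
nc -u -l 8125
```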