Merge branch 'main' into add-sqrt
lyang24 authored Apr 6, 2023
2 parents 03bdb01 + 8e0bf7a commit 42a71e9
Showing 116 changed files with 3,142 additions and 1,083 deletions.
15 changes: 14 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions ci/scripts/e2e-sink-test.sh
@@ -66,6 +66,7 @@ export PGPASSWORD=postgres
psql -h db -U postgres -c "CREATE ROLE test LOGIN SUPERUSER PASSWORD 'connector';"
createdb -h db -U postgres test
psql -h db -U postgres -d test -c "CREATE TABLE t4 (v1 int PRIMARY KEY, v2 int);"
psql -h db -U postgres -d test -c "create table t5 (v1 smallint primary key, v2 int, v3 bigint, v4 float4, v5 float8, v6 decimal, v7 varchar, v8 timestamp, v9 boolean);"
psql -h db -U postgres -d test < ./e2e_test/sink/remote/pg_create_table.sql

node_port=50051
@@ -98,6 +99,7 @@ echo "--- testing sinks"
sqllogictest -p 4566 -d dev './e2e_test/sink/append_only_sink.slt'
sqllogictest -p 4566 -d dev './e2e_test/sink/create_sink_as.slt'
sqllogictest -p 4566 -d dev './e2e_test/sink/blackhole_sink.slt'
sqllogictest -p 4566 -d dev './e2e_test/sink/remote/types.slt'
sleep 1

# check sink destination postgres
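The script's actual destination checks are truncated here. As a hypothetical sketch (not the script's real check), a minimal sanity query against the new t5 sink table could look like this, assuming the upsert sink has already flushed its rows:

-- Hypothetical sanity check, run via: psql -h db -U postgres -d test
-- Counts the rows the upsert sink delivered into t5; a non-zero count
-- means the sink wrote at least one row for the typed columns above.
SELECT count(*) AS sink_rows FROM t5;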
4 changes: 2 additions & 2 deletions ci/workflows/pull-request.yml
@@ -180,7 +180,7 @@ steps:
config: ci/docker-compose.yml
mount-buildkite-agent: true
- ./ci/plugins/upload-failure-logs
- timeout_in_minutes: 6
+ timeout_in_minutes: 7

- label: "regress test"
command: "ci/scripts/regress-test.sh -p ci-dev"
@@ -288,7 +288,7 @@ steps:
# files: "*-junit.xml"
# format: "junit"
- ./ci/plugins/upload-failure-logs
- timeout_in_minutes: 18
+ timeout_in_minutes: 25

- label: "misc check"
command: "ci/scripts/misc-check.sh"
2 changes: 1 addition & 1 deletion docker/dashboards/risingwave-dev-dashboard.json

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion docker/dashboards/risingwave-user-dashboard.json

Large diffs are not rendered by default.

50 changes: 50 additions & 0 deletions e2e_test/batch/functions/trim.slt.part
@@ -13,6 +13,51 @@ select trim(trailing 'cba' from 'abcxyzabc');
----
abcxyz

query T
select trim('cba' from 'abcxyzabc');
----
xyz

query T
select trim(both from ' xyz ');
----
xyz

query T
select trim(from ' xyz ');
----
xyz

query T
select trim(both from 'abcxyzabc', 'cba');
----
xyz

query T
select trim(both 'abcxyzabc', 'cba');
----
xyz

query T
select trim(from 'abcxyzabc', 'cba');
----
xyz

query T
select trim('abcxyzabc', 'cba');
----
xyz

query T
select trim(both ' xyz ');
----
xyz

query T
select trim(' xyz ');
----
xyz

query T
select ltrim('abcxyzabc', 'bca');
----
@@ -22,3 +67,8 @@ query T
select rtrim('abcxyzabc', 'bca');
----
abcxyz

query T
select btrim('abcxyzabc', 'bca');
----
xyz
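Taken together, the cases above cover the SQL-standard syntax trim([both|leading|trailing] [characters] from string), the PostgreSQL-style two-argument form trim(string, characters), and the btrim/ltrim/rtrim aliases. Assuming PostgreSQL-compatible semantics, these calls from the tests are equivalent ways of stripping the characters 'a', 'b', 'c' from both ends:

-- Each of these returns 'xyz':
select trim('cba' from 'abcxyzabc');   -- SQL-standard form
select trim('abcxyzabc', 'cba');       -- two-argument form
select btrim('abcxyzabc', 'cba');      -- btrim alias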
30 changes: 30 additions & 0 deletions e2e_test/sink/remote/types.slt
@@ -0,0 +1,30 @@
statement ok
create table t5 (v1 smallint primary key, v2 int, v3 bigint, v4 float, v5 double, v6 decimal, v7 varchar, v8 timestamp, v9 boolean);

statement ok
create sink s from t5 with (
connector = 'jdbc',
jdbc.url='jdbc:postgresql://db:5432/test?user=test&password=connector',
table.name = 't5',
type = 'upsert'
);

statement ok
drop sink s;

statement ok
drop table t5;

statement ok
create table t6 (v1 smallint primary key, v2 int, v3 bigint, v4 float, v5 double, v6 decimal, v7 varchar, v8 timestamp, v9 boolean, v10 date, v11 struct<v12 time, v13 timestamptz>, v14 varchar[]);

statement error
create sink s from t6 with (
connector = 'jdbc',
jdbc.url='jdbc:postgresql://db:5432/test?user=test&password=connector',
table.name = 't6',
type = 'upsert'
);

statement ok
drop table t6;
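For the t5 case the sink can be created because the CI script above (ci/scripts/e2e-sink-test.sh) pre-creates a matching table on the Postgres side, so every scalar column has a direct JDBC mapping:

-- Postgres-side table the CI script creates for the t5 sink:
create table t5 (v1 smallint primary key, v2 int, v3 bigint, v4 float4,
v5 float8, v6 decimal, v7 varchar, v8 timestamp, v9 boolean);

The t6 case is expected to fail, presumably because at least one of the extra column types (date, the struct column, or varchar[]) has no such mapping in the JDBC sink.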
76 changes: 69 additions & 7 deletions grafana/risingwave-dev-dashboard.dashboard.py
@@ -14,6 +14,7 @@
panels = Panels(datasource)
logging.basicConfig(level=logging.WARN)


def section_cluster_node(panels):
return [
panels.row("Cluster Node"),
@@ -65,6 +66,49 @@ def section_cluster_node(panels):
]


def section_recovery_node(panels):
return [
panels.row("Recovery"),
panels.timeseries_ops(
"Recovery Successful Rate",
"The rate of successful recovery attempts",
[
panels.target(f"sum(rate({metric('recovery_latency_count')}[$__rate_interval])) by (instance)",
"{{instance}}")
],
["last"],
),
panels.timeseries_count(
"Failed recovery attempts",
"Total number of failed reocovery attempts",
[
panels.target(f"sum({metric('recovery_failure_cnt')}) by (instance)",
"{{instance}}")
],
["last"],
),
panels.timeseries_latency(
"Recovery latency",
"Time spent in a successful recovery attempt",
[
*quantile(
lambda quantile, legend: panels.target(
f"histogram_quantile({quantile}, sum(rate({metric('recovery_latency_bucket')}[$__rate_interval])) by (le, instance))",
f"recovery latency p{legend}" +
" - {{instance}}",
),
[50, 90, 99, "max"],
),
panels.target(
f"sum by (le) (rate({metric('recovery_latency_sum')}[$__rate_interval])) / sum by (le) (rate({metric('recovery_latency_count')}[$__rate_interval]))",
"recovery latency avg",
),
],
["last"],
)
]
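Note that the "recovery latency avg" target uses the standard Prometheus idiom of dividing the rate of the histogram's recovery_latency_sum series by the rate of its recovery_latency_count series, which yields the mean latency over the rate window; the same _count series drives the "Recovery Success Rate" panel above, since one latency sample is recorded per successful recovery.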


def section_compaction(outer_panels):
panels = outer_panels.sub_panel()
return [
@@ -330,7 +374,7 @@ def section_compaction(outer_panels):
],
),

panels.timeseries_count(
"Hummock Sstable Stat",
"Avg count gotten from sstable_distinct_epoch_count, for observing sstable_distinct_epoch_count",
[
@@ -344,7 +388,7 @@
panels.timeseries_latency(
"Hummock Remote Read Duration",
"Total time of operations which read from remote storage when enable prefetch",
[
*quantile(
lambda quantile, legend: panels.target(
f"histogram_quantile({quantile}, sum(rate({metric('state_store_remote_read_time_per_task_bucket')}[$__rate_interval])) by (le, job, instance, table_id))",
@@ -501,7 +545,7 @@ def section_object_storage(outer_panels):
"Estimated S3 Cost (Monthly)",
"This metric uses the total size of data in S3 at this second to derive the cost of storing data "
"for a whole month. The price is 0.023 USD per GB. Please checkout AWS's pricing model for more "
"accurate calculation.",
"accurate calculation.",
[
panels.target(
f"sum({metric('storage_level_total_file_size')}) by (instance) * 0.023 / 1000 / 1000",
@@ -571,6 +615,16 @@ def section_streaming(panels):
)
]
),
panels.timeseries_count(
"Source Upstream Status",
"Monitor each source upstream, 0 means the upstream is not normal, 1 means the source is ready.",
[
panels.target(
f"{metric('source_status_is_up')}",
"source_id={{source_id}}, source_name={{source_name}} @ {{instance}}"
)
]
),
panels.timeseries_rowsps(
"Backfill Snapshot Read Throughput(rows)",
"Total number of rows that have been read from the backfill snapshot",
@@ -1153,6 +1207,7 @@ def section_batch_exchange(outer_panels):
),
]


def section_frontend(outer_panels):
panels = outer_panels.sub_panel()
return [
@@ -1184,7 +1239,7 @@
"",
[
panels.target(f"{metric('distributed_running_query_num')}",
"The number of running query in distributed execution mode"),
"The number of running query in distributed execution mode"),
],
["last"],
),
@@ -1193,7 +1248,7 @@
"",
[
panels.target(f"{metric('distributed_rejected_query_counter')}",
"The number of rejected query in distributed execution mode"),
"The number of rejected query in distributed execution mode"),
],
["last"],
),
@@ -1202,7 +1257,7 @@
"",
[
panels.target(f"{metric('distributed_completed_query_counter')}",
"The number of completed query in distributed execution mode"),
"The number of completed query in distributed execution mode"),
],
["last"],
),
@@ -1745,6 +1800,7 @@ def section_hummock_tiered_cache(outer_panels):
)
]


def section_hummock_manager(outer_panels):
panels = outer_panels.sub_panel()
total_key_size_filter = "metric='total_key_size'"
@@ -1891,6 +1947,7 @@ def section_hummock_manager(outer_panels):
)
]


def section_backup_manager(outer_panels):
panels = outer_panels.sub_panel()
return [
@@ -1916,7 +1973,7 @@
f"histogram_quantile({quantile}, sum(rate({metric('backup_job_latency_bucket')}[$__rate_interval])) by (le, state))",
f"Job Process Time p{legend}" +
" - {{state}}",
),
[50, 99, 999, "max"],
),
],
@@ -1925,6 +1982,7 @@
)
]


def grpc_metrics_target(panels, name, filter):
return panels.timeseries_latency_small(
f"{name} latency",
@@ -2166,6 +2224,7 @@ def section_grpc_hummock_meta_client(outer_panels):
),
]


def section_memory_manager(outer_panels):
panels = outer_panels.sub_panel()
return [
@@ -2236,6 +2295,7 @@ def section_memory_manager(outer_panels):
),
]


def section_connector_node(outer_panels):
panels = outer_panels.sub_panel()
return [
@@ -2256,6 +2316,7 @@
)
]


templating = Templating()
if namespace_filter_enabled:
templating = Templating(
@@ -2295,6 +2356,7 @@ def section_connector_node(outer_panels):
version=dashboard_version,
panels=[
*section_cluster_node(panels),
*section_recovery_node(panels),
*section_streaming(panels),
*section_streaming_actors(panels),
*section_streaming_exchange(panels),
2 changes: 1 addition & 1 deletion grafana/risingwave-dev-dashboard.json

Large diffs are not rendered by default.

Remaining file diffs are not rendered by default.
