[VL] Add sort merge join metrics #3920
@@ -352,12 +352,32 @@ class MetricsApiImpl extends MetricsApi with Logging {
       sparkContext: SparkContext): Map[String, SQLMetric] =
     Map(
       "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
-      "numOutputBatches" -> SQLMetrics.createMetric(sparkContext, "number of output batches"),
-      "prepareTime" -> SQLMetrics.createTimingMetric(sparkContext, "time to prepare left list"),
-      "processTime" -> SQLMetrics.createTimingMetric(sparkContext, "time to process"),
-      "joinTime" -> SQLMetrics.createTimingMetric(sparkContext, "time to merge join"),
-      "totaltimeSortmergejoin" -> SQLMetrics
-        .createTimingMetric(sparkContext, "totaltime sortmergejoin")
+      "numOutputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"),
+      "numOutputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes"),
+      "wallNanos" -> SQLMetrics.createNanoTimingMetric(sparkContext, "totaltime of merge join"),
+      "cpuCount" -> SQLMetrics.createMetric(sparkContext, "cpu wall time count"),
+      "peakMemoryBytes" -> SQLMetrics.createSizeMetric(sparkContext, "peak memory bytes"),
+      "numMemoryAllocations" -> SQLMetrics.createMetric(
+        sparkContext,
+        "number of memory allocations"),
+      "streamPreProjectionCpuCount" -> SQLMetrics.createMetric(
+        sparkContext,
+        "stream preProject cpu wall time count"),
+      "streamPreProjectionWallNanos" -> SQLMetrics.createNanoTimingMetric(
+        sparkContext,
+        "totaltime of stream preProjection"),
+      "bufferPreProjectionCpuCount" -> SQLMetrics.createMetric(
+        sparkContext,
+        "buffer preProject cpu wall time count"),
+      "bufferPreProjectionWallNanos" -> SQLMetrics.createNanoTimingMetric(
+        sparkContext,
+        "totaltime of buffer preProjection"),
+      "postProjectionCpuCount" -> SQLMetrics.createMetric(
+        sparkContext,
+        "postProject cpu wall time count"),
+      "postProjectionWallNanos" -> SQLMetrics.createNanoTimingMetric(
+        sparkContext,
+        "totaltime of postProjection")
     )

   override def genSortMergeJoinTransformerMetricsUpdater(
@@ -476,20 +496,13 @@ class MetricsApiImpl extends MetricsApi with Logging {
       "postProjectionWallNanos" -> SQLMetrics.createNanoTimingMetric(
         sparkContext,
         "totaltime of postProjection"),
-      "postProjectionOutputRows" -> SQLMetrics.createMetric(
-        sparkContext,
-        "number of postProjection output rows"),
-      "postProjectionOutputVectors" -> SQLMetrics.createMetric(
-        sparkContext,
-        "number of postProjection output vectors"),
-      "finalOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of final output rows"),
Review comment on `finalOutputRows`: it was never used before, and it is replaced by numOutputRows in this PR.
"finalOutputVectors" -> SQLMetrics.createMetric( | ||
sparkContext, | ||
"number of final output vectors") | ||
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"), | ||
"numOutputVectors" -> SQLMetrics.createMetric(sparkContext, "number of output vectors"), | ||
"numOutputBytes" -> SQLMetrics.createSizeMetric(sparkContext, "number of output bytes") | ||
) | ||
|
||
override def genHashJoinTransformerMetricsUpdater( | ||
metrics: Map[String, SQLMetric]): MetricsUpdater = new HashJoinMetricsUpdaterImpl(metrics) | ||
metrics: Map[String, SQLMetric]): MetricsUpdater = new HashJoinMetricsUpdater(metrics) | ||
|
||
override def genGenerateTransformerMetrics(sparkContext: SparkContext): Map[String, SQLMetric] = { | ||
Map("numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows")) | ||
|
@@ -0,0 +1,92 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.glutenproject.execution

import io.glutenproject.GlutenConfig

import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.internal.SQLConf

class VeloxMetricsSuite extends VeloxWholeStageTransformerSuite with AdaptiveSparkPlanHelper {
  override protected val backend: String = "velox"
  override protected val resourcePath: String = "/tpch-data-parquet-velox"
  override protected val fileFormat: String = "parquet"

  override def beforeAll(): Unit = {
    super.beforeAll()

    spark
      .range(100)
      .selectExpr("id as c1", "id % 3 as c2")
      .write
      .format("parquet")
      .saveAsTable("metrics_t1")

    spark
      .range(200)
      .selectExpr("id as c1", "id % 3 as c2")
      .write
      .format("parquet")
      .saveAsTable("metrics_t2")
  }

  override protected def afterAll(): Unit = {
    spark.sql("drop table metrics_t1")
    spark.sql("drop table metrics_t2")

    super.afterAll()
  }

  test("test sort merge join metrics") {
Review comment: @ulysses-you Do we need to add the test with post project? And also add the hash join test for both pre project and post project?
Reply: Join always has the post project. The numOutputRows/numOutputVectors metrics come from the post project.
    withSQLConf(
      GlutenConfig.COLUMNAR_FPRCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "false",
      SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
      // without preproject
      runQueryAndCompare(
        "SELECT * FROM metrics_t1 join metrics_t2 on metrics_t1.c1 = metrics_t2.c1"
      ) {
        df =>
          val smj = find(df.queryExecution.executedPlan) {
            case _: SortMergeJoinExecTransformer => true
            case _ => false
          }
          assert(smj.isDefined)
          val metrics = smj.get.metrics
          assert(metrics("numOutputRows").value == 100)
          assert(metrics("numOutputVectors").value > 0)
          assert(metrics("numOutputBytes").value > 0)
      }

      // with preproject
      runQueryAndCompare(
        "SELECT * FROM metrics_t1 join metrics_t2 on metrics_t1.c1 + 1 = metrics_t2.c1 + 1"
      ) {
        df =>
          val smj = find(df.queryExecution.executedPlan) {
            case _: SortMergeJoinExecTransformer => true
            case _ => false
          }
          assert(smj.isDefined)
          val metrics = smj.get.metrics
          assert(metrics("numOutputRows").value == 100)
          assert(metrics("numOutputVectors").value > 0)
          assert(metrics("streamPreProjectionCpuCount").value > 0)
          assert(metrics("bufferPreProjectionCpuCount").value > 0)
      }
    }
  }
}
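Outside the test suite, the new metrics can be inspected interactively in roughly the same way. The snippet below is a sketch that assumes a Gluten-enabled SparkSession named `spark` and the `metrics_t1`/`metrics_t2` tables created above:

```scala
import io.glutenproject.GlutenConfig
import io.glutenproject.execution.SortMergeJoinExecTransformer
import org.apache.spark.sql.internal.SQLConf

// Mirror the suite's configuration so the planner picks a sort merge join.
spark.conf.set(GlutenConfig.COLUMNAR_FPRCE_SHUFFLED_HASH_JOIN_ENABLED.key, "false")
spark.conf.set(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key, "-1")

val df = spark.sql(
  "SELECT * FROM metrics_t1 JOIN metrics_t2 ON metrics_t1.c1 = metrics_t2.c1")
df.collect() // metrics are only populated after the query has run

// With AQE enabled, traverse the adaptive plan (e.g. via the `find` helper
// from AdaptiveSparkPlanHelper, as in the suite) rather than this plain collect.
df.queryExecution.executedPlan.collect {
  case smj: SortMergeJoinExecTransformer =>
    smj.metrics.foreach { case (name, metric) => println(s"$name = ${metric.value}") }
}
```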
This file was deleted.
Review comment: @ulysses-you What are the differences between stream and buffer? How can they be mapped to the left and right sides in a sort merge join?
Reply: Merge join only supports inner and left outer joins, so the stream side is always the left child and the buffer side is always the right child. These metrics show the pre-project work on the stream/buffer side.
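To make that mapping concrete, here is a tiny illustrative helper (hypothetical, not part of the PR) that pairs each join child with its pre-projection metric keys under that rule:

```scala
// Illustration only. The Velox merge join supports inner and left outer joins,
// so the stream side is always the left child and the buffer side the right child.
def preProjectionMetricKeys(isLeftChild: Boolean): (String, String) =
  if (isLeftChild) {
    // stream side (left child)
    ("streamPreProjectionCpuCount", "streamPreProjectionWallNanos")
  } else {
    // buffer side (right child)
    ("bufferPreProjectionCpuCount", "bufferPreProjectionWallNanos")
  }
```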