feat: Expose Parquet Schema Adapter #10515

Changes from all commits
```diff
@@ -27,8 +27,8 @@ use crate::datasource::physical_plan::file_stream::{
     FileOpenFuture, FileOpener, FileStream,
 };
 use crate::datasource::physical_plan::{
-    parquet::page_filter::PagePruningPredicate, DisplayAs, FileGroupPartitioner,
-    FileMeta, FileScanConfig, SchemaAdapter,
+    parquet::page_filter::PagePruningPredicate, DefaultSchemaAdapterFactory, DisplayAs,
+    FileGroupPartitioner, FileMeta, FileScanConfig,
 };
 use crate::{
     config::{ConfigOptions, TableParquetOptions},
@@ -67,9 +67,11 @@ mod metrics;
 mod page_filter;
 mod row_filter;
 mod row_groups;
+mod schema_adapter;
 mod statistics;

 pub use metrics::ParquetFileMetrics;
+pub use schema_adapter::{SchemaAdapter, SchemaAdapterFactory, SchemaMapper};

 /// Execution plan for scanning one or more Parquet partitions
 #[derive(Debug, Clone)]
@@ -93,6 +95,8 @@ pub struct ParquetExec {
     cache: PlanProperties,
     /// Options for reading Parquet files
     table_parquet_options: TableParquetOptions,
+    /// Optional user defined schema adapter
+    schema_adapter_factory: Option<Arc<dyn SchemaAdapterFactory>>,
 }

 impl ParquetExec {
```

Review comment on the new `schema_adapter_factory` field: "I think it is about time (not this PR) to create a […]"
```diff
@@ -157,6 +161,7 @@ impl ParquetExec {
             parquet_file_reader_factory: None,
             cache,
             table_parquet_options,
+            schema_adapter_factory: None,
         }
     }
```
```diff
@@ -195,6 +200,19 @@ impl ParquetExec {
         self
     }

+    /// Optional schema adapter factory.
+    ///
+    /// A `SchemaAdapterFactory` allows the user to specify how fields from the Parquet file
+    /// get mapped to those of the table schema. The default schema adapter uses Arrow's cast
+    /// library to map the Parquet fields to the table schema.
+    pub fn with_schema_adapter_factory(
+        mut self,
+        schema_adapter_factory: Arc<dyn SchemaAdapterFactory>,
+    ) -> Self {
+        self.schema_adapter_factory = Some(schema_adapter_factory);
+        self
+    }
+
     /// If true, any filter [`Expr`]s on the scan will converted to a
     /// [`RowFilter`](parquet::arrow::arrow_reader::RowFilter) in the
     /// `ParquetRecordBatchStream`. These filters are applied by the
```
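For orientation, a caller would opt in roughly like this. This is a minimal sketch, not code from the PR: the wrapper function and the assumption that a `ParquetExec` has already been built are illustrative.

```rust
use std::sync::Arc;

// A minimal sketch (type paths assumed): attach a custom factory to an
// already-built `ParquetExec`. The factory must implement the
// `SchemaAdapterFactory` trait shown later in this diff.
fn with_custom_adapter(
    exec: ParquetExec,
    factory: Arc<dyn SchemaAdapterFactory>,
) -> ParquetExec {
    // Builder-style: consumes and returns the exec. If this is never called,
    // execution falls back to `DefaultSchemaAdapterFactory` (see the
    // `execute` hunk below).
    exec.with_schema_adapter_factory(factory)
}
```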
```diff
@@ -402,6 +420,11 @@ impl ExecutionPlan for ParquetExec {
             })
         })?;

+        let schema_adapter_factory = self
+            .schema_adapter_factory
+            .clone()
+            .unwrap_or_else(|| Arc::new(DefaultSchemaAdapterFactory::default()));
+
         let opener = ParquetOpener {
             partition_index,
             projection: Arc::from(projection),
@@ -418,6 +441,7 @@ impl ExecutionPlan for ParquetExec {
             reorder_filters: self.reorder_filters(),
             enable_page_index: self.enable_page_index(),
             enable_bloom_filter: self.bloom_filter_on_read(),
+            schema_adapter_factory,
         };

         let stream =
```
```diff
@@ -452,6 +476,7 @@ struct ParquetOpener {
     reorder_filters: bool,
     enable_page_index: bool,
     enable_bloom_filter: bool,
+    schema_adapter_factory: Arc<dyn SchemaAdapterFactory>,
 }

 impl FileOpener for ParquetOpener {
@@ -475,7 +500,7 @@ impl FileOpener for ParquetOpener {
         let batch_size = self.batch_size;
         let projection = self.projection.clone();
         let projected_schema = SchemaRef::from(self.table_schema.project(&projection)?);
-        let schema_adapter = SchemaAdapter::new(projected_schema);
+        let schema_adapter = self.schema_adapter_factory.create(projected_schema);
         let predicate = self.predicate.clone();
         let pruning_predicate = self.pruning_predicate.clone();
         let page_pruning_predicate = self.page_pruning_predicate.clone();
```
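Taken together, the call sequence the opener now performs looks roughly like this. This is a sketch of the flow under the traits defined in this PR, not the opener's literal code; `factory`, `projected_schema`, `file_schema`, and `batch` stand in for the opener's actual state.

```rust
use std::sync::Arc;
use arrow_array::RecordBatch;
use arrow_schema::{Schema, SchemaRef};

// Sketch of the two-stage adaptation flow through the new trait objects.
fn adapt_one_batch(
    factory: Arc<dyn SchemaAdapterFactory>,
    projected_schema: SchemaRef,
    file_schema: &Schema,
    batch: RecordBatch,
) -> datafusion_common::Result<RecordBatch> {
    // Stage 1 (before the read): build an adapter for the projected table
    // schema, and derive which file columns to read plus a batch mapper.
    let adapter = factory.create(projected_schema);
    let (mapper, projection) = adapter.map_schema(file_schema)?;
    let _ = projection; // in the real opener this is pushed into the reader

    // Stage 2 (after the read): cast/reorder columns to the table schema,
    // inserting null columns where the file lacked a table column.
    mapper.map_batch(batch)
}
```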
New file `schema_adapter.rs` (69 lines, declared by the `mod schema_adapter;` added above):
```rust
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use arrow_array::RecordBatch;
use arrow_schema::{Schema, SchemaRef};
use std::fmt::Debug;
use std::sync::Arc;

/// Factory of schema adapters.
///
/// Provides means to implement custom schema adaptation.
pub trait SchemaAdapterFactory: Debug + Send + Sync + 'static {
    /// Provides a `SchemaAdapter` for the ParquetExec.
    fn create(&self, schema: SchemaRef) -> Box<dyn SchemaAdapter>;
}

/// A utility which can adapt file-level record batches to a table schema which may have a schema
/// obtained from merging multiple file-level schemas.
///
/// This is useful for enabling schema evolution in partitioned datasets.
///
/// This has to be done in two stages.
///
/// 1. Before reading the file, we have to map projected column indexes from the table schema to
///    the file schema.
///
/// 2. After reading a record batch, we need to map the read columns back to the expected column
///    indexes and insert null-valued columns wherever the file schema was missing a column
///    present in the table schema.
pub trait SchemaAdapter: Send + Sync {
    /// Map a column index in the table schema to a column index in a particular
    /// file schema.
    ///
    /// Panics if index is not in range for the table schema.
    fn map_column_index(&self, index: usize, file_schema: &Schema) -> Option<usize>;

    /// Creates a `SchemaMapping` that can be used to cast or map the columns from the file
    /// schema to the table schema.
    ///
    /// If the provided `file_schema` contains columns of a different type to the expected
    /// `table_schema`, the method will attempt to cast the array data from the file schema
    /// to the table schema where possible.
    ///
    /// Returns a [`SchemaMapper`] that can be applied to the output batch
    /// along with an ordered list of columns to project from the file.
    fn map_schema(
        &self,
        file_schema: &Schema,
    ) -> datafusion_common::Result<(Arc<dyn SchemaMapper>, Vec<usize>)>;
}

/// Transforms a `RecordBatch` from Parquet to a `RecordBatch` that meets the table schema.
pub trait SchemaMapper: Send + Sync {
    /// Adapts a `RecordBatch` to match the `table_schema` using the stored mapping and conversions.
    fn map_batch(&self, batch: RecordBatch) -> datafusion_common::Result<RecordBatch>;
}
```

Review comment on the `SchemaAdapterFactory` doc: "I know that currently the only user of SchemaAdapter is parquet, but I don't think there is anything parquet specific about the logic here. What do you think about moving the code (and default impl) somewhere like […]? Perhaps we could do that as a follow on PR, as the way you have done this PR makes it easy to see what you have changed / not changed 👍"
Review comment: "It is unfortunate that we have to add the […] all over the place. Maybe as a follow on PR we can pull this code into its own module (e.g. `datasource/schema_adaptor.rs` for example)."
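To make the three traits concrete, here is a minimal, hypothetical implementation that matches file columns to table columns by name, casting types where they differ and back-filling missing columns with nulls. It is only a sketch: `ByNameAdapterFactory`, `ByNameAdapter`, and `ByNameMapper` are illustrative names, the `cast` import path is assumed, and it ignores subtleties (nested fields, uncastable types) that a production adapter such as the default one must handle.

```rust
use std::sync::Arc;

use arrow::compute::cast; // assumed available via DataFusion's arrow dependency
use arrow_array::{new_null_array, ArrayRef, RecordBatch};
use arrow_schema::{Schema, SchemaRef};

/// Hypothetical factory: adapts file batches by matching columns on name.
#[derive(Debug)]
struct ByNameAdapterFactory;

impl SchemaAdapterFactory for ByNameAdapterFactory {
    fn create(&self, schema: SchemaRef) -> Box<dyn SchemaAdapter> {
        Box::new(ByNameAdapter { table_schema: schema })
    }
}

struct ByNameAdapter {
    table_schema: SchemaRef,
}

impl SchemaAdapter for ByNameAdapter {
    fn map_column_index(&self, index: usize, file_schema: &Schema) -> Option<usize> {
        // Panics (via `field`) if `index` is out of range, per the trait docs.
        file_schema.index_of(self.table_schema.field(index).name()).ok()
    }

    fn map_schema(
        &self,
        file_schema: &Schema,
    ) -> datafusion_common::Result<(Arc<dyn SchemaMapper>, Vec<usize>)> {
        // For each table column, record either its position within the
        // projected file read, or `None` if the file lacks the column.
        let mut projection = Vec::new();
        let mut field_mappings = Vec::with_capacity(self.table_schema.fields().len());
        for field in self.table_schema.fields() {
            match file_schema.index_of(field.name()) {
                Ok(file_idx) => {
                    field_mappings.push(Some(projection.len()));
                    projection.push(file_idx);
                }
                Err(_) => field_mappings.push(None),
            }
        }
        let mapper = ByNameMapper {
            table_schema: Arc::clone(&self.table_schema),
            field_mappings,
        };
        Ok((Arc::new(mapper), projection))
    }
}

struct ByNameMapper {
    table_schema: SchemaRef,
    field_mappings: Vec<Option<usize>>,
}

impl SchemaMapper for ByNameMapper {
    fn map_batch(&self, batch: RecordBatch) -> datafusion_common::Result<RecordBatch> {
        let columns = self
            .table_schema
            .fields()
            .iter()
            .zip(&self.field_mappings)
            .map(|(field, mapping)| -> datafusion_common::Result<ArrayRef> {
                match mapping {
                    // Present in the file: cast to the table's declared type.
                    Some(idx) => Ok(cast(batch.column(*idx), field.data_type())?),
                    // Absent from the file: synthesize a null column.
                    None => Ok(new_null_array(field.data_type(), batch.num_rows())),
                }
            })
            .collect::<datafusion_common::Result<Vec<_>>>()?;
        Ok(RecordBatch::try_new(Arc::clone(&self.table_schema), columns)?)
    }
}
```

A factory like this would be wired in through the `with_schema_adapter_factory` builder method shown earlier.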