Remove BoxedAsyncFileReader (#4150)
tustvold authored Nov 9, 2022
1 parent a32fb65 commit 0f18e76
Showing 1 changed file with 3 additions and 37 deletions.
40 changes: 3 additions & 37 deletions datafusion/core/src/physical_plan/file_format/parquet.rs
@@ -373,13 +373,13 @@ impl FileOpener for ParquetOpener {
             &self.metrics,
         );
 
-        let reader =
-            BoxedAsyncFileReader(self.parquet_file_reader_factory.create_reader(
+        let reader: Box<dyn AsyncFileReader> =
+            self.parquet_file_reader_factory.create_reader(
                 self.partition_index,
                 file_meta,
                 self.metadata_size_hint,
                 &self.metrics,
-            )?);
+            )?;
 
         let schema_adapter = SchemaAdapter::new(self.table_schema.clone());
         let batch_size = self.batch_size;
@@ -598,40 +598,6 @@ impl ParquetFileReaderFactory for DefaultParquetFileReaderFactory {
     }
 }
 
-///
-/// BoxedAsyncFileReader has been created to satisfy type requirements of
-/// parquet stream builder constructor.
-///
-/// Temporary pending https://github.com/apache/arrow-rs/pull/2368
-struct BoxedAsyncFileReader(Box<dyn AsyncFileReader + Send>);
-
-impl AsyncFileReader for BoxedAsyncFileReader {
-    fn get_bytes(
-        &mut self,
-        range: Range<usize>,
-    ) -> BoxFuture<'_, ::parquet::errors::Result<Bytes>> {
-        self.0.get_bytes(range)
-    }
-
-    fn get_byte_ranges(
-        &mut self,
-        ranges: Vec<Range<usize>>,
-    ) -> BoxFuture<'_, parquet::errors::Result<Vec<Bytes>>>
-    // TODO: This where bound forces us to enable #![allow(where_clauses_object_safety)] (#3081)
-    // Upstream issue https://github.com/apache/arrow-rs/issues/2372
-    where
-        Self: Send,
-    {
-        self.0.get_byte_ranges(ranges)
-    }
-
-    fn get_metadata(
-        &mut self,
-    ) -> BoxFuture<'_, ::parquet::errors::Result<Arc<ParquetMetaData>>> {
-        self.0.get_metadata()
-    }
-}
-
 /// Wraps parquet statistics in a way
 /// that implements [`PruningStatistics`]
 struct RowGroupPruningStatistics<'a> {
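Why the wrapper can simply be deleted: the removed doc comment notes that BoxedAsyncFileReader existed only to satisfy the type requirements of the parquet stream builder, pending https://github.com/apache/arrow-rs/pull/2368. Once the parquet crate accepts a boxed reader directly (for example via an impl of AsyncFileReader for the boxed trait object itself), the hand-written forwarding newtype is redundant and the factory result can be used as a plain Box<dyn AsyncFileReader>. Below is a minimal, self-contained sketch of that pattern; Fetch, open_stream and InMemory are illustrative stand-ins, not DataFusion or parquet APIs.

// Hypothetical trait standing in for `AsyncFileReader`; every name here is
// illustrative only.
use std::future::Future;
use std::pin::Pin;

type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

trait Fetch {
    fn fetch(&mut self, offset: usize) -> BoxFuture<'_, Vec<u8>>;
}

// Implementing the trait for the boxed trait object (analogous to what the
// linked arrow-rs change provides for `AsyncFileReader`) lets callers pass
// `Box<dyn Fetch + Send>` wherever a generic `F: Fetch` is expected, so no
// forwarding newtype like `BoxedAsyncFileReader` is needed.
impl Fetch for Box<dyn Fetch + Send> {
    fn fetch(&mut self, offset: usize) -> BoxFuture<'_, Vec<u8>> {
        (**self).fetch(offset) // dynamic dispatch to the inner reader
    }
}

// Stand-in for a builder such as `ParquetRecordBatchStreamBuilder::new`, which
// takes the reader by value and therefore requires the concrete type passed in
// to implement the trait.
fn open_stream<F: Fetch + Send + 'static>(_reader: F) {}

// A trivial in-memory implementation used only to exercise the sketch.
struct InMemory(Vec<u8>);

impl Fetch for InMemory {
    fn fetch(&mut self, offset: usize) -> BoxFuture<'_, Vec<u8>> {
        let bytes = self.0[offset..].to_vec();
        Box::pin(async move { bytes })
    }
}

fn main() {
    // Mirrors the new code path: the factory hands back a boxed reader and it
    // is passed on directly, with no wrapper struct in between.
    let reader: Box<dyn Fetch + Send> = Box::new(InMemory(vec![1, 2, 3]));
    open_stream(reader);
}

The newtype removed by this commit was exactly the workaround for the period when no such impl existed: a single-field struct forwarding get_bytes, get_byte_ranges and get_metadata to the boxed inner reader.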
