Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: optimize minor issues #1496

Merged
merged 8 commits into from
Mar 12, 2024
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 3 additions & 4 deletions src/analytic_engine/src/manifest/details.rs
Original file line number Diff line number Diff line change
Expand Up @@ -376,8 +376,7 @@ pub struct Options {
pub scan_timeout: ReadableDuration,

/// Batch size to read manifest entries
// TODO: use NonZeroUsize
pub scan_batch_size: usize,
pub scan_batch_size: NonZeroUsize,

/// Timeout to store manifest entries
pub store_timeout: ReadableDuration,
Expand All @@ -388,7 +387,7 @@ impl Default for Options {
Self {
snapshot_every_n_updates: NonZeroUsize::new(100).unwrap(),
scan_timeout: ReadableDuration::secs(5),
scan_batch_size: 100,
scan_batch_size: NonZeroUsize::new(100).unwrap(),
store_timeout: ReadableDuration::secs(5),
}
}
Expand Down Expand Up @@ -690,7 +689,7 @@ impl MetaUpdateLogStore for WalBasedLogStore {
async fn scan(&self, start: ReadBoundary) -> Result<Self::Iter> {
let ctx = ReadContext {
timeout: self.opts.scan_timeout.0,
batch_size: self.opts.scan_batch_size,
batch_size: self.opts.scan_batch_size.into(),
};

let read_req = ReadRequest {
Expand Down
9 changes: 4 additions & 5 deletions src/components/parquet_ext/src/prune/min_max.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,9 @@ fn filter_row_groups_inner(
row_groups: &[RowGroupMetaData],
) -> Vec<bool> {
let mut results = vec![true; row_groups.len()];
let execution_props = ExecutionProps::new();
for expr in exprs {
match logical2physical(expr, &schema)
match logical2physical(expr, &schema, &execution_props)
.and_then(|physical_expr| PruningPredicate::try_new(physical_expr, schema.clone()))
{
Ok(pruning_predicate) => {
Expand All @@ -86,11 +87,9 @@ fn filter_row_groups_inner(
results
}

fn logical2physical(expr: &Expr, schema: &ArrowSchema) -> DataFusionResult<Arc<dyn PhysicalExpr>> {
fn logical2physical(expr: &Expr, schema: &ArrowSchema, execution_props: &ExecutionProps) -> DataFusionResult<Arc<dyn PhysicalExpr>> {
schema.clone().to_dfschema().and_then(|df_schema| {
// TODO: props should be an argument
let execution_props = ExecutionProps::new();
create_physical_expr(expr, &df_schema, schema, &execution_props)
create_physical_expr(expr, &df_schema, schema, execution_props)
})
}

Expand Down
4 changes: 0 additions & 4 deletions src/components/tracing_util/src/logging.rs
Original file line number Diff line number Diff line change
Expand Up @@ -126,10 +126,6 @@ pub fn init_tracing_with_file(config: &Config, node_addr: &str, rotation: Rotati
.with_span_events(FmtSpan::ENTER | FmtSpan::CLOSE);

let subscriber = Registry::default().with(f_layer);
// TODO: subscriber.with(layer1) has the different type with
// subscriber.with(layer1).with(layer2)...
// So left some duplicated code here. Maybe we can use a macro to simplify
// it.
match &config.console {
Some(console) => {
let console_addr = format!("{}:{}", node_addr, console.port);
Expand Down
2 changes: 0 additions & 2 deletions src/table_engine/src/partition/rule/key.rs
Original file line number Diff line number Diff line change
Expand Up @@ -237,8 +237,6 @@ fn expand_partition_keys_group<'a>(
for filter_idx in group {
let filter = &filters[*filter_idx];
let datums = match &filter.condition {
// Only `Eq` is supported now.
// TODO: to support `In`'s extracting.
PartitionCondition::Eq(datum) => vec![datum.as_view()],
PartitionCondition::In(datums) => datums.iter().map(Datum::as_view).collect_vec(),
_ => {
Expand Down
Loading