Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 9 additions & 6 deletions datafusion/datasource-parquet/src/opener.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
//! [`ParquetOpener`] for opening Parquet files

use crate::page_filter::PagePruningAccessPlanFilter;
use crate::row_filter::build_projection_read_plan;
use crate::row_group_filter::RowGroupAccessPlanFilter;
use crate::{
ParquetAccessPlan, ParquetFileMetrics, ParquetFileReaderFactory,
Expand Down Expand Up @@ -65,7 +66,7 @@ use parquet::arrow::arrow_reader::{
};
use parquet::arrow::async_reader::AsyncFileReader;
use parquet::arrow::push_decoder::{ParquetPushDecoder, ParquetPushDecoderBuilder};
use parquet::arrow::{ParquetRecordBatchStreamBuilder, ProjectionMask};
use parquet::arrow::ParquetRecordBatchStreamBuilder;
use parquet::file::metadata::{PageIndexPolicy, ParquetMetaDataReader};

/// Implements [`FileOpener`] for a parquet file
Expand Down Expand Up @@ -572,12 +573,14 @@ impl FileOpener for ParquetOpener {
// metrics from the arrow reader itself
let arrow_reader_metrics = ArrowReaderMetrics::enabled();

let indices = projection.column_indices();
let mask =
ProjectionMask::roots(reader_metadata.parquet_schema(), indices.clone());
let read_plan = build_projection_read_plan(
projection.expr_iter(),
&physical_file_schema,
reader_metadata.parquet_schema(),
);

let decoder = builder
.with_projection(mask)
.with_projection(read_plan.projection_mask)
.with_metrics(arrow_reader_metrics.clone())
.build()?;

Expand All @@ -590,7 +593,7 @@ impl FileOpener for ParquetOpener {
// Rebase column indices to match the narrowed stream schema.
// The projection expressions have indices based on physical_file_schema,
// but the stream only contains the columns selected by the ProjectionMask.
let stream_schema = Arc::new(physical_file_schema.project(&indices)?);
let stream_schema = read_plan.projected_schema;
let replace_schema = stream_schema != output_schema;
let projection = projection
.try_map_exprs(|expr| reassign_expr_columns(expr, &stream_schema))?;
Expand Down
53 changes: 53 additions & 0 deletions datafusion/datasource-parquet/src/row_filter.rs
Original file line number Diff line number Diff line change
Expand Up @@ -579,6 +579,59 @@ pub(crate) fn build_parquet_read_plan(
)))
}

/// Builds a unified [`ParquetReadPlan`] covering a set of projection expressions.
///
/// Unlike [`build_parquet_read_plan`] (used for filter pushdown, which bails out
/// with `None` when an expression touches unsupported nested types or columns
/// absent from the file), this function is infallible: every column that *can*
/// be resolved against the file is gathered into a leaf-level projection mask,
/// and columns missing from the file are simply skipped — the projection layer
/// fills those in with nulls.
pub(crate) fn build_projection_read_plan(
    exprs: impl IntoIterator<Item = Arc<dyn PhysicalExpr>>,
    file_schema: &Schema,
    schema_descr: &SchemaDescriptor,
) -> ParquetReadPlan {
    // Accumulate the root-column indices and struct-field accesses referenced
    // by every projection expression.
    let mut root_indices = Vec::new();
    let mut struct_accesses = Vec::new();

    for expr in exprs {
        let mut checker = PushdownChecker::new(file_schema, true);
        let _ = expr.visit(&mut checker);

        let columns = checker.into_sorted_columns();
        root_indices.extend_from_slice(&columns.required_columns);
        struct_accesses.extend(columns.struct_field_accesses);
    }

    // Deduplicate roots shared across expressions.
    root_indices.sort_unstable();
    root_indices.dedup();

    // Expand the roots — plus any struct-field accesses — into the set of
    // parquet leaf column indices, deduplicated and in ascending order.
    let mut leaf_indices =
        leaf_indices_for_roots(root_indices.iter().copied(), schema_descr);
    let struct_leaves =
        resolve_struct_field_leaves(&struct_accesses, file_schema, schema_descr);
    leaf_indices.extend_from_slice(&struct_leaves);
    leaf_indices.sort_unstable();
    leaf_indices.dedup();

    let projection_mask =
        ProjectionMask::leaves(schema_descr, leaf_indices.iter().copied());

    ParquetReadPlan {
        projection_mask,
        // Arrow-side schema matching the columns selected above.
        projected_schema: build_filter_schema(
            file_schema,
            &root_indices,
            &struct_accesses,
        ),
    }
}

fn leaf_indices_for_roots<I>(
root_indices: I,
schema_descr: &SchemaDescriptor,
Expand Down
Loading