should be it
16 external/duckdb/extension/parquet/reader/CMakeLists.txt vendored Normal file
@@ -0,0 +1,16 @@
add_library_unity(
  duckdb_parquet_readers
  OBJECT
  decimal_column_reader.cpp
  expression_column_reader.cpp
  list_column_reader.cpp
  row_number_column_reader.cpp
  string_column_reader.cpp
  struct_column_reader.cpp
  variant_column_reader.cpp)

add_subdirectory(variant)

set(PARQUET_EXTENSION_FILES
    ${PARQUET_EXTENSION_FILES} $<TARGET_OBJECTS:duckdb_parquet_readers>
    PARENT_SCOPE)
56 external/duckdb/extension/parquet/reader/decimal_column_reader.cpp vendored Normal file
@@ -0,0 +1,56 @@
#include "reader/decimal_column_reader.hpp"

namespace duckdb {

template <bool FIXED>
static unique_ptr<ColumnReader> CreateDecimalReaderInternal(ParquetReader &reader, const ParquetColumnSchema &schema) {
    switch (schema.type.InternalType()) {
    case PhysicalType::INT16:
        return make_uniq<DecimalColumnReader<int16_t, FIXED>>(reader, schema);
    case PhysicalType::INT32:
        return make_uniq<DecimalColumnReader<int32_t, FIXED>>(reader, schema);
    case PhysicalType::INT64:
        return make_uniq<DecimalColumnReader<int64_t, FIXED>>(reader, schema);
    case PhysicalType::INT128:
        return make_uniq<DecimalColumnReader<hugeint_t, FIXED>>(reader, schema);
    case PhysicalType::DOUBLE:
        return make_uniq<DecimalColumnReader<double, FIXED>>(reader, schema);
    default:
        throw InternalException("Unrecognized type for Decimal");
    }
}

template <>
double ParquetDecimalUtils::ReadDecimalValue(const_data_ptr_t pointer, idx_t size,
                                             const ParquetColumnSchema &schema_ele) {
    double res = 0;
    bool positive = (*pointer & 0x80) == 0;
    for (idx_t i = 0; i < size; i += 8) {
        auto byte_size = MinValue<idx_t>(sizeof(uint64_t), size - i);
        uint64_t input = 0;
        auto res_ptr = reinterpret_cast<uint8_t *>(&input);
        for (idx_t k = 0; k < byte_size; k++) {
            auto byte = pointer[i + k];
            res_ptr[sizeof(uint64_t) - k - 1] = positive ? byte : byte ^ 0xFF;
        }
        res *= double(NumericLimits<uint64_t>::Maximum()) + 1;
        res += static_cast<double>(input);
    }
    if (!positive) {
        res += 1;
        res /= pow(10, schema_ele.type_scale);
        return -res;
    }
    res /= pow(10, schema_ele.type_scale);
    return res;
}

unique_ptr<ColumnReader> ParquetDecimalUtils::CreateReader(ParquetReader &reader, const ParquetColumnSchema &schema) {
    if (schema.parquet_type == Type::FIXED_LEN_BYTE_ARRAY) {
        return CreateDecimalReaderInternal<true>(reader, schema);
    } else {
        return CreateDecimalReaderInternal<false>(reader, schema);
    }
}

} // namespace duckdb
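The `ReadDecimalValue` specialization above interprets a big-endian two's-complement byte array as a double by complementing the bytes of negative numbers, folding the bytes into an accumulator, and dividing by 10^scale at the end. For intuition, here is a minimal standalone sketch of the same conversion that processes one byte at a time instead of DuckDB's eight-byte chunks; the function name and example values are illustrative, not part of the DuckDB API:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Decode a big-endian two's-complement byte array with a decimal scale.
static double DecodeBigEndianDecimal(const uint8_t *bytes, size_t size, int scale) {
    bool positive = (bytes[0] & 0x80) == 0;
    double res = 0;
    for (size_t i = 0; i < size; i++) {
        // For negative numbers, complement each byte: ~x encodes -x - 1.
        uint8_t byte = positive ? bytes[i] : bytes[i] ^ 0xFF;
        res = res * 256.0 + byte;
    }
    if (!positive) {
        res += 1; // undo the "-x - 1" of the byte-wise complement
        return -res / std::pow(10.0, scale);
    }
    return res / std::pow(10.0, scale);
}

int main() {
    // 0x01 0xE2 0x40 big-endian is 123456; with scale 3 this prints 123.456
    uint8_t raw[] = {0x01, 0xE2, 0x40};
    std::printf("%f\n", DecodeBigEndianDecimal(raw, sizeof(raw), 3));
}
```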
50 external/duckdb/extension/parquet/reader/expression_column_reader.cpp vendored Normal file
@@ -0,0 +1,50 @@
#include "reader/expression_column_reader.hpp"
#include "parquet_reader.hpp"

namespace duckdb {

//===--------------------------------------------------------------------===//
// Expression Column Reader
//===--------------------------------------------------------------------===//
ExpressionColumnReader::ExpressionColumnReader(ClientContext &context, unique_ptr<ColumnReader> child_reader_p,
                                               unique_ptr<Expression> expr_p, const ParquetColumnSchema &schema_p)
    : ColumnReader(child_reader_p->Reader(), schema_p), child_reader(std::move(child_reader_p)),
      expr(std::move(expr_p)), executor(context, expr.get()) {
    vector<LogicalType> intermediate_types {child_reader->Type()};
    intermediate_chunk.Initialize(reader.allocator, intermediate_types);
}

ExpressionColumnReader::ExpressionColumnReader(ClientContext &context, unique_ptr<ColumnReader> child_reader_p,
                                               unique_ptr<Expression> expr_p,
                                               unique_ptr<ParquetColumnSchema> owned_schema_p)
    : ColumnReader(child_reader_p->Reader(), *owned_schema_p), child_reader(std::move(child_reader_p)),
      expr(std::move(expr_p)), executor(context, expr.get()), owned_schema(std::move(owned_schema_p)) {
    vector<LogicalType> intermediate_types {child_reader->Type()};
    intermediate_chunk.Initialize(reader.allocator, intermediate_types);
}

void ExpressionColumnReader::InitializeRead(idx_t row_group_idx_p, const vector<ColumnChunk> &columns,
                                            TProtocol &protocol_p) {
    child_reader->InitializeRead(row_group_idx_p, columns, protocol_p);
}

idx_t ExpressionColumnReader::Read(uint64_t num_values, data_ptr_t define_out, data_ptr_t repeat_out, Vector &result) {
    intermediate_chunk.Reset();
    auto &intermediate_vector = intermediate_chunk.data[0];

    auto amount = child_reader->Read(num_values, define_out, repeat_out, intermediate_vector);
    // Execute the expression
    intermediate_chunk.SetCardinality(amount);
    executor.ExecuteExpression(intermediate_chunk, result);
    return amount;
}

void ExpressionColumnReader::Skip(idx_t num_values) {
    child_reader->Skip(num_values);
}

idx_t ExpressionColumnReader::GroupRowsAvailable() {
    return child_reader->GroupRowsAvailable();
}

} // namespace duckdb
190 external/duckdb/extension/parquet/reader/list_column_reader.cpp vendored Normal file
@@ -0,0 +1,190 @@
#include "reader/list_column_reader.hpp"
#include "parquet_reader.hpp"

namespace duckdb {

struct ListReaderData {
    ListReaderData(list_entry_t *result_ptr, ValidityMask &result_mask)
        : result_ptr(result_ptr), result_mask(result_mask) {
    }

    list_entry_t *result_ptr;
    ValidityMask &result_mask;
};

struct TemplatedListReader {
    using DATA = ListReaderData;

    static DATA Initialize(optional_ptr<Vector> result_out) {
        D_ASSERT(ListVector::GetListSize(*result_out) == 0);

        auto result_ptr = FlatVector::GetData<list_entry_t>(*result_out);
        auto &result_mask = FlatVector::Validity(*result_out);
        return ListReaderData(result_ptr, result_mask);
    }

    static idx_t GetOffset(optional_ptr<Vector> result_out) {
        return ListVector::GetListSize(*result_out);
    }

    static void HandleRepeat(DATA &data, idx_t offset) {
        data.result_ptr[offset].length++;
    }

    static void HandleListStart(DATA &data, idx_t offset, idx_t offset_in_child, idx_t length) {
        data.result_ptr[offset].offset = offset_in_child;
        data.result_ptr[offset].length = length;
    }

    static void HandleNull(DATA &data, idx_t offset) {
        data.result_mask.SetInvalid(offset);
        data.result_ptr[offset].offset = 0;
        data.result_ptr[offset].length = 0;
    }

    static void AppendVector(optional_ptr<Vector> result_out, Vector &read_vector, idx_t child_idx) {
        ListVector::Append(*result_out, read_vector, child_idx);
    }
};

struct TemplatedListSkipper {
    using DATA = bool;

    static DATA Initialize(optional_ptr<Vector>) {
        return false;
    }

    static idx_t GetOffset(optional_ptr<Vector>) {
        return 0;
    }

    static void HandleRepeat(DATA &, idx_t) {
    }

    static void HandleListStart(DATA &, idx_t, idx_t, idx_t) {
    }

    static void HandleNull(DATA &, idx_t) {
    }

    static void AppendVector(optional_ptr<Vector>, Vector &, idx_t) {
    }
};

template <class OP>
idx_t ListColumnReader::ReadInternal(uint64_t num_values, data_ptr_t define_out, data_ptr_t repeat_out,
                                     optional_ptr<Vector> result_out) {
    idx_t result_offset = 0;
    auto data = OP::Initialize(result_out);

    // if an individual list is longer than STANDARD_VECTOR_SIZE we actually have to loop the child read to fill it
    bool finished = false;
    while (!finished) {
        idx_t child_actual_num_values = 0;

        // check if we have any overflow from a previous read
        if (overflow_child_count == 0) {
            // we don't: read elements from the child reader
            child_defines.zero();
            child_repeats.zero();
            // we don't know in advance how many values to read because of the beautiful repetition/definition setup
            // we just read (up to) a vector from the child column, and see if we have read enough
            // if we have not read enough, we read another vector
            // if we have read enough, we leave any unhandled elements in the overflow vector for a subsequent read
            auto child_req_num_values =
                MinValue<idx_t>(STANDARD_VECTOR_SIZE, child_column_reader->GroupRowsAvailable());
            read_vector.ResetFromCache(read_cache);
            child_actual_num_values =
                child_column_reader->Read(child_req_num_values, child_defines_ptr, child_repeats_ptr, read_vector);
        } else {
            // we do: use the overflow values
            child_actual_num_values = overflow_child_count;
            overflow_child_count = 0;
        }

        if (child_actual_num_values == 0) {
            // no more elements available: we are done
            break;
        }
        read_vector.Verify(child_actual_num_values);
        idx_t current_chunk_offset = OP::GetOffset(result_out);

        // hard-won piece of code this, modify at your own risk
        // the intuition is that we have to only collapse values into lists that are repeated *on this level*
        // the rest is pretty much handed up as-is as a single-valued list or NULL
        idx_t child_idx;
        for (child_idx = 0; child_idx < child_actual_num_values; child_idx++) {
            if (child_repeats_ptr[child_idx] == MaxRepeat()) {
                // value repeats on this level, append
                D_ASSERT(result_offset > 0);
                OP::HandleRepeat(data, result_offset - 1);
                continue;
            }

            if (result_offset >= num_values) {
                // we ran out of output space
                finished = true;
                break;
            }
            if (child_defines_ptr[child_idx] >= MaxDefine()) {
                // value has been defined down the stack, hence it's NOT NULL
                OP::HandleListStart(data, result_offset, child_idx + current_chunk_offset, 1);
            } else if (child_defines_ptr[child_idx] == MaxDefine() - 1) {
                // empty list
                OP::HandleListStart(data, result_offset, child_idx + current_chunk_offset, 0);
            } else {
                // value is NULL somewhere up the stack
                OP::HandleNull(data, result_offset);
            }

            if (repeat_out) {
                repeat_out[result_offset] = child_repeats_ptr[child_idx];
            }
            if (define_out) {
                define_out[result_offset] = child_defines_ptr[child_idx];
            }

            result_offset++;
        }
        // actually append the required elements to the child list
        OP::AppendVector(result_out, read_vector, child_idx);

        // we have read more values from the child reader than we can fit into the result for this read
        // we have to pass everything from child_idx to child_actual_num_values into the next call
        if (child_idx < child_actual_num_values && result_offset == num_values) {
            read_vector.Slice(read_vector, child_idx, child_actual_num_values);
            overflow_child_count = child_actual_num_values - child_idx;
            read_vector.Verify(overflow_child_count);

            // move values in the child repeats and defines *backward* by child_idx
            for (idx_t repdef_idx = 0; repdef_idx < overflow_child_count; repdef_idx++) {
                child_defines_ptr[repdef_idx] = child_defines_ptr[child_idx + repdef_idx];
                child_repeats_ptr[repdef_idx] = child_repeats_ptr[child_idx + repdef_idx];
            }
        }
    }
    return result_offset;
}

idx_t ListColumnReader::Read(uint64_t num_values, data_ptr_t define_out, data_ptr_t repeat_out, Vector &result_out) {
    ApplyPendingSkips(define_out, repeat_out);
    return ReadInternal<TemplatedListReader>(num_values, define_out, repeat_out, result_out);
}

ListColumnReader::ListColumnReader(ParquetReader &reader, const ParquetColumnSchema &schema,
                                   unique_ptr<ColumnReader> child_column_reader_p)
    : ColumnReader(reader, schema), child_column_reader(std::move(child_column_reader_p)),
      read_cache(reader.allocator, ListType::GetChildType(Type())), read_vector(read_cache), overflow_child_count(0) {

    child_defines.resize(reader.allocator, STANDARD_VECTOR_SIZE);
    child_repeats.resize(reader.allocator, STANDARD_VECTOR_SIZE);
    child_defines_ptr = (uint8_t *)child_defines.ptr;
    child_repeats_ptr = (uint8_t *)child_repeats.ptr;
}

void ListColumnReader::ApplyPendingSkips(data_ptr_t define_out, data_ptr_t repeat_out) {
    ReadInternal<TemplatedListSkipper>(pending_skips, nullptr, nullptr, nullptr);
    pending_skips = 0;
}

} // namespace duckdb
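The core of `ReadInternal` is the "collapse" loop: a child entry whose repetition level equals this level's maximum extends the current list, anything else starts a new list entry, and the definition level distinguishes a real element from an empty list or a NULL. A minimal standalone sketch of that collapsing rule, assuming a single list level with `max_repeat = 1` and `max_define = 2` (all names here are illustrative, not DuckDB API):

```cpp
#include <cstdio>
#include <vector>

struct ListEntry {
    size_t offset = 0;
    size_t length = 0;
    bool is_null = false;
};

// Collapse per-child repetition/definition levels into list entries.
// Assumes well-formed input: the first child entry never repeats.
static std::vector<ListEntry> CollapseLevels(const std::vector<int> &repeats, const std::vector<int> &defines,
                                             int max_repeat, int max_define) {
    std::vector<ListEntry> lists;
    for (size_t child_idx = 0; child_idx < repeats.size(); child_idx++) {
        if (repeats[child_idx] == max_repeat) {
            // value repeats on this level: extend the current list
            lists.back().length++;
            continue;
        }
        ListEntry entry;
        if (defines[child_idx] >= max_define) {
            entry = {child_idx, 1, false}; // defined all the way down: list of one (so far)
        } else if (defines[child_idx] == max_define - 1) {
            entry = {child_idx, 0, false}; // empty list
        } else {
            entry = {0, 0, true}; // NULL somewhere up the stack
        }
        lists.push_back(entry);
    }
    return lists;
}

int main() {
    // encodes [[a, b], [], NULL, [c]] over 5 child slots
    std::vector<int> repeats = {0, 1, 0, 0, 0};
    std::vector<int> defines = {2, 2, 1, 0, 2};
    for (auto &l : CollapseLevels(repeats, defines, 1, 2)) {
        std::printf("offset=%zu length=%zu null=%d\n", l.offset, l.length, (int)l.is_null);
    }
}
```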
46 external/duckdb/extension/parquet/reader/row_number_column_reader.cpp vendored Normal file
@@ -0,0 +1,46 @@
#include "reader/row_number_column_reader.hpp"
#include "parquet_reader.hpp"
#include "duckdb/storage/table/row_group.hpp"

namespace duckdb {

//===--------------------------------------------------------------------===//
// Row Number Column Reader
//===--------------------------------------------------------------------===//
RowNumberColumnReader::RowNumberColumnReader(ParquetReader &reader, const ParquetColumnSchema &schema)
    : ColumnReader(reader, schema) {
}

void RowNumberColumnReader::InitializeRead(idx_t row_group_idx_p, const vector<ColumnChunk> &columns,
                                           TProtocol &protocol_p) {
    row_group_offset = 0;
    auto &row_groups = reader.GetFileMetadata()->row_groups;
    for (idx_t i = 0; i < row_group_idx_p; i++) {
        row_group_offset += row_groups[i].num_rows;
    }
}

void RowNumberColumnReader::Filter(uint64_t num_values, data_ptr_t define_out, data_ptr_t repeat_out,
                                   Vector &result_out, const TableFilter &filter, TableFilterState &filter_state,
                                   SelectionVector &sel, idx_t &approved_tuple_count, bool is_first_filter) {
    // check the row id stats if this filter has any chance of passing
    auto prune_result = RowGroup::CheckRowIdFilter(filter, row_group_offset, row_group_offset + num_values);
    if (prune_result == FilterPropagateResult::FILTER_ALWAYS_FALSE) {
        // filter is always false - don't read anything
        approved_tuple_count = 0;
        Skip(num_values);
        return;
    }
    ColumnReader::Filter(num_values, define_out, repeat_out, result_out, filter, filter_state, sel,
                         approved_tuple_count, is_first_filter);
}

idx_t RowNumberColumnReader::Read(uint64_t num_values, data_ptr_t define_out, data_ptr_t repeat_out, Vector &result) {
    auto data_ptr = FlatVector::GetData<int64_t>(result);
    for (idx_t i = 0; i < num_values; i++) {
        data_ptr[i] = UnsafeNumericCast<int64_t>(row_group_offset++);
    }
    return num_values;
}

} // namespace duckdb
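This reader synthesizes a file-wide row number rather than reading anything from disk: `InitializeRead` sums `num_rows` over the preceding row groups, and `Read` then hands out consecutive values. A tiny standalone sketch of that bookkeeping, with illustrative values:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    std::vector<int64_t> group_num_rows = {1000, 1000, 500};
    int target_group = 2;

    // rows before the target group determine its starting row number
    int64_t row_group_offset = 0;
    for (int i = 0; i < target_group; i++) {
        row_group_offset += group_num_rows[i];
    }
    // first three row numbers produced for group 2: 2000, 2001, 2002
    for (int i = 0; i < 3; i++) {
        std::printf("%lld\n", static_cast<long long>(row_group_offset++));
    }
}
```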
81 external/duckdb/extension/parquet/reader/string_column_reader.cpp vendored Normal file
@@ -0,0 +1,81 @@
#include "reader/string_column_reader.hpp"
#include "utf8proc_wrapper.hpp"
#include "parquet_reader.hpp"
#include "duckdb/common/types/blob.hpp"

namespace duckdb {

//===--------------------------------------------------------------------===//
// String Column Reader
//===--------------------------------------------------------------------===//
StringColumnReader::StringColumnReader(ParquetReader &reader, const ParquetColumnSchema &schema)
    : ColumnReader(reader, schema), string_column_type(GetStringColumnType(Type())) {
    fixed_width_string_length = 0;
    if (schema.parquet_type == Type::FIXED_LEN_BYTE_ARRAY) {
        fixed_width_string_length = schema.type_length;
    }
}

void StringColumnReader::VerifyString(const char *str_data, uint32_t str_len, const bool is_varchar) {
    if (!is_varchar) {
        return;
    }
    // verify if a string is actually UTF8, and if there are no null bytes in the middle of the string
    // technically Parquet should guarantee this, but reality is often disappointing
    UnicodeInvalidReason reason;
    size_t pos;
    auto utf_type = Utf8Proc::Analyze(str_data, str_len, &reason, &pos);
    if (utf_type == UnicodeType::INVALID) {
        throw InvalidInputException("Invalid string encoding found in Parquet file: value \"%s\" is not valid UTF8!",
                                    Blob::ToString(string_t(str_data, str_len)));
    }
}

void StringColumnReader::VerifyString(const char *str_data, uint32_t str_len) {
    switch (string_column_type) {
    case StringColumnType::VARCHAR:
        VerifyString(str_data, str_len, true);
        break;
    case StringColumnType::JSON: {
        const auto error = StringUtil::ValidateJSON(str_data, str_len);
        if (!error.empty()) {
            throw InvalidInputException("Invalid JSON found in Parquet file: %s", error);
        }
        break;
    }
    default:
        break;
    }
}

class ParquetStringVectorBuffer : public VectorBuffer {
public:
    explicit ParquetStringVectorBuffer(shared_ptr<ResizeableBuffer> buffer_p)
        : VectorBuffer(VectorBufferType::OPAQUE_BUFFER), buffer(std::move(buffer_p)) {
    }

private:
    shared_ptr<ResizeableBuffer> buffer;
};

void StringColumnReader::ReferenceBlock(Vector &result, shared_ptr<ResizeableBuffer> &block) {
    StringVector::AddBuffer(result, make_buffer<ParquetStringVectorBuffer>(block));
}

void StringColumnReader::Plain(shared_ptr<ResizeableBuffer> &plain_data, uint8_t *defines, idx_t num_values,
                               idx_t result_offset, Vector &result) {
    ReferenceBlock(result, plain_data);
    PlainTemplated<string_t, StringParquetValueConversion>(*plain_data, defines, num_values, result_offset, result);
}

void StringColumnReader::PlainSkip(ByteBuffer &plain_data, uint8_t *defines, idx_t num_values) {
    PlainSkipTemplated<StringParquetValueConversion>(plain_data, defines, num_values);
}

void StringColumnReader::PlainSelect(shared_ptr<ResizeableBuffer> &plain_data, uint8_t *defines, idx_t num_values,
                                     Vector &result, const SelectionVector &sel, idx_t count) {
    ReferenceBlock(result, plain_data);
    PlainSelectTemplated<string_t, StringParquetValueConversion>(*plain_data, defines, num_values, result, sel, count);
}

} // namespace duckdb
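`VerifyString` delegates the actual UTF-8 check to utf8proc's `Analyze`. For intuition about what such a check does (this is not the utf8proc implementation, just a self-contained byte-level sketch that also rejects embedded NUL bytes, mirroring the comment above):

```cpp
#include <cstdio>

// Validate UTF-8: correct continuation bytes, no overlong forms,
// no surrogates, nothing above U+10FFFF, and no embedded NUL bytes.
static bool IsValidUtf8(const unsigned char *s, size_t len) {
    size_t i = 0;
    while (i < len) {
        unsigned char c = s[i];
        size_t n;
        if (c < 0x80) {
            if (c == 0) return false; // NUL byte in the middle of the string
            n = 1;
        } else if ((c & 0xE0) == 0xC0) {
            if (c < 0xC2) return false; // overlong 2-byte form
            n = 2;
        } else if ((c & 0xF0) == 0xE0) {
            n = 3;
        } else if ((c & 0xF8) == 0xF0) {
            if (c > 0xF4) return false; // above U+10FFFF
            n = 4;
        } else {
            return false; // stray continuation byte or invalid lead byte
        }
        if (i + n > len) return false; // truncated sequence
        for (size_t k = 1; k < n; k++) {
            if ((s[i + k] & 0xC0) != 0x80) return false;
        }
        if (n == 3 && c == 0xE0 && s[i + 1] < 0xA0) return false; // overlong
        if (n == 3 && c == 0xED && s[i + 1] > 0x9F) return false; // surrogate
        if (n == 4 && c == 0xF0 && s[i + 1] < 0x90) return false; // overlong
        if (n == 4 && c == 0xF4 && s[i + 1] > 0x8F) return false; // > U+10FFFF
        i += n;
    }
    return true;
}

int main() {
    const unsigned char ok[] = {'h', 0xC3, 0xA9, 'l', 'l', 'o'};  // "héllo"
    const unsigned char bad[] = {0xC0, 0xAF};                     // overlong '/'
    std::printf("%d %d\n", IsValidUtf8(ok, sizeof(ok)), IsValidUtf8(bad, sizeof(bad)));
}
```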
138 external/duckdb/extension/parquet/reader/struct_column_reader.cpp vendored Normal file
@@ -0,0 +1,138 @@
#include "reader/struct_column_reader.hpp"

namespace duckdb {

//===--------------------------------------------------------------------===//
// Struct Column Reader
//===--------------------------------------------------------------------===//
StructColumnReader::StructColumnReader(ParquetReader &reader, const ParquetColumnSchema &schema,
                                       vector<unique_ptr<ColumnReader>> child_readers_p)
    : ColumnReader(reader, schema), child_readers(std::move(child_readers_p)) {
    D_ASSERT(Type().InternalType() == PhysicalType::STRUCT);
}

ColumnReader &StructColumnReader::GetChildReader(idx_t child_idx) {
    if (!child_readers[child_idx]) {
        throw InternalException("StructColumnReader::GetChildReader(%d) - but this child reader is not set", child_idx);
    }
    return *child_readers[child_idx].get();
}

void StructColumnReader::InitializeRead(idx_t row_group_idx_p, const vector<ColumnChunk> &columns,
                                        TProtocol &protocol_p) {
    for (auto &child : child_readers) {
        if (!child) {
            continue;
        }
        child->InitializeRead(row_group_idx_p, columns, protocol_p);
    }
}

idx_t StructColumnReader::Read(uint64_t num_values, data_ptr_t define_out, data_ptr_t repeat_out, Vector &result) {
    auto &struct_entries = StructVector::GetEntries(result);
    D_ASSERT(StructType::GetChildTypes(Type()).size() == struct_entries.size());

    if (pending_skips > 0) {
        throw InternalException("StructColumnReader cannot have pending skips");
    }

    // If the child reader values are all valid, "define_out" may not be initialized at all
    // So, we just initialize them to all be valid beforehand
    std::fill_n(define_out, num_values, MaxDefine());

    optional_idx read_count;
    for (idx_t i = 0; i < child_readers.size(); i++) {
        auto &child = child_readers[i];
        auto &target_vector = *struct_entries[i];
        if (!child) {
            // if we are not scanning this vector - set it to NULL
            target_vector.SetVectorType(VectorType::CONSTANT_VECTOR);
            ConstantVector::SetNull(target_vector, true);
            continue;
        }
        auto child_num_values = child->Read(num_values, define_out, repeat_out, target_vector);
        if (!read_count.IsValid()) {
            read_count = child_num_values;
        } else if (read_count.GetIndex() != child_num_values) {
            throw std::runtime_error("Struct child row count mismatch");
        }
    }
    if (!read_count.IsValid()) {
        read_count = num_values;
    }
    // set the validity mask for this level
    auto &validity = FlatVector::Validity(result);
    for (idx_t i = 0; i < read_count.GetIndex(); i++) {
        if (define_out[i] < MaxDefine()) {
            validity.SetInvalid(i);
        }
    }

    return read_count.GetIndex();
}

void StructColumnReader::Skip(idx_t num_values) {
    for (auto &child : child_readers) {
        if (!child) {
            continue;
        }
        child->Skip(num_values);
    }
}

void StructColumnReader::RegisterPrefetch(ThriftFileTransport &transport, bool allow_merge) {
    for (auto &child : child_readers) {
        if (!child) {
            continue;
        }
        child->RegisterPrefetch(transport, allow_merge);
    }
}

uint64_t StructColumnReader::TotalCompressedSize() {
    uint64_t size = 0;
    for (auto &child : child_readers) {
        if (!child) {
            continue;
        }
        size += child->TotalCompressedSize();
    }
    return size;
}

static bool TypeHasExactRowCount(const LogicalType &type) {
    switch (type.id()) {
    case LogicalTypeId::LIST:
    case LogicalTypeId::MAP:
        return false;
    case LogicalTypeId::STRUCT:
        for (auto &kv : StructType::GetChildTypes(type)) {
            if (TypeHasExactRowCount(kv.second)) {
                return true;
            }
        }
        return false;
    default:
        return true;
    }
}

idx_t StructColumnReader::GroupRowsAvailable() {
    for (auto &child : child_readers) {
        if (!child) {
            continue;
        }
        if (TypeHasExactRowCount(child->Type())) {
            return child->GroupRowsAvailable();
        }
    }
    for (auto &child : child_readers) {
        if (!child) {
            continue;
        }
        return child->GroupRowsAvailable();
    }
    throw InternalException("No projected columns in struct?");
}

} // namespace duckdb
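The validity loop at the end of `Read` encodes a simple rule: a row is NULL at this struct level exactly when its definition level stops short of this level's maximum define. A minimal standalone sketch of that rule with illustrative values:

```cpp
#include <cstdio>
#include <vector>

int main() {
    int max_define = 2; // the define level of this struct
    std::vector<int> define_out = {2, 1, 2, 0};

    for (size_t i = 0; i < define_out.size(); i++) {
        // define < max_define means the value was NULL at or above this level
        bool valid = define_out[i] >= max_define;
        std::printf("row %zu: %s\n", i, valid ? "valid" : "NULL at this level");
    }
}
```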
7 external/duckdb/extension/parquet/reader/variant/CMakeLists.txt vendored Normal file
@@ -0,0 +1,7 @@
add_library_unity(
  duckdb_parquet_reader_variant OBJECT variant_binary_decoder.cpp
  variant_value.cpp variant_shredded_conversion.cpp)

set(PARQUET_EXTENSION_FILES
    ${PARQUET_EXTENSION_FILES} $<TARGET_OBJECTS:duckdb_parquet_reader_variant>
    PARENT_SCOPE)
365 external/duckdb/extension/parquet/reader/variant/variant_binary_decoder.cpp vendored Normal file
@@ -0,0 +1,365 @@
#include "reader/variant/variant_binary_decoder.hpp"
#include "duckdb/common/printer.hpp"
#include "utf8proc_wrapper.hpp"

#include "reader/uuid_column_reader.hpp"

#include "duckdb/common/types/timestamp.hpp"
#include "duckdb/common/types/decimal.hpp"
#include "duckdb/common/types/uuid.hpp"
#include "duckdb/common/types/time.hpp"
#include "duckdb/common/types/date.hpp"
#include "duckdb/common/types/blob.hpp"

static constexpr uint8_t VERSION_MASK = 0xF;
static constexpr uint8_t SORTED_STRINGS_MASK = 0x1;
static constexpr uint8_t SORTED_STRINGS_SHIFT = 4;
static constexpr uint8_t OFFSET_SIZE_MINUS_ONE_MASK = 0x3;
static constexpr uint8_t OFFSET_SIZE_MINUS_ONE_SHIFT = 6;

static constexpr uint8_t BASIC_TYPE_MASK = 0x3;
static constexpr uint8_t VALUE_HEADER_SHIFT = 2;

//! Object and Array header
static constexpr uint8_t FIELD_OFFSET_SIZE_MINUS_ONE_MASK = 0x3;

//! Object header
static constexpr uint8_t FIELD_ID_SIZE_MINUS_ONE_MASK = 0x3;
static constexpr uint8_t FIELD_ID_SIZE_MINUS_ONE_SHIFT = 2;

static constexpr uint8_t OBJECT_IS_LARGE_MASK = 0x1;
static constexpr uint8_t OBJECT_IS_LARGE_SHIFT = 4;

//! Array header
static constexpr uint8_t ARRAY_IS_LARGE_MASK = 0x1;
static constexpr uint8_t ARRAY_IS_LARGE_SHIFT = 2;

using namespace duckdb_yyjson;

namespace duckdb {

namespace {

static idx_t ReadVariableLengthLittleEndian(idx_t length_in_bytes, const_data_ptr_t &ptr) {
    if (length_in_bytes > sizeof(idx_t)) {
        throw NotImplementedException("Can't read little-endian value of %d bytes", length_in_bytes);
    }
    idx_t result = 0;
    memcpy(reinterpret_cast<uint8_t *>(&result), ptr, length_in_bytes);
    ptr += length_in_bytes;
    return result;
}

} // namespace

VariantMetadataHeader VariantMetadataHeader::FromHeaderByte(uint8_t byte) {
    VariantMetadataHeader header;
    header.version = byte & VERSION_MASK;
    header.sorted_strings = (byte >> SORTED_STRINGS_SHIFT) & SORTED_STRINGS_MASK;
    header.offset_size = ((byte >> OFFSET_SIZE_MINUS_ONE_SHIFT) & OFFSET_SIZE_MINUS_ONE_MASK) + 1;

    if (header.version != 1) {
        throw NotImplementedException("Only version 1 of the Variant encoding scheme is supported, found version: %d",
                                      header.version);
    }

    return header;
}

VariantMetadata::VariantMetadata(const string_t &metadata) : metadata(metadata) {
    auto metadata_data = metadata.GetData();

    header = VariantMetadataHeader::FromHeaderByte(metadata_data[0]);

    const_data_ptr_t ptr = reinterpret_cast<const_data_ptr_t>(metadata_data + sizeof(uint8_t));
    idx_t dictionary_size = ReadVariableLengthLittleEndian(header.offset_size, ptr);

    auto offsets = ptr;
    auto bytes = offsets + ((dictionary_size + 1) * header.offset_size);
    idx_t last_offset = ReadVariableLengthLittleEndian(header.offset_size, ptr);
    for (idx_t i = 0; i < dictionary_size; i++) {
        auto next_offset = ReadVariableLengthLittleEndian(header.offset_size, ptr);
        strings.emplace_back(reinterpret_cast<const char *>(bytes + last_offset), next_offset - last_offset);
        last_offset = next_offset;
    }
}

VariantValueMetadata VariantValueMetadata::FromHeaderByte(uint8_t byte) {
    VariantValueMetadata result;
    result.basic_type = VariantBasicTypeFromByte(byte & BASIC_TYPE_MASK);
    uint8_t value_header = byte >> VALUE_HEADER_SHIFT;
    switch (result.basic_type) {
    case VariantBasicType::PRIMITIVE: {
        result.primitive_type = VariantPrimitiveTypeFromByte(value_header);
        break;
    }
    case VariantBasicType::SHORT_STRING: {
        result.string_size = value_header;
        break;
    }
    case VariantBasicType::OBJECT: {
        result.field_offset_size = (value_header & FIELD_OFFSET_SIZE_MINUS_ONE_MASK) + 1;
        result.field_id_size = ((value_header >> FIELD_ID_SIZE_MINUS_ONE_SHIFT) & FIELD_ID_SIZE_MINUS_ONE_MASK) + 1;
        result.is_large = (value_header >> OBJECT_IS_LARGE_SHIFT) & OBJECT_IS_LARGE_MASK;
        break;
    }
    case VariantBasicType::ARRAY: {
        result.field_offset_size = (value_header & FIELD_OFFSET_SIZE_MINUS_ONE_MASK) + 1;
        result.is_large = (value_header >> ARRAY_IS_LARGE_SHIFT) & ARRAY_IS_LARGE_MASK;
        break;
    }
    default:
        throw InternalException("VariantBasicType (%d) not handled", static_cast<uint8_t>(result.basic_type));
    }
    return result;
}

template <class T>
static T DecodeDecimal(const_data_ptr_t data, uint8_t &scale, uint8_t &width) {
    scale = Load<uint8_t>(data);
    data++;

    auto result = Load<T>(data);
    //! FIXME: The spec says:
    //! The implied precision of a decimal value is `floor(log_10(val)) + 1`
    width = DecimalWidth<T>::max;
    return result;
}

template <>
hugeint_t DecodeDecimal(const_data_ptr_t data, uint8_t &scale, uint8_t &width) {
    scale = Load<uint8_t>(data);
    data++;

    hugeint_t result;
    result.lower = Load<uint64_t>(data);
    result.upper = Load<int64_t>(data + sizeof(uint64_t));
    //! FIXME: The spec says:
    //! The implied precision of a decimal value is `floor(log_10(val)) + 1`
    width = DecimalWidth<hugeint_t>::max;
    return result;
}

VariantValue VariantBinaryDecoder::PrimitiveTypeDecode(const VariantValueMetadata &value_metadata,
                                                       const_data_ptr_t data) {
    switch (value_metadata.primitive_type) {
    case VariantPrimitiveType::NULL_TYPE: {
        return VariantValue(Value());
    }
    case VariantPrimitiveType::BOOLEAN_TRUE: {
        return VariantValue(Value::BOOLEAN(true));
    }
    case VariantPrimitiveType::BOOLEAN_FALSE: {
        return VariantValue(Value::BOOLEAN(false));
    }
    case VariantPrimitiveType::INT8: {
        auto value = Load<int8_t>(data);
        return VariantValue(Value::TINYINT(value));
    }
    case VariantPrimitiveType::INT16: {
        auto value = Load<int16_t>(data);
        return VariantValue(Value::SMALLINT(value));
    }
    case VariantPrimitiveType::INT32: {
        auto value = Load<int32_t>(data);
        return VariantValue(Value::INTEGER(value));
    }
    case VariantPrimitiveType::INT64: {
        auto value = Load<int64_t>(data);
        return VariantValue(Value::BIGINT(value));
    }
    case VariantPrimitiveType::DOUBLE: {
        double value = Load<double>(data);
        return VariantValue(Value::DOUBLE(value));
    }
    case VariantPrimitiveType::FLOAT: {
        float value = Load<float>(data);
        return VariantValue(Value::FLOAT(value));
    }
    case VariantPrimitiveType::DECIMAL4: {
        uint8_t scale;
        uint8_t width;

        auto value = DecodeDecimal<int32_t>(data, scale, width);
        auto value_str = Decimal::ToString(value, width, scale);
        return VariantValue(Value(value_str));
    }
    case VariantPrimitiveType::DECIMAL8: {
        uint8_t scale;
        uint8_t width;

        auto value = DecodeDecimal<int64_t>(data, scale, width);
        auto value_str = Decimal::ToString(value, width, scale);
        return VariantValue(Value(value_str));
    }
    case VariantPrimitiveType::DECIMAL16: {
        uint8_t scale;
        uint8_t width;

        auto value = DecodeDecimal<hugeint_t>(data, scale, width);
        auto value_str = Decimal::ToString(value, width, scale);
        return VariantValue(Value(value_str));
    }
    case VariantPrimitiveType::DATE: {
        date_t value;
        value.days = Load<int32_t>(data);
        return VariantValue(Value::DATE(value));
    }
    case VariantPrimitiveType::TIMESTAMP_MICROS: {
        timestamp_tz_t micros_ts_tz;
        micros_ts_tz.value = Load<int64_t>(data);
        return VariantValue(Value::TIMESTAMPTZ(micros_ts_tz));
    }
    case VariantPrimitiveType::TIMESTAMP_NTZ_MICROS: {
        timestamp_t micros_ts;
        micros_ts.value = Load<int64_t>(data);

        auto value = Value::TIMESTAMP(micros_ts);
        auto value_str = value.ToString();
        return VariantValue(Value(value_str));
    }
    case VariantPrimitiveType::BINARY: {
        //! Follow the JSON serialization guide by converting BINARY to Base64:
        //! For example: `"dmFyaWFudAo="`
        auto size = Load<uint32_t>(data);
        auto string_data = reinterpret_cast<const char *>(data + sizeof(uint32_t));
        auto base64_string = Blob::ToBase64(string_t(string_data, size));
        return VariantValue(Value(base64_string));
    }
    case VariantPrimitiveType::STRING: {
        auto size = Load<uint32_t>(data);
        auto string_data = reinterpret_cast<const char *>(data + sizeof(uint32_t));
        if (!Utf8Proc::IsValid(string_data, size)) {
            throw InternalException("Can't decode Variant short-string, string isn't valid UTF8");
        }
        return VariantValue(Value(string(string_data, size)));
    }
    case VariantPrimitiveType::TIME_NTZ_MICROS: {
        dtime_t micros_time;
        micros_time.micros = Load<int64_t>(data);
        return VariantValue(Value::TIME(micros_time));
    }
    case VariantPrimitiveType::TIMESTAMP_NANOS: {
        timestamp_ns_t nanos_ts;
        nanos_ts.value = Load<int64_t>(data);

        //! Convert the nanos timestamp to a micros timestamp (not lossless)
        auto micros_ts = Timestamp::FromEpochNanoSeconds(nanos_ts.value);
        return VariantValue(Value::TIMESTAMPTZ(timestamp_tz_t(micros_ts)));
    }
    case VariantPrimitiveType::TIMESTAMP_NTZ_NANOS: {
        timestamp_ns_t nanos_ts;
        nanos_ts.value = Load<int64_t>(data);

        auto value = Value::TIMESTAMPNS(nanos_ts);
        auto value_str = value.ToString();
        return VariantValue(Value(value_str));
    }
    case VariantPrimitiveType::UUID: {
        auto uuid_value = UUIDValueConversion::ReadParquetUUID(data);
        auto value_str = UUID::ToString(uuid_value);
        return VariantValue(Value(value_str));
    }
    default:
        throw NotImplementedException("Variant PrimitiveTypeDecode not implemented for type (%d)",
                                      static_cast<uint8_t>(value_metadata.primitive_type));
    }
}

VariantValue VariantBinaryDecoder::ShortStringDecode(const VariantValueMetadata &value_metadata,
                                                     const_data_ptr_t data) {
    D_ASSERT(value_metadata.string_size < 64);
    auto string_data = reinterpret_cast<const char *>(data);
    if (!Utf8Proc::IsValid(string_data, value_metadata.string_size)) {
        throw InternalException("Can't decode Variant short-string, string isn't valid UTF8");
    }
    return VariantValue(Value(string(string_data, value_metadata.string_size)));
}

VariantValue VariantBinaryDecoder::ObjectDecode(const VariantMetadata &metadata,
                                                const VariantValueMetadata &value_metadata, const_data_ptr_t data) {
    VariantValue ret(VariantValueType::OBJECT);

    auto field_offset_size = value_metadata.field_offset_size;
    auto field_id_size = value_metadata.field_id_size;
    auto is_large = value_metadata.is_large;

    idx_t num_elements;
    if (is_large) {
        num_elements = Load<uint32_t>(data);
        data += sizeof(uint32_t);
    } else {
        num_elements = Load<uint8_t>(data);
        data += sizeof(uint8_t);
    }

    auto field_ids = data;
    auto field_offsets = data + (num_elements * field_id_size);
    auto values = field_offsets + ((num_elements + 1) * field_offset_size);

    idx_t last_offset = ReadVariableLengthLittleEndian(field_offset_size, field_offsets);
    for (idx_t i = 0; i < num_elements; i++) {
        auto field_id = ReadVariableLengthLittleEndian(field_id_size, field_ids);
        auto next_offset = ReadVariableLengthLittleEndian(field_offset_size, field_offsets);

        auto value = Decode(metadata, values + last_offset);
        auto &key = metadata.strings[field_id];

        ret.AddChild(key, std::move(value));
        last_offset = next_offset;
    }
    return ret;
}

VariantValue VariantBinaryDecoder::ArrayDecode(const VariantMetadata &metadata,
                                               const VariantValueMetadata &value_metadata, const_data_ptr_t data) {
    VariantValue ret(VariantValueType::ARRAY);

    auto field_offset_size = value_metadata.field_offset_size;
    auto is_large = value_metadata.is_large;

    uint32_t num_elements;
    if (is_large) {
        num_elements = Load<uint32_t>(data);
        data += sizeof(uint32_t);
    } else {
        num_elements = Load<uint8_t>(data);
        data += sizeof(uint8_t);
    }

    auto field_offsets = data;
    auto values = field_offsets + ((num_elements + 1) * field_offset_size);

    idx_t last_offset = ReadVariableLengthLittleEndian(field_offset_size, field_offsets);
    for (idx_t i = 0; i < num_elements; i++) {
        auto next_offset = ReadVariableLengthLittleEndian(field_offset_size, field_offsets);

        ret.AddItem(Decode(metadata, values + last_offset));
        last_offset = next_offset;
    }
    return ret;
}

VariantValue VariantBinaryDecoder::Decode(const VariantMetadata &variant_metadata, const_data_ptr_t data) {
    auto value_metadata = VariantValueMetadata::FromHeaderByte(data[0]);

    data++;
    switch (value_metadata.basic_type) {
    case VariantBasicType::PRIMITIVE: {
        return PrimitiveTypeDecode(value_metadata, data);
    }
    case VariantBasicType::SHORT_STRING: {
        return ShortStringDecode(value_metadata, data);
    }
    case VariantBasicType::OBJECT: {
        return ObjectDecode(variant_metadata, value_metadata, data);
    }
    case VariantBasicType::ARRAY: {
        return ArrayDecode(variant_metadata, value_metadata, data);
    }
    default:
        throw InternalException("Unexpected value for VariantBasicType");
    }
}

} // namespace duckdb
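Two low-level pieces do most of the work in this decoder: unpacking the metadata header byte (version in bits 0-3, sorted-strings flag in bit 4, offset size minus one in bits 6-7, matching the mask/shift constants above) and reading little-endian integers of 1-4 bytes. A standalone sketch of both, with illustrative names and values; like the original, the little-endian read assumes a little-endian host:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

struct MetadataHeader {
    uint8_t version;     // bits 0-3
    bool sorted_strings; // bit 4
    uint8_t offset_size; // bits 6-7 store (size in bytes - 1)
};

static MetadataHeader DecodeHeader(uint8_t byte) {
    MetadataHeader h;
    h.version = byte & 0xF;
    h.sorted_strings = (byte >> 4) & 0x1;
    h.offset_size = ((byte >> 6) & 0x3) + 1;
    return h;
}

// Read num_bytes (1-4) as a little-endian integer, advancing the cursor.
static uint64_t ReadLittleEndian(const uint8_t *&ptr, size_t num_bytes) {
    uint64_t result = 0;
    std::memcpy(&result, ptr, num_bytes); // low bytes of result on a little-endian host
    ptr += num_bytes;
    return result;
}

int main() {
    // 0x51 = 0b0101'0001: version 1, sorted strings, offset_size = 2 bytes
    auto h = DecodeHeader(0x51);
    std::printf("version=%u sorted=%d offset_size=%u\n", (unsigned)h.version, (int)h.sorted_strings,
                (unsigned)h.offset_size);

    uint8_t buf[] = {0x34, 0x12};
    const uint8_t *p = buf;
    std::printf("0x%04llx\n", (unsigned long long)ReadLittleEndian(p, 2)); // prints 0x1234
}
```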
577 external/duckdb/extension/parquet/reader/variant/variant_shredded_conversion.cpp vendored Normal file
@@ -0,0 +1,577 @@
|
||||
#include "reader/variant/variant_shredded_conversion.hpp"
|
||||
#include "column_reader.hpp"
|
||||
#include "utf8proc_wrapper.hpp"
|
||||
|
||||
#include "duckdb/common/types/timestamp.hpp"
|
||||
#include "duckdb/common/types/decimal.hpp"
|
||||
#include "duckdb/common/types/uuid.hpp"
|
||||
#include "duckdb/common/types/time.hpp"
|
||||
#include "duckdb/common/types/date.hpp"
|
||||
#include "duckdb/common/types/blob.hpp"
|
||||
|
||||
namespace duckdb {
|
||||
|
||||
template <class T>
|
||||
struct ConvertShreddedValue {
|
||||
static VariantValue Convert(T val);
|
||||
static VariantValue ConvertDecimal(T val, uint8_t width, uint8_t scale) {
|
||||
throw InternalException("ConvertShreddedValue::ConvertDecimal not implemented for type");
|
||||
}
|
||||
static VariantValue ConvertBlob(T val) {
|
||||
throw InternalException("ConvertShreddedValue::ConvertBlob not implemented for type");
|
||||
}
|
||||
};
|
||||
|
||||
//! boolean
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<bool>::Convert(bool val) {
|
||||
return VariantValue(Value::BOOLEAN(val));
|
||||
}
|
||||
//! int8
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<int8_t>::Convert(int8_t val) {
|
||||
return VariantValue(Value::TINYINT(val));
|
||||
}
|
||||
//! int16
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<int16_t>::Convert(int16_t val) {
|
||||
return VariantValue(Value::SMALLINT(val));
|
||||
}
|
||||
//! int32
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<int32_t>::Convert(int32_t val) {
|
||||
return VariantValue(Value::INTEGER(val));
|
||||
}
|
||||
//! int64
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<int64_t>::Convert(int64_t val) {
|
||||
return VariantValue(Value::BIGINT(val));
|
||||
}
|
||||
//! float
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<float>::Convert(float val) {
|
||||
return VariantValue(Value::FLOAT(val));
|
||||
}
|
||||
//! double
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<double>::Convert(double val) {
|
||||
return VariantValue(Value::DOUBLE(val));
|
||||
}
|
||||
//! decimal4/decimal8/decimal16
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<int32_t>::ConvertDecimal(int32_t val, uint8_t width, uint8_t scale) {
|
||||
auto value_str = Decimal::ToString(val, width, scale);
|
||||
return VariantValue(Value(value_str));
|
||||
}
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<int64_t>::ConvertDecimal(int64_t val, uint8_t width, uint8_t scale) {
|
||||
auto value_str = Decimal::ToString(val, width, scale);
|
||||
return VariantValue(Value(value_str));
|
||||
}
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<hugeint_t>::ConvertDecimal(hugeint_t val, uint8_t width, uint8_t scale) {
|
||||
auto value_str = Decimal::ToString(val, width, scale);
|
||||
return VariantValue(Value(value_str));
|
||||
}
|
||||
//! date
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<date_t>::Convert(date_t val) {
|
||||
return VariantValue(Value::DATE(val));
|
||||
}
|
||||
//! time
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<dtime_t>::Convert(dtime_t val) {
|
||||
return VariantValue(Value::TIME(val));
|
||||
}
|
||||
//! timestamptz(6)
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<timestamp_tz_t>::Convert(timestamp_tz_t val) {
|
||||
return VariantValue(Value::TIMESTAMPTZ(val));
|
||||
}
|
||||
////! timestamptz(9)
|
||||
// template <>
|
||||
// VariantValue ConvertShreddedValue<timestamp_ns_tz_t>::Convert(timestamp_ns_tz_t val) {
|
||||
// return VariantValue(Value::TIMESTAMPNS_TZ(val));
|
||||
//}
|
||||
//! timestampntz(6)
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<timestamp_t>::Convert(timestamp_t val) {
|
||||
return VariantValue(Value::TIMESTAMP(val));
|
||||
}
|
||||
//! timestampntz(9)
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<timestamp_ns_t>::Convert(timestamp_ns_t val) {
|
||||
return VariantValue(Value::TIMESTAMPNS(val));
|
||||
}
|
||||
//! binary
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<string_t>::ConvertBlob(string_t val) {
|
||||
return VariantValue(Value(Blob::ToBase64(val)));
|
||||
}
|
||||
//! string
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<string_t>::Convert(string_t val) {
|
||||
if (!Utf8Proc::IsValid(val.GetData(), val.GetSize())) {
|
||||
throw InternalException("Can't decode Variant string, it isn't valid UTF8");
|
||||
}
|
||||
return VariantValue(Value(val.GetString()));
|
||||
}
|
||||
//! uuid
|
||||
template <>
|
||||
VariantValue ConvertShreddedValue<hugeint_t>::Convert(hugeint_t val) {
|
||||
return VariantValue(Value(UUID::ToString(val)));
|
||||
}
|
||||
|
||||
template <class T, class OP, LogicalTypeId TYPE_ID>
|
||||
vector<VariantValue> ConvertTypedValues(Vector &vec, Vector &metadata, Vector &blob, idx_t offset, idx_t length,
|
||||
idx_t total_size, const bool is_field) {
|
||||
UnifiedVectorFormat metadata_format;
|
||||
metadata.ToUnifiedFormat(length, metadata_format);
|
||||
auto metadata_data = metadata_format.GetData<string_t>(metadata_format);
|
||||
|
||||
UnifiedVectorFormat typed_format;
|
||||
vec.ToUnifiedFormat(total_size, typed_format);
|
||||
auto data = typed_format.GetData<T>(typed_format);
|
||||
|
||||
UnifiedVectorFormat value_format;
|
||||
blob.ToUnifiedFormat(total_size, value_format);
|
||||
auto value_data = value_format.GetData<string_t>(value_format);
|
||||
|
||||
auto &validity = typed_format.validity;
|
||||
auto &value_validity = value_format.validity;
|
||||
auto &type = vec.GetType();
|
||||
|
||||
//! Values only used for Decimal conversion
|
||||
uint8_t width;
|
||||
uint8_t scale;
|
||||
if (TYPE_ID == LogicalTypeId::DECIMAL) {
|
||||
type.GetDecimalProperties(width, scale);
|
||||
}
|
||||
|
||||
vector<VariantValue> ret(length);
|
||||
if (validity.AllValid()) {
|
||||
for (idx_t i = 0; i < length; i++) {
|
||||
auto index = typed_format.sel->get_index(i + offset);
|
||||
if (TYPE_ID == LogicalTypeId::DECIMAL) {
|
||||
ret[i] = OP::ConvertDecimal(data[index], width, scale);
|
||||
} else if (TYPE_ID == LogicalTypeId::BLOB) {
|
||||
ret[i] = OP::ConvertBlob(data[index]);
|
||||
} else {
|
||||
ret[i] = OP::Convert(data[index]);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (idx_t i = 0; i < length; i++) {
|
||||
auto typed_index = typed_format.sel->get_index(i + offset);
|
||||
auto value_index = value_format.sel->get_index(i + offset);
|
||||
if (validity.RowIsValid(typed_index)) {
|
||||
//! This is a leaf, partially shredded values aren't possible here
|
||||
D_ASSERT(!value_validity.RowIsValid(value_index));
|
||||
if (TYPE_ID == LogicalTypeId::DECIMAL) {
|
||||
ret[i] = OP::ConvertDecimal(data[typed_index], width, scale);
|
||||
} else if (TYPE_ID == LogicalTypeId::BLOB) {
|
||||
ret[i] = OP::ConvertBlob(data[typed_index]);
|
||||
} else {
|
||||
ret[i] = OP::Convert(data[typed_index]);
|
||||
}
|
||||
} else {
|
||||
if (is_field && !value_validity.RowIsValid(value_index)) {
|
||||
//! Value is missing for this field
|
||||
continue;
|
||||
}
|
||||
D_ASSERT(value_validity.RowIsValid(value_index));
|
||||
auto metadata_value = metadata_data[metadata_format.sel->get_index(i)];
|
||||
VariantMetadata variant_metadata(metadata_value);
|
||||
ret[i] = VariantBinaryDecoder::Decode(variant_metadata,
|
||||
const_data_ptr_cast(value_data[value_index].GetData()));
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
vector<VariantValue> VariantShreddedConversion::ConvertShreddedLeaf(Vector &metadata, Vector &value,
|
||||
Vector &typed_value, idx_t offset, idx_t length,
|
||||
idx_t total_size, const bool is_field) {
|
||||
D_ASSERT(!typed_value.GetType().IsNested());
|
||||
vector<VariantValue> result;
|
||||
|
||||
auto &type = typed_value.GetType();
|
||||
switch (type.id()) {
|
||||
//! boolean
|
||||
case LogicalTypeId::BOOLEAN: {
|
||||
return ConvertTypedValues<bool, ConvertShreddedValue<bool>, LogicalTypeId::BOOLEAN>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! int8
|
||||
case LogicalTypeId::TINYINT: {
|
||||
return ConvertTypedValues<int8_t, ConvertShreddedValue<int8_t>, LogicalTypeId::TINYINT>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! int16
|
||||
case LogicalTypeId::SMALLINT: {
|
||||
return ConvertTypedValues<int16_t, ConvertShreddedValue<int16_t>, LogicalTypeId::SMALLINT>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! int32
|
||||
case LogicalTypeId::INTEGER: {
|
||||
return ConvertTypedValues<int32_t, ConvertShreddedValue<int32_t>, LogicalTypeId::INTEGER>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! int64
|
||||
case LogicalTypeId::BIGINT: {
|
||||
return ConvertTypedValues<int64_t, ConvertShreddedValue<int64_t>, LogicalTypeId::BIGINT>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! float
|
||||
case LogicalTypeId::FLOAT: {
|
||||
return ConvertTypedValues<float, ConvertShreddedValue<float>, LogicalTypeId::FLOAT>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! double
|
||||
case LogicalTypeId::DOUBLE: {
|
||||
return ConvertTypedValues<double, ConvertShreddedValue<double>, LogicalTypeId::DOUBLE>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! decimal4/decimal8/decimal16
|
||||
case LogicalTypeId::DECIMAL: {
|
||||
auto physical_type = type.InternalType();
|
||||
switch (physical_type) {
|
||||
case PhysicalType::INT32: {
|
||||
return ConvertTypedValues<int32_t, ConvertShreddedValue<int32_t>, LogicalTypeId::DECIMAL>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
case PhysicalType::INT64: {
|
||||
return ConvertTypedValues<int64_t, ConvertShreddedValue<int64_t>, LogicalTypeId::DECIMAL>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
case PhysicalType::INT128: {
|
||||
return ConvertTypedValues<hugeint_t, ConvertShreddedValue<hugeint_t>, LogicalTypeId::DECIMAL>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
default:
|
||||
throw NotImplementedException("Decimal with PhysicalType (%s) not implemented for shredded Variant",
|
||||
EnumUtil::ToString(physical_type));
|
||||
}
|
||||
}
|
||||
//! date
|
||||
case LogicalTypeId::DATE: {
|
||||
return ConvertTypedValues<date_t, ConvertShreddedValue<date_t>, LogicalTypeId::DATE>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! time
|
||||
case LogicalTypeId::TIME: {
|
||||
return ConvertTypedValues<dtime_t, ConvertShreddedValue<dtime_t>, LogicalTypeId::TIME>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! timestamptz(6) (timestamptz(9) not implemented in DuckDB)
|
||||
case LogicalTypeId::TIMESTAMP_TZ: {
|
||||
return ConvertTypedValues<timestamp_tz_t, ConvertShreddedValue<timestamp_tz_t>, LogicalTypeId::TIMESTAMP_TZ>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! timestampntz(6)
|
||||
case LogicalTypeId::TIMESTAMP: {
|
||||
return ConvertTypedValues<timestamp_t, ConvertShreddedValue<timestamp_t>, LogicalTypeId::TIMESTAMP>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! timestampntz(9)
|
||||
case LogicalTypeId::TIMESTAMP_NS: {
|
||||
return ConvertTypedValues<timestamp_ns_t, ConvertShreddedValue<timestamp_ns_t>, LogicalTypeId::TIMESTAMP_NS>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! binary
|
||||
case LogicalTypeId::BLOB: {
|
||||
return ConvertTypedValues<string_t, ConvertShreddedValue<string_t>, LogicalTypeId::BLOB>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! string
|
||||
case LogicalTypeId::VARCHAR: {
|
||||
return ConvertTypedValues<string_t, ConvertShreddedValue<string_t>, LogicalTypeId::VARCHAR>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
//! uuid
|
||||
case LogicalTypeId::UUID: {
|
||||
return ConvertTypedValues<hugeint_t, ConvertShreddedValue<hugeint_t>, LogicalTypeId::UUID>(
|
||||
typed_value, metadata, value, offset, length, total_size, is_field);
|
||||
}
|
||||
default:
|
||||
throw NotImplementedException("Variant shredding on type: '%s' is not implemented", type.ToString());
|
||||
}
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
struct ShreddedVariantField {
|
||||
public:
|
||||
explicit ShreddedVariantField(const string &field_name) : field_name(field_name) {
|
||||
}
|
||||
|
||||
public:
|
||||
string field_name;
|
||||
//! Values for the field, for all rows
|
||||
vector<VariantValue> values;
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
template <bool IS_REQUIRED>
|
||||
static vector<VariantValue> ConvertBinaryEncoding(Vector &metadata, Vector &value, idx_t offset, idx_t length,
|
||||
idx_t total_size) {
|
||||
UnifiedVectorFormat value_format;
|
||||
value.ToUnifiedFormat(total_size, value_format);
|
||||
auto value_data = value_format.GetData<string_t>(value_format);
|
||||
auto &validity = value_format.validity;
|
||||
|
||||
UnifiedVectorFormat metadata_format;
|
||||
metadata.ToUnifiedFormat(length, metadata_format);
|
||||
auto metadata_data = metadata_format.GetData<string_t>(metadata_format);
|
||||
auto metadata_validity = metadata_format.validity;
|
||||
|
||||
vector<VariantValue> ret(length);
|
||||
if (IS_REQUIRED) {
|
||||
for (idx_t i = 0; i < length; i++) {
|
||||
auto index = value_format.sel->get_index(i + offset);
|
||||
|
||||
// Variant itself is NULL
|
||||
if (!validity.RowIsValid(index) && !metadata_validity.RowIsValid(metadata_format.sel->get_index(i))) {
|
||||
ret[i] = VariantValue(Value());
|
||||
continue;
|
||||
}
|
||||
|
||||
D_ASSERT(validity.RowIsValid(index));
|
||||
auto &metadata_value = metadata_data[metadata_format.sel->get_index(i)];
|
||||
VariantMetadata variant_metadata(metadata_value);
|
||||
auto binary_value = value_data[index].GetData();
|
||||
ret[i] = VariantBinaryDecoder::Decode(variant_metadata, const_data_ptr_cast(binary_value));
|
||||
}
|
||||
} else {
|
||||
//! Even though 'typed_value' is not present, 'value' is allowed to contain NULLs because we're scanning an
|
||||
//! Object's shredded field.
|
||||
//! When 'value' is null for a row, that means the Object does not contain this field
|
||||
//! for that row.
|
||||
for (idx_t i = 0; i < length; i++) {
|
||||
auto index = value_format.sel->get_index(i + offset);
|
||||
if (validity.RowIsValid(index)) {
|
||||
auto &metadata_value = metadata_data[metadata_format.sel->get_index(i)];
|
||||
VariantMetadata variant_metadata(metadata_value);
|
||||
auto binary_value = value_data[index].GetData();
|
||||
ret[i] = VariantBinaryDecoder::Decode(variant_metadata, const_data_ptr_cast(binary_value));
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
static VariantValue ConvertPartiallyShreddedObject(vector<ShreddedVariantField> &shredded_fields,
                                                   const UnifiedVectorFormat &metadata_format,
                                                   const UnifiedVectorFormat &value_format, idx_t i, idx_t offset) {
	auto ret = VariantValue(VariantValueType::OBJECT);
	auto index = value_format.sel->get_index(i + offset);
	auto value_data = value_format.GetData<string_t>(value_format);
	auto metadata_data = metadata_format.GetData<string_t>(metadata_format);
	auto &value_validity = value_format.validity;

	for (idx_t field_index = 0; field_index < shredded_fields.size(); field_index++) {
		auto &shredded_field = shredded_fields[field_index];
		auto &field_value = shredded_field.values[i];

		if (field_value.IsMissing()) {
			//! This field is missing from the value, skip it
			continue;
		}
		ret.AddChild(shredded_field.field_name, std::move(field_value));
	}

	if (value_validity.RowIsValid(index)) {
		//! Object is partially shredded, decode the object and merge the values
		auto &metadata_value = metadata_data[metadata_format.sel->get_index(i)];
		VariantMetadata variant_metadata(metadata_value);
		auto binary_value = value_data[index].GetData();
		auto unshredded = VariantBinaryDecoder::Decode(variant_metadata, const_data_ptr_cast(binary_value));
		if (unshredded.value_type != VariantValueType::OBJECT) {
			throw InvalidInputException("Partially shredded objects have to encode Object Variants in the 'value'");
		}
		for (auto &item : unshredded.object_children) {
			ret.AddChild(item.first, std::move(item.second));
		}
	}
	return ret;
}
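
//! Convert a shredded Object: 'typed_value' is a STRUCT with one child group per shredded field.
//! Rows whose 'typed_value' is NULL fall back to the binary-encoded 'value', which must not
//! itself encode an Object (per the invariant enforced below).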
vector<VariantValue> VariantShreddedConversion::ConvertShreddedObject(Vector &metadata, Vector &value,
                                                                      Vector &typed_value, idx_t offset, idx_t length,
                                                                      idx_t total_size, const bool is_field) {
	auto &type = typed_value.GetType();
	D_ASSERT(type.id() == LogicalTypeId::STRUCT);
	auto &fields = StructType::GetChildTypes(type);
	auto &entries = StructVector::GetEntries(typed_value);
	D_ASSERT(entries.size() == fields.size());

	//! 'value'
	UnifiedVectorFormat value_format;
	value.ToUnifiedFormat(total_size, value_format);
	auto value_data = value_format.GetData<string_t>(value_format);
	auto &validity = value_format.validity;

	//! 'metadata'
	UnifiedVectorFormat metadata_format;
	metadata.ToUnifiedFormat(length, metadata_format);
	auto metadata_data = metadata_format.GetData<string_t>(metadata_format);

	//! 'typed_value'
	UnifiedVectorFormat typed_format;
	typed_value.ToUnifiedFormat(total_size, typed_format);
	auto &typed_validity = typed_format.validity;

	//! Process all fields to get the shredded field values
	vector<ShreddedVariantField> shredded_fields;
	shredded_fields.reserve(fields.size());
	for (idx_t i = 0; i < fields.size(); i++) {
		auto &field = fields[i];
		auto &field_name = field.first;
		auto &field_vec = *entries[i];

		shredded_fields.emplace_back(field_name);
		auto &shredded_field = shredded_fields.back();
		shredded_field.values = Convert(metadata, field_vec, offset, length, total_size, true);
	}

	vector<VariantValue> ret(length);
	if (typed_validity.AllValid()) {
		//! Every row is (at least partially) shredded
		for (idx_t i = 0; i < length; i++) {
			ret[i] = ConvertPartiallyShreddedObject(shredded_fields, metadata_format, value_format, i, offset);
		}
	} else {
		//! For some of the rows, the value is not an object
		for (idx_t i = 0; i < length; i++) {
			auto typed_index = typed_format.sel->get_index(i + offset);
			auto value_index = value_format.sel->get_index(i + offset);
			if (typed_validity.RowIsValid(typed_index)) {
				ret[i] = ConvertPartiallyShreddedObject(shredded_fields, metadata_format, value_format, i, offset);
			} else {
				if (is_field && !validity.RowIsValid(value_index)) {
					//! This object is a field in the parent object, the value is missing, skip it
					continue;
				}
				D_ASSERT(validity.RowIsValid(value_index));
				auto &metadata_value = metadata_data[metadata_format.sel->get_index(i)];
				VariantMetadata variant_metadata(metadata_value);
				auto binary_value = value_data[value_index].GetData();
				ret[i] = VariantBinaryDecoder::Decode(variant_metadata, const_data_ptr_cast(binary_value));
				if (ret[i].value_type == VariantValueType::OBJECT) {
					throw InvalidInputException(
					    "When 'typed_value' for a shredded Object is NULL, 'value' can not contain an Object value");
				}
			}
		}
	}
	return ret;
}
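
//! Convert a shredded Array: 'typed_value' is a LIST whose child is itself a Variant group. Rows
//! with a valid list entry recurse into Convert over the child elements; rows with a NULL
//! 'typed_value' fall back to the binary-encoded 'value'.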
vector<VariantValue> VariantShreddedConversion::ConvertShreddedArray(Vector &metadata, Vector &value,
                                                                     Vector &typed_value, idx_t offset, idx_t length,
                                                                     idx_t total_size, const bool is_field) {
	auto &child = ListVector::GetEntry(typed_value);
	auto list_size = ListVector::GetListSize(typed_value);

	//! 'value'
	UnifiedVectorFormat value_format;
	value.ToUnifiedFormat(total_size, value_format);
	auto value_data = value_format.GetData<string_t>(value_format);

	//! 'metadata'
	UnifiedVectorFormat metadata_format;
	metadata.ToUnifiedFormat(length, metadata_format);
	auto metadata_data = metadata_format.GetData<string_t>(metadata_format);

	//! 'typed_value'
	UnifiedVectorFormat list_format;
	typed_value.ToUnifiedFormat(total_size, list_format);
	auto list_data = list_format.GetData<list_entry_t>(list_format);
	auto &validity = list_format.validity;
	auto &value_validity = value_format.validity;

	vector<VariantValue> ret(length);
	if (validity.AllValid()) {
		//! We can be sure that none of the values are binary encoded
		for (idx_t i = 0; i < length; i++) {
			auto typed_index = list_format.sel->get_index(i + offset);
			auto entry = list_data[typed_index];
			Vector child_metadata(metadata.GetValue(i));
			ret[i] = VariantValue(VariantValueType::ARRAY);
			ret[i].array_items = Convert(child_metadata, child, entry.offset, entry.length, list_size, false);
		}
	} else {
		for (idx_t i = 0; i < length; i++) {
			auto typed_index = list_format.sel->get_index(i + offset);
			auto value_index = value_format.sel->get_index(i + offset);
			if (validity.RowIsValid(typed_index)) {
				auto entry = list_data[typed_index];
				Vector child_metadata(metadata.GetValue(i));
				ret[i] = VariantValue(VariantValueType::ARRAY);
				ret[i].array_items = Convert(child_metadata, child, entry.offset, entry.length, list_size, false);
			} else {
				if (is_field && !value_validity.RowIsValid(value_index)) {
					//! Value is missing for this field
					continue;
				}
				D_ASSERT(value_validity.RowIsValid(value_index));
				auto metadata_value = metadata_data[metadata_format.sel->get_index(i)];
				VariantMetadata variant_metadata(metadata_value);
				ret[i] = VariantBinaryDecoder::Decode(variant_metadata,
				                                      const_data_ptr_cast(value_data[value_index].GetData()));
			}
		}
	}
	return ret;
}
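
//! Entry point for converting one shredded Variant group. Per the Variant shredding spec the
//! group holds only 'value' and 'typed_value' ('metadata' travels alongside, as a separate
//! argument here); a rough sketch of the expected Parquet layout:
//!   optional group v {
//!     optional binary value;
//!     optional group typed_value { ... }
//!   }
//! Dispatch on 'typed_value': STRUCT -> shredded Object, LIST -> shredded Array, any other type
//! -> shredded leaf. With no 'typed_value' at all, every row is purely binary encoded.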
vector<VariantValue> VariantShreddedConversion::Convert(Vector &metadata, Vector &group, idx_t offset, idx_t length,
                                                        idx_t total_size, bool is_field) {
	D_ASSERT(group.GetType().id() == LogicalTypeId::STRUCT);

	auto &group_entries = StructVector::GetEntries(group);
	auto &group_type_children = StructType::GetChildTypes(group.GetType());
	D_ASSERT(group_type_children.size() == group_entries.size());

	//! From the spec:
	//! The Parquet columns used to store variant metadata and values must be accessed by name, not by position.
	optional_ptr<Vector> value;
	optional_ptr<Vector> typed_value;
	for (idx_t i = 0; i < group_entries.size(); i++) {
		auto &name = group_type_children[i].first;
		auto &vec = group_entries[i];
		if (name == "value") {
			value = vec.get();
		} else if (name == "typed_value") {
			typed_value = vec.get();
		} else {
			throw InvalidInputException("Variant group can only contain 'value'/'typed_value', not: %s", name);
		}
	}
	if (!value) {
		throw InvalidInputException("Required column 'value' not found in Variant group");
	}

	if (typed_value) {
		auto &type = typed_value->GetType();
		if (type.id() == LogicalTypeId::STRUCT) {
			return ConvertShreddedObject(metadata, *value, *typed_value, offset, length, total_size, is_field);
		} else if (type.id() == LogicalTypeId::LIST) {
			return ConvertShreddedArray(metadata, *value, *typed_value, offset, length, total_size, is_field);
		} else {
			return ConvertShreddedLeaf(metadata, *value, *typed_value, offset, length, total_size, is_field);
		}
	} else {
		if (is_field) {
			return ConvertBinaryEncoding<false>(metadata, *value, offset, length, total_size);
		} else {
			//! Only 'value' is present, we can assume this to be 'required', so it can't contain NULLs
			return ConvertBinaryEncoding<true>(metadata, *value, offset, length, total_size);
		}
	}
}

} // namespace duckdb
85
external/duckdb/extension/parquet/reader/variant/variant_value.cpp
vendored
Normal file
@@ -0,0 +1,85 @@
#include "reader/variant/variant_value.hpp"

namespace duckdb {

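//! Tree-building helpers used while decoding: AddChild attaches a named field to an OBJECT value,
//! AddItem appends an element to an ARRAY value. Both assert rather than check, since callers
//! construct the value with the matching VariantValueType first.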
void VariantValue::AddChild(const string &key, VariantValue &&val) {
	D_ASSERT(value_type == VariantValueType::OBJECT);
	object_children.emplace(key, std::move(val));
}

void VariantValue::AddItem(VariantValue &&val) {
	D_ASSERT(value_type == VariantValueType::ARRAY);
	array_items.push_back(std::move(val));
}

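//! Render the decoded Variant as JSON: primitives map onto yyjson scalars, temporal values are
//! stringified (TIMESTAMP_TZ/TIMESTAMP_NS via a context-aware cast to VARCHAR), and
//! objects/arrays recurse over their children.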
yyjson_mut_val *VariantValue::ToJSON(ClientContext &context, yyjson_mut_doc *doc) const {
	switch (value_type) {
	case VariantValueType::PRIMITIVE: {
		if (primitive_value.IsNull()) {
			return yyjson_mut_null(doc);
		}
		switch (primitive_value.type().id()) {
		case LogicalTypeId::BOOLEAN:
			return primitive_value.GetValue<bool>() ? yyjson_mut_true(doc) : yyjson_mut_false(doc);
		case LogicalTypeId::TINYINT:
			return yyjson_mut_int(doc, primitive_value.GetValue<int8_t>());
		case LogicalTypeId::SMALLINT:
			return yyjson_mut_int(doc, primitive_value.GetValue<int16_t>());
		case LogicalTypeId::INTEGER:
			return yyjson_mut_int(doc, primitive_value.GetValue<int32_t>());
		case LogicalTypeId::BIGINT:
			return yyjson_mut_int(doc, primitive_value.GetValue<int64_t>());
		case LogicalTypeId::FLOAT:
			return yyjson_mut_real(doc, primitive_value.GetValue<float>());
		case LogicalTypeId::DOUBLE:
			return yyjson_mut_real(doc, primitive_value.GetValue<double>());
		case LogicalTypeId::DATE:
		case LogicalTypeId::TIME:
		case LogicalTypeId::TIMESTAMP:
		case LogicalTypeId::VARCHAR: {
			auto value_str = primitive_value.ToString();
			return yyjson_mut_strncpy(doc, value_str.c_str(), value_str.size());
		}
		case LogicalTypeId::TIMESTAMP_TZ:
		case LogicalTypeId::TIMESTAMP_NS: {
			auto value_str = primitive_value.CastAs(context, LogicalType::VARCHAR).GetValue<string>();
			return yyjson_mut_strncpy(doc, value_str.c_str(), value_str.size());
		}
		default:
			throw InternalException("Unexpected primitive type: %s", primitive_value.type().ToString());
		}
	}
	case VariantValueType::OBJECT: {
		auto obj = yyjson_mut_obj(doc);
		for (const auto &it : object_children) {
			auto &key = it.first;
			auto value = it.second.ToJSON(context, doc);
			yyjson_mut_obj_add_val(doc, obj, key.c_str(), value);
		}
		return obj;
	}
	case VariantValueType::ARRAY: {
		auto arr = yyjson_mut_arr(doc);
		for (auto &item : array_items) {
			auto value = item.ToJSON(context, doc);
			yyjson_mut_arr_add_val(arr, value);
		}
		return arr;
	}
	default:
		throw InternalException("Can't serialize this VariantValue type to JSON");
	}
}

} // namespace duckdb
161
external/duckdb/extension/parquet/reader/variant_column_reader.cpp
vendored
Normal file
@@ -0,0 +1,161 @@
#include "reader/variant_column_reader.hpp"
#include "reader/variant/variant_binary_decoder.hpp"
#include "reader/variant/variant_shredded_conversion.hpp"

namespace duckdb {

//===--------------------------------------------------------------------===//
// Variant Column Reader
//===--------------------------------------------------------------------===//
VariantColumnReader::VariantColumnReader(ClientContext &context, ParquetReader &reader,
                                         const ParquetColumnSchema &schema,
                                         vector<unique_ptr<ColumnReader>> child_readers_p)
    : ColumnReader(reader, schema), context(context), child_readers(std::move(child_readers_p)) {
	D_ASSERT(Type().InternalType() == PhysicalType::VARCHAR);

	//! The first two children must be 'metadata' and 'value', but they may appear in either order
	if (child_readers[0]->Schema().name == "metadata" && child_readers[1]->Schema().name == "value") {
		metadata_reader_idx = 0;
		value_reader_idx = 1;
	} else if (child_readers[1]->Schema().name == "metadata" && child_readers[0]->Schema().name == "value") {
		metadata_reader_idx = 1;
		value_reader_idx = 0;
	} else {
		throw InternalException("The Variant column must have 'metadata' and 'value' as the first two columns");
	}
}

ColumnReader &VariantColumnReader::GetChildReader(idx_t child_idx) {
	if (!child_readers[child_idx]) {
		throw InternalException("VariantColumnReader::GetChildReader(%d) - but this child reader is not set",
		                        child_idx);
	}
	return *child_readers[child_idx];
}

void VariantColumnReader::InitializeRead(idx_t row_group_idx_p, const vector<ColumnChunk> &columns,
                                         TProtocol &protocol_p) {
	for (auto &child : child_readers) {
		if (!child) {
			continue;
		}
		child->InitializeRead(row_group_idx_p, columns, protocol_p);
	}
}
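
//! Build the intermediate STRUCT type that VariantShreddedConversion::Convert expects: a group
//! holding 'value' (BLOB) plus 'typed_value' when the column is shredded, so the top level can be
//! handled the same way as any nested Variant group.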
static LogicalType GetIntermediateGroupType(optional_ptr<ColumnReader> typed_value) {
	child_list_t<LogicalType> children;
	children.emplace_back("value", LogicalType::BLOB);
	if (typed_value) {
		children.emplace_back("typed_value", typed_value->Type());
	}
	return LogicalType::STRUCT(std::move(children));
}
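
//! Read one batch: pull 'metadata' and 'value' (and 'typed_value', if shredded) from the child
//! readers into intermediate vectors, convert every row into a VariantValue tree, then serialize
//! each non-NULL row to a JSON string in the result vector.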
idx_t VariantColumnReader::Read(uint64_t num_values, data_ptr_t define_out, data_ptr_t repeat_out, Vector &result) {
	if (pending_skips > 0) {
		throw InternalException("VariantColumnReader cannot have pending skips");
	}
	optional_ptr<ColumnReader> typed_value_reader = child_readers.size() == 3 ? child_readers[2].get() : nullptr;

	// If the child reader values are all valid, "define_out" may not be initialized at all
	// So, we just initialize them to all be valid beforehand
	std::fill_n(define_out, num_values, MaxDefine());

	Vector metadata_intermediate(LogicalType::BLOB, num_values);
	Vector intermediate_group(GetIntermediateGroupType(typed_value_reader), num_values);
	auto &group_entries = StructVector::GetEntries(intermediate_group);
	auto &value_intermediate = *group_entries[0];

	auto metadata_values =
	    child_readers[metadata_reader_idx]->Read(num_values, define_out, repeat_out, metadata_intermediate);
	auto value_values = child_readers[value_reader_idx]->Read(num_values, define_out, repeat_out, value_intermediate);

	D_ASSERT(child_readers[metadata_reader_idx]->Schema().name == "metadata");
	D_ASSERT(child_readers[value_reader_idx]->Schema().name == "value");

	if (metadata_values != value_values) {
		throw InvalidInputException(
		    "The Variant column did not contain the same amount of values for 'metadata' and 'value'");
	}

	auto result_data = FlatVector::GetData<string_t>(result);
	auto &result_validity = FlatVector::Validity(result);

	if (typed_value_reader) {
		auto typed_values = typed_value_reader->Read(num_values, define_out, repeat_out, *group_entries[1]);
		if (typed_values != value_values) {
			throw InvalidInputException(
			    "The shredded Variant column did not contain the same amount of values for 'typed_value' and 'value'");
		}
	}
	auto conversion_result =
	    VariantShreddedConversion::Convert(metadata_intermediate, intermediate_group, 0, num_values, num_values, false);

	for (idx_t i = 0; i < conversion_result.size(); i++) {
		auto &variant = conversion_result[i];
		if (variant.IsNull()) {
			result_validity.SetInvalid(i);
			continue;
		}

		//! Write the result to a string
		VariantDecodeResult decode_result;
		decode_result.doc = yyjson_mut_doc_new(nullptr);
		auto json_val = variant.ToJSON(context, decode_result.doc);

		size_t len;
		decode_result.data =
		    yyjson_mut_val_write_opts(json_val, YYJSON_WRITE_ALLOW_INF_AND_NAN, nullptr, &len, nullptr);
		if (!decode_result.data) {
			throw InvalidInputException("Could not serialize the JSON to string, yyjson failed");
		}
		result_data[i] = StringVector::AddString(result, decode_result.data, static_cast<idx_t>(len));
	}

	return value_values;
}
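
//! Skip, prefetch, and size bookkeeping simply fan out to whichever child readers are present.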
void VariantColumnReader::Skip(idx_t num_values) {
	for (auto &child : child_readers) {
		if (!child) {
			continue;
		}
		child->Skip(num_values);
	}
}

void VariantColumnReader::RegisterPrefetch(ThriftFileTransport &transport, bool allow_merge) {
	for (auto &child : child_readers) {
		if (!child) {
			continue;
		}
		child->RegisterPrefetch(transport, allow_merge);
	}
}

uint64_t VariantColumnReader::TotalCompressedSize() {
	uint64_t size = 0;
	for (auto &child : child_readers) {
		if (!child) {
			continue;
		}
		size += child->TotalCompressedSize();
	}
	return size;
}

idx_t VariantColumnReader::GroupRowsAvailable() {
	//! All children advance in lockstep, so the first projected child is representative
	for (auto &child : child_readers) {
		if (!child) {
			continue;
		}
		return child->GroupRowsAvailable();
	}
	throw InternalException("No projected columns in struct?");
}

} // namespace duckdb