should be it
external/duckdb/benchmark/micro/copy/from_parquet_large_row_groups.benchmark (vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
# name: benchmark/micro/copy/from_parquet_large_row_groups.benchmark
# description: Import data from big batches
# group: [copy]

name Import data from big row groups
group copy

require parquet

storage persistent

load
COPY (FROM range(100000000)) TO '${BENCHMARK_DIR}/big_batches.parquet' (ROW_GROUP_SIZE 1000000);

run
CREATE TABLE big AS FROM '${BENCHMARK_DIR}/big_batches.parquet';

cleanup
DROP TABLE big;
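Note: the only knob that varies across the three from_parquet benchmarks is ROW_GROUP_SIZE (1,000,000 here, 200,000 and 5,000 in the next two files), so the suite isolates how row-group granularity affects import speed. As a sketch, the layout of the file written by the load step can be confirmed with DuckDB's parquet_metadata() table function (path reuses the benchmark's own output):

-- sketch: one row per row group, with its row count
SELECT DISTINCT row_group_id, row_group_num_rows
FROM parquet_metadata('${BENCHMARK_DIR}/big_batches.parquet');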
external/duckdb/benchmark/micro/copy/from_parquet_medium_row_groups.benchmark (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
# name: benchmark/micro/copy/from_parquet_medium_row_groups.benchmark
# description: Import data from medium batches
# group: [copy]

name Import data from medium row groups
group copy

require parquet

storage persistent

load
COPY (FROM range(100000000)) TO '${BENCHMARK_DIR}/medium_batches.parquet' (ROW_GROUP_SIZE 200000);

run
CREATE TABLE medium AS FROM '${BENCHMARK_DIR}/medium_batches.parquet';

cleanup
DROP TABLE medium;
external/duckdb/benchmark/micro/copy/from_parquet_small_row_groups.benchmark (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
# name: benchmark/micro/copy/from_parquet_small_row_groups.benchmark
# description: Import data from small batches
# group: [copy]

name Import data from small row groups
group copy

require parquet

storage persistent

load
COPY (FROM range(100000000)) TO '${BENCHMARK_DIR}/small_batches.parquet' (ROW_GROUP_SIZE 5000);

run
CREATE TABLE small AS FROM '${BENCHMARK_DIR}/small_batches.parquet';

cleanup
DROP TABLE small;
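With ROW_GROUP_SIZE 5000 the 100M-row file ends up with 20,000 row groups, versus 100 for the large-row-group variant, which is what makes this case a stress test for the Parquet reader. Outside the benchmark harness, the same comparison can be timed in the DuckDB CLI; this is only a sketch and assumes the load steps above have already produced both files:

.timer on
CREATE OR REPLACE TABLE small AS FROM '${BENCHMARK_DIR}/small_batches.parquet';
CREATE OR REPLACE TABLE big AS FROM '${BENCHMARK_DIR}/big_batches.parquet';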
external/duckdb/benchmark/micro/copy/to_parquet_partition_by_few.benchmark (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
# name: benchmark/micro/copy/to_parquet_partition_by_few.benchmark
# description: Copy to Parquet, partition_by with few (2) partitions
# group: [copy]

name Copy to Parquet, 2 partitions
group copy

load
CREATE TABLE tbl AS SELECT i%2::INT32 as part_col, i::INT32 FROM range(0,25000000) tbl(i)

run
COPY tbl TO '${BENCHMARK_DIR}/partitioned_write' (FORMAT parquet, PARTITION_BY part_col, OVERWRITE_OR_IGNORE TRUE);
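PARTITION_BY writes a Hive-style directory tree (part_col=0/, part_col=1/, ...) under the target path, and OVERWRITE_OR_IGNORE lets repeated benchmark runs reuse that directory. A sketch of reading the partitioned output back, assuming the run step above has executed and the default one-directory-per-partition file layout:

-- hive_partitioning recovers part_col from the directory names
SELECT part_col, count(*) AS rows_per_partition
FROM read_parquet('${BENCHMARK_DIR}/partitioned_write/*/*.parquet', hive_partitioning = true)
GROUP BY part_col
ORDER BY part_col;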
external/duckdb/benchmark/micro/copy/to_parquet_partition_by_many.benchmark (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
# name: benchmark/micro/copy/to_parquet_partition_by_many.benchmark
# description: Copy to Parquet, partition_by with many (1000) partitions
# group: [copy]

name Copy to Parquet, 1000 partitions
group copy

load
CREATE TABLE tbl AS SELECT i%1000::INT32 as part_col, i::INT32 FROM range(0,25000000) tbl(i);
SET partitioned_write_max_open_files=10000;

run
COPY tbl TO '${BENCHMARK_DIR}/partitioned_write' (FORMAT parquet, PARTITION_BY part_col, OVERWRITE_OR_IGNORE TRUE);
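The 1000-partition variant raises partitioned_write_max_open_files in its load step because the writer otherwise limits how many partition files it keeps open at once and would have to flush and reopen files as partitions cycle. As a sketch, the effective value can be checked before the run:

-- confirm the setting applied by the load step above
SELECT current_setting('partitioned_write_max_open_files');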