should be it

This commit is contained in:
2025-10-24 19:21:19 -05:00
parent a4b23fc57c
commit f09560c7b1
14047 changed files with 3161551 additions and 1 deletions

View File

@@ -0,0 +1,10 @@
# name: benchmark/micro/logger/disabled/logging_disabled_client_context.benchmark
# description: Benchmarking the Client Context logger
# group: [disabled]
name Client Context NopLogger
group logger
# Note: this will call the Logger, but logging is disabled so this will call the NopLogger
# (50M calls measure the per-call dispatch overhead of the connection-scoped NopLogger path)
run
SELECT write_log('hello world', scope := 'connection') from range(0,50000000);

View File

@@ -0,0 +1,10 @@
# name: benchmark/micro/logger/disabled/logging_disabled_file_opener.benchmark
# description: Benchmarking the File Opener logger (has an extra step of indirection so will be a little slower)
# group: [disabled]
name FileOpener NopLogger
group logger
# Note: this will call the Logger, but logging is disabled so this will call the NopLogger
# (50M calls measure the per-call overhead of the file-opener-scoped NopLogger path)
run
SELECT write_log('hello world', scope := 'file_opener') from range(0,50000000);

View File

@@ -0,0 +1,10 @@
# name: benchmark/micro/logger/disabled/logging_disabled_global.benchmark
# description: Benchmarking the Global logger
# group: [disabled]
name Global NopLogger
# was "group case": all other logger micro-benchmarks use "group logger"
group logger
# Note: this will call the Logger, but logging is disabled so this will call the NopLogger
run
SELECT write_log('hello world', scope := 'database') from range(0,50000000);

View File

@@ -0,0 +1,10 @@
# name: benchmark/micro/logger/disabled/logging_disabled_reference.benchmark
# description: Logger disabled benchmark without Logger calls (for reference)
# group: [disabled]
name Disabled logger reference
# was "group case": all other logger micro-benchmarks use "group logger"
group logger
# Note: this will NOT call any logger code, it's simply for reference for the other benchmarks
run
SELECT write_log('hello world', scope := 'database', disable_logging := true) from range(0,50000000);

View File

@@ -0,0 +1,20 @@
# name: benchmark/micro/logger/enabled/logging_enabled_client_context.benchmark
# description: Benchmarking the Client Context Memory logger
# group: [enabled]
name Client Context Memory Logger
group logger
load
set enable_logging=true;
set logging_storage='memory';
# terminated with ';' like the other set statements in this section
set logging_level='debug';
run
SELECT write_log('hello world', level := 'warn', scope := 'connection', log_type := 'duckdb.SomeName.SomeOtherName.BlaBla' ) from range(0,1000000);
# NOTE(review): cleanup toggles storage to stdout and back, presumably to drop the in-memory log buffer — confirm
cleanup
set enable_logging=false;
set logging_storage='stdout';
set logging_storage='memory';
set enable_logging=true;

View File

@@ -0,0 +1,20 @@
# name: benchmark/micro/logger/enabled/logging_enabled_file_opener.benchmark
# description: Benchmarking the File Opener Memory logger
# group: [enabled]
name FileOpener Memory Logger
group logger
load
set enable_logging=true;
set logging_storage='memory';
# terminated with ';' like the other set statements in this section
set logging_level='debug';
run
SELECT write_log('hello world', level := 'warn', scope := 'file_opener', log_type := 'duckdb.SomeName.SomeOtherName.BlaBla' ) from range(0,1000000);
# NOTE(review): cleanup toggles storage to stdout and back, presumably to drop the in-memory log buffer — confirm
cleanup
set enable_logging=false;
set logging_storage='stdout';
set logging_storage='memory';
set enable_logging=true;

View File

@@ -0,0 +1,20 @@
# name: benchmark/micro/logger/enabled/logging_enabled_global.benchmark
# description: Benchmarking the Global Memory logger
# group: [enabled]
name Global Memory Logger
group logger
load
set enable_logging=true;
set logging_storage='memory';
# terminated with ';' like the other set statements in this section
set logging_level='debug';
run
SELECT write_log('hello world', level := 'warn', scope := 'database', log_type := 'duckdb.SomeName.SomeOtherName.BlaBla' ) from range(0,1000000);
# NOTE(review): cleanup toggles storage to stdout and back, presumably to drop the in-memory log buffer — confirm
cleanup
set enable_logging=false;
set logging_storage='stdout';
set logging_storage='memory';
set enable_logging=true;

View File

@@ -0,0 +1,16 @@
# name: benchmark/micro/logger/file_handle_log/csv/file_handle_logging_csv.benchmark
# description: Benchmarking a CSV COPY with FileSystem logging enabled
# group: [csv]
name Client Context
group logger
load
CALL enable_logging('FileSystem');
CREATE TABLE test AS select i from range(0,100000000) t(i);
run
copy test to '${BENCHMARK_DIR}/file_handle_logging.csv';
cleanup
CALL truncate_duckdb_logs();

View File

@@ -0,0 +1,12 @@
# name: benchmark/micro/logger/file_handle_log/csv/file_handle_logging_csv_baseline.benchmark
# description: Baseline for the CSV file handle logging benchmark (no logging enabled)
# group: [csv]
# was "(parquet, baseline)": this file writes CSV, not parquet
name File Handle Log (csv, baseline)
group logger
load
CREATE TABLE test AS select i from range(0,100000000) t(i);
run
copy test to '${BENCHMARK_DIR}/file_handle_logging.csv';

View File

@@ -0,0 +1,16 @@
# name: benchmark/micro/logger/file_handle_log/parquet/file_handle_logging_parquet.benchmark
# description: Benchmarking a Parquet COPY with FileSystem logging enabled
# group: [parquet]
name Client Context
group logger
load
CALL enable_logging('FileSystem');
CREATE TABLE test AS select i from range(0,100000000) t(i);
run
copy test to '${BENCHMARK_DIR}/file_handle_logging.parquet';
cleanup
CALL truncate_duckdb_logs();

View File

@@ -0,0 +1,12 @@
# name: benchmark/micro/logger/file_handle_log/parquet/file_handle_logging_parquet_baseline.benchmark
# description: Baseline for the Parquet file handle logging benchmark (no logging enabled)
# group: [parquet]
name File Handle Log (parquet, baseline)
group logger
load
CREATE TABLE test AS select i from range(0,100000000) t(i);
run
copy test to '${BENCHMARK_DIR}/file_handle_logging.parquet';

View File

@@ -0,0 +1,12 @@
# name: benchmark/micro/logger/filtered_out_by_log_type/client_context.benchmark
# description: Benchmarking the Client Context logger filtering out disabled log types
# group: [filtered_out_by_log_type]
name Client Context
group logger
load
CALL enable_logging('FileSystem');
# Note: this will call the Logger, but the log type is filtered out
run
SELECT write_log('hello world', level := 'warn', scope := 'connection', log_type := 'duckdb.SomeName.SomeOtherName.BlaBla' ) from range(0,10000000);

View File

@@ -0,0 +1,13 @@
# name: benchmark/micro/logger/filtered_out_by_log_type/file_opener.benchmark
# description: Benchmarking the File Opener logger filtering out disabled log types
# group: [filtered_out_by_log_type]
name FileOpener
group logger
load
# only the 'FileSystem' log type is enabled, so the write_log type below is presumably rejected — confirm
CALL enable_logging('FileSystem');
# Note: this will call the Logger, but the log type is filtered out
run
SELECT write_log('hello world', level := 'warn', scope := 'file_opener', log_type := 'duckdb.SomeName.SomeOtherName.BlaBla' ) from range(0,10000000);

View File

@@ -0,0 +1,13 @@
# name: benchmark/micro/logger/filtered_out_by_log_type/global.benchmark
# description: Benchmarking the Global logger filtering out disabled log types
# group: [filtered_out_by_log_type]
name Global Logger Filtering out log types
# was "group case": all other logger micro-benchmarks use "group logger"
group logger
load
CALL enable_logging('FileSystem');
# Note: this will call the Logger, but the log type is filtered out
run
SELECT write_log('hello world', level := 'warn', scope := 'database', log_type := 'duckdb.SomeName.SomeOtherName.BlaBla' ) from range(0,10000000);

View File

@@ -0,0 +1,13 @@
# name: benchmark/micro/logger/filtered_out_by_log_type/reference.benchmark
# description: Logger disabled benchmark without Logger calls (for reference)
# group: [filtered_out_by_log_type]
name Disabled logger reference
# was "group case": all other logger micro-benchmarks use "group logger"
group logger
load
CALL enable_logging('FileSystem');
# Note: this will NOT call any logger code, it's simply for reference for the other benchmarks
run
SELECT write_log('hello world', disable_logging := true, level := 'warn', scope := 'database', log_type := 'duckdb.SomeName.SomeOtherName.BlaBla' ) from range(0,10000000);

View File

@@ -0,0 +1,27 @@
# name: benchmark/micro/logger/logging_overhead/duckdb_persistent_q1_with_default_logging.benchmark
# description: Test overhead on query that writes lineitem sf1 to a duckdb database and runs Q1 on it
# group: [logging_overhead]
# was "Q1 (Parquet)": this variant measures the DuckDB persistent-storage path, not parquet
name Q1 (DuckDB)
group logger
subgroup logging_overhead
require parquet
require tpch
load
CALL dbgen(sf=1);
# terminated with ';' for consistency with the other statements in this file
CALL enable_logging(storage_path='${BENCHMARK_DIR}/duckdb_persistent_q1_with_default_logging');
run
ATTACH '${BENCHMARK_DIR}/duckdb_persistent_q1_with_default_logging.db' as my_db;
CREATE OR REPLACE TABLE my_db.lineitem AS FROM memory.lineitem;
use my_db;
PRAGMA tpch(1);
cleanup
CALL truncate_duckdb_logs();
use memory;
DETACH my_db;
result extension/tpch/dbgen/answers/sf1/q01.csv

View File

@@ -0,0 +1,24 @@
# name: benchmark/micro/logger/logging_overhead/parquet_q1_with_default_logging.benchmark
# description: Test overhead on query that writes lineitem sf1 to a parquet file and runs Q1 on it
# group: [logging_overhead]
name Q1 (Parquet)
group logger
subgroup logging_overhead
require parquet
require tpch
load
CALL dbgen(sf=1, suffix='_normal');
# statements terminated with ';' for consistency with the sibling benchmarks
CALL enable_logging(storage_path='${BENCHMARK_DIR}/parquet_q1_with_logging');
run
COPY lineitem_normal TO '${BENCHMARK_DIR}/parquet_q1_with_logging.parquet';
CREATE OR REPLACE VIEW lineitem AS SELECT * FROM read_parquet('${BENCHMARK_DIR}/parquet_q1_with_logging.parquet');
PRAGMA tpch(1);
cleanup
CALL truncate_duckdb_logs();
result extension/tpch/dbgen/answers/sf1/q01.csv

View File

@@ -0,0 +1,24 @@
# name: benchmark/micro/logger/logging_overhead/parquet_q1_with_filesystem_logging.benchmark
# description: Execute Q1 over lineitem stored in a parquet file with filesystem logging enabled. This comes at a significant, but reasonable overhead.
# group: [logging_overhead]
name Q1 (Parquet)
group logger
subgroup logging_overhead
require parquet
require tpch
load
CALL dbgen(sf=1, suffix='_normal');
# statements terminated with ';' for consistency with the sibling benchmarks
CALL enable_logging('FileSystem', storage_path='${BENCHMARK_DIR}/parquet_q1_with_logging');
run
COPY lineitem_normal TO '${BENCHMARK_DIR}/parquet_q1_with_logging.parquet';
CREATE OR REPLACE VIEW lineitem AS SELECT * FROM read_parquet('${BENCHMARK_DIR}/parquet_q1_with_logging.parquet');
PRAGMA tpch(1);
cleanup
CALL truncate_duckdb_logs();
result extension/tpch/dbgen/answers/sf1/q01.csv

View File

@@ -0,0 +1,17 @@
# name: benchmark/micro/logger/storage/file/buffer_size/custom_100.benchmark
# description: benchmarking with a custom, 100 entry buffer size
# group: [buffer_size]
name Client Context Memory Logger
group logger
# Note: we write a single log entry to trigger the lazy file initialization
load
CALL enable_logging(level='info', storage_path='${BENCHMARK_DIR}/logging_enabled_client_context', storage_config={'buffer_size':100});
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' );
run
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' ) from range(0,20000);
cleanup
# terminated with ';' like the statements in the load section
CALL truncate_duckdb_logs();

View File

@@ -0,0 +1,17 @@
# name: benchmark/micro/logger/storage/file/buffer_size/custom_20k.benchmark
# description: benchmarking with a custom, ~20k entry buffer size
# group: [buffer_size]
name Client Context Memory Logger
group logger
# Note: we write a single log entry to trigger the lazy file initialization
load
CALL enable_logging(level='info', storage_path='${BENCHMARK_DIR}/logging_enabled_client_context', storage_config={'buffer_size':10*2048});
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' );
run
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' ) from range(0,20000);
cleanup
# terminated with ';' like the statements in the load section
CALL truncate_duckdb_logs();

View File

@@ -0,0 +1,17 @@
# name: benchmark/micro/logger/storage/file/buffer_size/custom_none.benchmark
# description: benchmarking with buffering disabled
# group: [buffer_size]
name Client Context Memory Logger
group logger
# Note: we write a single log entry to trigger the lazy file initialization
load
CALL enable_logging(level='info', storage_path='${BENCHMARK_DIR}/logging_enabled_client_context', storage_config={'buffer_size':0});
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' );
run
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' ) from range(0,20000);
cleanup
CALL truncate_duckdb_logs()

View File

@@ -0,0 +1,17 @@
# name: benchmark/micro/logger/storage/file/buffer_size/default_2048.benchmark
# description: benchmarking with the default, 2048 entry buffer size
# group: [buffer_size]
name Client Context Memory Logger
group logger
# Note: we write a single log entry to trigger the lazy file initialization
load
CALL enable_logging(level='info', storage_path='${BENCHMARK_DIR}/logging_enabled_client_context');
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' );
run
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' ) from range(0,20000);
cleanup
# terminated with ';' like the statements in the load section
CALL truncate_duckdb_logs();

View File

@@ -0,0 +1,17 @@
# name: benchmark/micro/logger/storage/file/log_message_size/huge_string.benchmark
# description: test writing big strings as log messages
# group: [log_message_size]
name Client Context Memory Logger
group logger
# Note: we write a single log entry to trigger the lazy file initialization
load
CALL enable_logging(level='info', storage_path='${BENCHMARK_DIR}/logging_enabled_client_context');
SELECT write_log('hi', level := 'info', scope := 'connection' );
run
SELECT write_log(repeat('hellohello', 1000), level := 'info', scope := 'connection' ) from range(0,20000);
cleanup
# terminated with ';' like the statements in the load section
CALL truncate_duckdb_logs();

View File

@@ -0,0 +1,17 @@
# name: benchmark/micro/logger/storage/file/log_message_size/small_string.benchmark
# description: test writing small strings as log messages
# group: [log_message_size]
name Client Context Memory Logger
group logger
# Note: we write a single log entry to trigger the lazy file initialization
load
CALL enable_logging(level='info', storage_path='${BENCHMARK_DIR}/logging_enabled_client_context');
SELECT write_log('hi', level := 'info', scope := 'connection' );
run
SELECT write_log('hi', level := 'info', scope := 'connection' ) from range(0,2000000);
cleanup
# terminated with ';' like the statements in the load section
CALL truncate_duckdb_logs();

View File

@@ -0,0 +1,17 @@
# name: benchmark/micro/logger/storage/file/normalization/denormalized.benchmark
# description: test the denormalized (single file) log file output
# group: [normalization]
name Client Context Memory Logger
group logger
# Note: we write a single log entry to trigger the lazy file initialization
load
CALL enable_logging(level='info', storage_path='${BENCHMARK_DIR}/logging_enabled_client_context/file.csv');
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' );
run
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' ) from range(0,1000000);
cleanup
# terminated with ';' like the statements in the load section
CALL truncate_duckdb_logs();

View File

@@ -0,0 +1,17 @@
# name: benchmark/micro/logger/storage/file/normalization/normalized.benchmark
# description: test the normalized (split file) log file output
# group: [normalization]
name Client Context Memory Logger
group logger
# Note: we write a single log entry to trigger the lazy file initialization
load
CALL enable_logging(level='info', storage_path='${BENCHMARK_DIR}/logging_enabled_client_context');
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' );
run
SELECT write_log('well hello hello hello hello hello hello world', level := 'info', scope := 'connection' ) from range(0,1000000);
cleanup
# terminated with ';' like the statements in the load section
CALL truncate_duckdb_logs();