should be it

2025-10-24 19:21:19 -05:00
parent a4b23fc57c
commit f09560c7b1
14047 changed files with 3161551 additions and 1 deletion

@@ -0,0 +1,42 @@
# name: test/sql/logging/file_system_logging_attach.test
# group: [logging]
# We directly compare to the number of bytes written in the last statement,
# which is the block size.
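# (The initial 4096-byte writes are presumably the file/database header blocks; the single block-sized
# 262144-byte write is the table data itself.)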
require block_size 262144
require parquet
require noforcestorage
require no_alternative_verify
statement ok
SET enable_logging=true;
statement ok
SET logging_level='trace';
statement ok
ATTACH '__TEST_DIR__/filehandle_logging.db' AS db;
statement ok
CREATE TABLE db.test AS SELECT 1;
statement ok
DETACH db;
# Note: regexp_replace strips the absolute __TEST_DIR__ prefix from the path for test stability
query IIII
SELECT scope, type, log_level, regexp_replace(message, '\"path\":.*filehandle_logging.db"', '"path":"filehandle_logging.db"') AS msg
FROM duckdb_logs
WHERE type = 'FileSystem' AND contains(msg, '"path":"filehandle_logging.db"')
ORDER BY timestamp
----
DATABASE FileSystem TRACE {"fs":"LocalFileSystem","path":"filehandle_logging.db","op":"OPEN"}
DATABASE FileSystem TRACE {"fs":"LocalFileSystem","path":"filehandle_logging.db","op":"WRITE","bytes":"4096","pos":"0"}
DATABASE FileSystem TRACE {"fs":"LocalFileSystem","path":"filehandle_logging.db","op":"WRITE","bytes":"4096","pos":"4096"}
DATABASE FileSystem TRACE {"fs":"LocalFileSystem","path":"filehandle_logging.db","op":"WRITE","bytes":"4096","pos":"8192"}
DATABASE FileSystem TRACE {"fs":"LocalFileSystem","path":"filehandle_logging.db","op":"WRITE","bytes":"262144","pos":"12288"}
DATABASE FileSystem TRACE {"fs":"LocalFileSystem","path":"filehandle_logging.db","op":"WRITE","bytes":"4096","pos":"4096"}
DATABASE FileSystem TRACE {"fs":"LocalFileSystem","path":"filehandle_logging.db","op":"CLOSE"}

@@ -0,0 +1,35 @@
# name: test/sql/logging/file_system_logging_attach_deadlock.test
# group: [logging]
# Same setup as file_system_logging_attach.test, hence the block size requirement.
require block_size 262144
require parquet
require noforcestorage
require no_alternative_verify
statement ok
SET memory_limit='1000kb';
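# The tiny memory limit makes reading the in-memory log storage run out of memory below; presumably the
# regression being tested is that this errors cleanly instead of deadlocking.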
statement ok
SET enable_logging=true;
statement ok
SET logging_level='trace';
statement ok
ATTACH '__TEST_DIR__/filehandle_logging.db' AS db;
statement ok
CREATE TABLE db.test AS SELECT 1;
statement ok
DETACH db;
statement error
SELECT * FROM duckdb_logs;
----
Out of Memory Error

@@ -0,0 +1,64 @@
# name: test/sql/logging/logging.test
# description: Test basic logging functionality
# group: [logging]
require noforcestorage
query IIIIIIIIII
FROM duckdb_logs
----
query IIIIII
DESCRIBE FROM duckdb_logs
----
context_id UBIGINT YES NULL NULL NULL
scope VARCHAR YES NULL NULL NULL
connection_id UBIGINT YES NULL NULL NULL
transaction_id UBIGINT YES NULL NULL NULL
query_id UBIGINT YES NULL NULL NULL
thread_id UBIGINT YES NULL NULL NULL
timestamp TIMESTAMP WITH TIME ZONE YES NULL NULL NULL
type VARCHAR YES NULL NULL NULL
log_level VARCHAR YES NULL NULL NULL
message VARCHAR YES NULL NULL NULL
statement ok
set logging_level='debug';
statement ok
set enable_logging=true;
statement ok
SELECT 1;
query IIIII
SELECT * EXCLUDE (context_id, timestamp, connection_id, transaction_id, query_id) FROM duckdb_logs
----
CONNECTION NULL QueryLog INFO SELECT 1;
statement ok
set enable_logging=false;
# With logging disabled, the previously collected log entries persist
query IIIII
SELECT * EXCLUDE (context_id, timestamp, connection_id, transaction_id, query_id) FROM duckdb_logs where type = 'QueryLog'
----
CONNECTION NULL QueryLog INFO SELECT 1;
CONNECTION NULL QueryLog INFO SELECT * EXCLUDE (context_id, timestamp, connection_id, transaction_id, query_id) FROM duckdb_logs
CONNECTION NULL QueryLog INFO set enable_logging=false;
statement ok
set logging_storage='stdout';
statement ok
set logging_storage='memory';
statement error
set logging_storage='quack';
----
Invalid Input Error: Log storage 'quack' is not yet registered
# The log is now empty: switching log storage clears the stored entries
query IIIIII
SELECT * EXCLUDE (timestamp, connection_id, transaction_id, query_id) FROM duckdb_logs
----

@@ -0,0 +1,106 @@
# name: test/sql/logging/logging_buffer_size.test
# description: Test the buffer_size and only_flush_on_full_buffer log storage options
# group: [logging]
require noforcestorage
# By default, we flush the logger automatically at certain sensible moments (most importantly, on query end).
# For this test we want to disable automatic flushing so that we can test the buffer size param.
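# With 'buffer_size': 10 and 'only_flush_on_full_buffer': true, entries should become visible in duckdb_logs
# only once 10 of them have accumulated, which the counts below verify.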
statement ok
CALL enable_logging('QueryLog', storage_config={'buffer_size': 10, 'only_flush_on_full_buffer': true});
statement ok
SELECT 1 as a;
query I
SELECT count(*) FROM duckdb_logs
----
0
loop i 0 10
statement ok
SELECT 1 as a;
endloop
# We expect a single flush to have happened, i.e. 10 log entries
query I
SELECT count(*) FROM duckdb_logs
----
10
statement ok
CALL disable_logging()
statement ok
CALL truncate_duckdb_logs()
# Switch logging to the manual log type (the default, empty-string type written by write_log)
statement ok
CALL enable_logging('', level='trace', storage='memory', storage_config={'buffer_size': 3000, 'only_flush_on_full_buffer': true});
# Buffer is aaaaalmost full
statement ok
SELECT write_log('hello from the connection log scope', level := 'error', scope := 'connection') from range(0,2999);
query I
SELECT count(*) FROM duckdb_logs()
----
0
# "it's only a wafer-thin mint"
statement ok
SELECT write_log('hello from the connection log scope', level := 'error', scope := 'connection');
# Buffer has expl... er, been flushed
query I
SELECT count(*) FROM duckdb_logs() where type=''
----
3000
# Note that there are 2 logging contexts now, since we wrote the log entries across 2 queries, each with its own logging context
query I
SELECT count(*) FROM duckdb_log_contexts()
----
2
# Try with some big buffers now
statement ok
CALL disable_logging()
statement ok
CALL truncate_duckdb_logs()
statement ok
CALL enable_logging(level='trace', storage='memory', storage_config={'buffer_size': 20*2048});
statement ok
SELECT write_log('hello from the connection log scope', level := 'error', scope := 'connection') from range(0,40*2048);
statement ok
FROM duckdb_logs()
statement ok
CALL enable_logging(level='trace', storage='file', storage_config={'buffer_size': 20*2048, 'path': '__TEST_DIR__/logging_buffer_size'});
statement ok
SELECT write_log('hello from the connection log scope', level := 'error', scope := 'connection') from range(0,40*2048);
# Try direct flushing
statement ok
CALL enable_logging(level='trace', storage='file', storage_config={'buffer_size': 0, 'path': '__TEST_DIR__/logging_buffer_size'});
statement ok
SELECT write_log('hello from the connection log scope', level := 'error', scope := 'connection') from range(0, 2048);
# Try weird buffer size
statement error
CALL enable_logging(level='trace', storage='file', storage_config={'buffer_size': -1, 'path': '__TEST_DIR__/logging_buffer_size'});
----
Invalid Input Error

@@ -0,0 +1,223 @@
# name: test/sql/logging/logging_call_functions.test
# description: Test logging
# group: [logging]
require noforcestorage
# Create test view to get log settings
statement ok
CREATE VIEW log_settings AS select name, value from duckdb_settings() where name in ['logging_level', 'logging_mode', 'logging_storage', 'enable_logging', 'enabled_log_types', 'disabled_log_types'] ORDER BY name
####
# The simplest way to enable logging: turns on the default behaviour
###
statement ok
CALL enable_logging();
query II
FROM log_settings;
----
disabled_log_types (empty)
enable_logging 1
enabled_log_types (empty)
logging_level INFO
logging_mode LEVEL_ONLY
logging_storage memory
statement ok
CALL disable_logging()
# Logging now disabled again
query II
FROM log_settings;
----
disabled_log_types (empty)
enable_logging 0
enabled_log_types (empty)
logging_level INFO
logging_mode LEVEL_ONLY
logging_storage memory
####
# Enabling logging at a specific level
###
statement ok
CALL enable_logging(level='DEBUG');
query II
FROM log_settings;
----
disabled_log_types (empty)
enable_logging 1
enabled_log_types (empty)
logging_level DEBUG
logging_mode LEVEL_ONLY
logging_storage memory
# Note: disable_logging does not reset the log config
statement ok
CALL disable_logging()
# disable_logging turns off logging only
query II
FROM log_settings;
----
disabled_log_types (empty)
enable_logging 0
enabled_log_types (empty)
logging_level DEBUG
logging_mode LEVEL_ONLY
logging_storage memory
####
# Enabling logging of a specific type
###
statement ok
CALL enable_logging('QueryLog');
query II
FROM log_settings;
----
disabled_log_types (empty)
enable_logging 1
enabled_log_types QueryLog
logging_level INFO
logging_mode ENABLE_SELECTED
logging_storage memory
####
# Enabling logging of multiple types (note that the log level will match the lowest level among the types)
###
statement ok
CALL enable_logging(['QueryLog', 'FileSystem']);
# Use a sorted list to avoid a nondeterministic result
query II
SELECT name, list_sort(split(value, ',')) FROM log_settings;
----
disabled_log_types ['']
enable_logging [1]
enabled_log_types [FileSystem, QueryLog]
logging_level [TRACE]
logging_mode [ENABLE_SELECTED]
logging_storage [memory]
# Due to different file locking behaviour, this currently fails on Windows
require notwindows
####
# Configuring a different log storage, using the config param to pass arbitrary (extensible!) options
###
statement ok
CALL enable_logging('FileSystem', storage='file', storage_config={'path': '__TEST_DIR__/logging_call_functions_test_log'});
query II
FROM log_settings;
----
disabled_log_types (empty)
enable_logging 1
enabled_log_types FileSystem
logging_level TRACE
logging_mode ENABLE_SELECTED
logging_storage file
####
# Syntactic sugar exists to easily pass some log_storage related settings
###
statement ok
CALL enable_logging(storage='file', storage_path='__TEST_DIR__/logging_call_functions');
statement ok
CALL disable_logging()
####
# When storage_path is set, we can even omit storage: it will automatically default to storage='file'
###
statement ok
CALL enable_logging(storage_path='__TEST_DIR__/logging_call_functions');
query II
FROM log_settings;
----
disabled_log_types (empty)
enable_logging 1
enabled_log_types (empty)
logging_level INFO
logging_mode LEVEL_ONLY
logging_storage file
# Reset
statement ok
CALL enable_logging()
####
# Some invalid invocations throw nice errors
###
statement error
CALL enable_logging(hocus_pocus='this is bogus');
----
Binder Error: Invalid named parameter "hocus_pocus" for function enable_logging
# enable_logging is currently not atomic. TODO: fix this
query II
FROM log_settings;
----
disabled_log_types (empty)
enable_logging 1
enabled_log_types (empty)
logging_level INFO
logging_mode LEVEL_ONLY
logging_storage memory
# Unknown storage option: this error is thrown by the log storage, because not all log storages implement the same params
statement error
CALL enable_logging(storage='file', level='DEBUG', storage_config={'hocus_pocus': 'this is bogus', 'path': '__TEST_DIR__/hi.csv'});
----
Invalid Input Error: Unrecognized log storage config option for storage: 'FileLogStorage': 'hocus_pocus'
# enable_logging is currently not atomic. TODO: fix this
query II
FROM log_settings;
----
disabled_log_types (empty)
enable_logging 1
enabled_log_types (empty)
logging_level DEBUG
logging_mode LEVEL_ONLY
logging_storage file
statement ok
CALL disable_logging();
# The path param does not exist for the memory storage
statement error
CALL enable_logging(storage='memory', storage_config={'path': 'bla'});
----
Invalid Input Error: Unrecognized log storage config option for storage: 'InMemoryLogStorage': 'path'
# storage_config should be a struct
statement error
CALL enable_logging(storage_config='hi')
----
Invalid Input Error: EnableLogging: storage_config must be a struct
# Logging config remains untouched. TODO: fix
query II
FROM log_settings;
----
disabled_log_types (empty)
enable_logging 1
enabled_log_types (empty)
logging_level INFO
logging_mode LEVEL_ONLY
logging_storage memory

@@ -0,0 +1,68 @@
# name: test/sql/logging/logging_context_ids.test
# description: Check the connection_id and transaction_id fields
# group: [logging]
require noforcestorage
statement ok con1
CALL enable_logging();
# We use these to offset the ids, which don't start at 0 here due to internal queries/transactions that DuckDB performs
statement ok con2
set variable base_transaction_id = current_transaction_id() + 2
statement ok con2
set variable base_query_id = current_query_id() + 1
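# (The +2 and +1 presumably account for the transaction/query ids consumed by these SET VARIABLE statements themselves.)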
# Con2 will use autocommit on a new connection
statement ok con2
SELECT write_log('hey1', log_type := 'test_logging_autocommit')
statement ok con2
SELECT write_log('hey2', log_type := 'test_logging_autocommit')
# We expect relative transaction_ids 0 & 1 here
query II con2
SELECT
transaction_id - getvariable('base_transaction_id') as relative_transaction_id,
query_id - getvariable('base_query_id') as relative_query_id,
FROM duckdb_logs
WHERE
connection_id=current_connection_id() and
type='test_logging_autocommit';
----
0 0
1 1
# Con3 will do the same, but within a transaction
# Again, we calculate the offsets first
statement ok con3
set variable base_transaction_id = current_transaction_id() + 2
statement ok con3
set variable base_query_id = current_query_id() + 1
statement ok con3
BEGIN TRANSACTION;
statement ok con3
SELECT write_log('hey1', log_type := 'test_logging_transaction')
statement ok con3
SELECT write_log('hey2', log_type := 'test_logging_transaction')
statement ok con3
COMMIT
# Now both queries were performed in the same transaction
query II con3
SELECT
transaction_id - getvariable('base_transaction_id') as relative_transaction_id,
query_id - getvariable('base_query_id') as relative_query_id,
FROM duckdb_logs
WHERE
connection_id=current_connection_id() and
type='test_logging_transaction';
----
0 1
0 2

@@ -0,0 +1,208 @@
# name: test/sql/logging/logging_csv.test
# description: Test csv formatted log storages (stdout & file)
# group: [logging]
require noforcestorage
# Due to different file locking behaviour, this currently fails on Windows
require notwindows
# Enable FileSystem logging to single csv file
statement ok
CALL enable_logging(['FileSystem'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_log.csv', 'normalize': false});
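# ('normalize': false keeps everything in one denormalized csv file; the normalized variant further down
# splits entries and contexts into separate files.)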
# Read some data to trigger FileSystem log
statement ok
FROM "data/csv/big_number.csv"
query IIIIII
DESCRIBE FROM '__TEST_DIR__/logging_csv_log.csv';
----
context_id BIGINT YES NULL NULL NULL
scope VARCHAR YES NULL NULL NULL
connection_id BIGINT YES NULL NULL NULL
transaction_id BIGINT YES NULL NULL NULL
query_id BIGINT YES NULL NULL NULL
thread_id VARCHAR YES NULL NULL NULL
timestamp TIMESTAMP YES NULL NULL NULL
type VARCHAR YES NULL NULL NULL
log_level VARCHAR YES NULL NULL NULL
message VARCHAR YES NULL NULL NULL
# Ensure we can reparse the structured log message from the csv
query III
SELECT
scope,
path: parse_duckdb_log_message('FileSystem', message)['path'],
op: parse_duckdb_log_message('FileSystem', message)['op'],
FROM "__TEST_DIR__/logging_csv_log.csv"
WHERE path = 'data/csv/big_number.csv';
----
CONNECTION data/csv/big_number.csv OPEN
CONNECTION data/csv/big_number.csv READ
CONNECTION data/csv/big_number.csv READ
CONNECTION data/csv/big_number.csv CLOSE
statement ok
CALL disable_logging()
# Truncating the logs will clear the csv log file
statement ok
CALL truncate_duckdb_logs();
query I
select count(*) FROM "__TEST_DIR__/logging_csv_log.csv";
----
0
# Enable FileSystem logging to normalized files
statement ok
CALL enable_logging(['FileSystem'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_logs_normalized', 'normalize': true});
# Read some data to trigger FileSystem log
statement ok
FROM "data/csv/big_number.csv"
# Ensure we can reparse the structured log message from the csv
query III
SELECT
context_id is not null,
path: parse_duckdb_log_message('FileSystem', message)['path'],
op: parse_duckdb_log_message('FileSystem', message)['op'],
FROM "__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_entries.csv"
WHERE path = 'data/csv/big_number.csv';
----
1 data/csv/big_number.csv OPEN
1 data/csv/big_number.csv READ
1 data/csv/big_number.csv READ
1 data/csv/big_number.csv CLOSE
# Contexts are now in a separate csv file
# TODO: is this correct?
query I
SELECT scope
FROM "__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_contexts.csv";
----
CONNECTION
CONNECTION
# Check schema
query IIIIII
DESCRIBE FROM '__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_entries.csv';
----
context_id BIGINT YES NULL NULL NULL
timestamp TIMESTAMP YES NULL NULL NULL
type VARCHAR YES NULL NULL NULL
log_level VARCHAR YES NULL NULL NULL
message VARCHAR YES NULL NULL NULL
# Check schema
query IIIIII
DESCRIBE FROM '__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_contexts.csv';
----
context_id BIGINT YES NULL NULL NULL
scope VARCHAR YES NULL NULL NULL
connection_id BIGINT YES NULL NULL NULL
transaction_id BIGINT YES NULL NULL NULL
query_id BIGINT YES NULL NULL NULL
thread_id VARCHAR YES NULL NULL NULL
statement ok
CALL disable_logging();
# Truncating the logs will clear both csv files
statement ok
CALL truncate_duckdb_logs();
query I
select count(*) FROM "__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_contexts.csv";
----
0
query I
select count(*) FROM "__TEST_DIR__/logging_csv_logs_normalized/duckdb_log_entries.csv";
----
0
statement ok
CALL enable_logging(['FileSystem'], storage='stdout');
# TODO: we can't nicely test logging to stdout without polluting test runner output
statement ok
CALL truncate_duckdb_logs();
statement ok
CALL disable_logging();
# Both stdout and file logging have a buffer size param which controls when the buffered csv data is written out. This is currently
# hard to test, though, since we flush these buffers after every query anyway.
statement ok
CALL enable_logging(['FileSystem'], storage='stdout', storage_config={'buffer_size': 1000000});
statement ok
CALL disable_logging();
# Try some invalid configs ensuring they throw nice errors
statement error
CALL enable_logging(['FileSystem'], storage='stdout', storage_config={'bla': 'bla'});
----
Invalid Input Error: Unrecognized log storage config option for storage: 'StdOutLogStorage': 'bla'
statement error
CALL enable_logging(['FileSystem'], storage='stdout', storage_config={'path': './file.csv'});
----
Invalid Input Error: Unrecognized log storage config option for storage: 'StdOutLogStorage': 'path'
# Switching between normalized and denormalized logs will throw if the log is non-empty
statement ok
CALL truncate_duckdb_logs()
statement ok
CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_log.csv'});
statement ok
SELECT 1;
statement ok
CALL truncate_duckdb_logs()
# statement error
# CALL enable_logging(['QueryLog'], storage='file', storage_config={'entries_path': '__TEST_DIR__/logging_csv_log_entries.csv', 'contexts_path': '__TEST_DIR__/logging_csv_log_contexts.csv'});
# ----
# Invalid Configuration Error: Cannot change between normalized and denormalized with a non-empty log. Please truncate the log first
statement ok
explain FROM duckdb_logs
# This is not allowed
statement error
CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_log.csv', 'normalize': true});
----
Invalid Configuration Error: Can not set path to '
statement error
CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_log.csv', 'normalize': true});
----
' while normalize is true. Normalize will make DuckDB write multiple log files to more efficiently store log entries. Please specify a directory path instead of a csv file path, or set normalize to false.
statement ok
CALL truncate_duckdb_logs();
statement ok
CALL disable_logging();
# Test switching CSV delimiters
statement ok
CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '__TEST_DIR__/logging_csv_log_delim.csv', 'delim': ';'});
statement ok
SELECT 1;
query I
SELECT message FROM read_csv('__TEST_DIR__/logging_csv_log_delim.csv', delim=';');
----
SELECT 1

@@ -0,0 +1,102 @@
# name: test/sql/logging/logging_file_bind_replace.test
# description: Test file-backed log storage with the duckdb_logs() and duckdb_log_contexts() functions
# group: [logging]
require noforcestorage
# Due to different file locking behaviour, this currently fails on Windows
require notwindows
# Enable QueryLog logging to a single csv file (note that normalize is implicit here)
statement ok
CALL enable_logging(['QueryLog'], storage_path='__TEST_DIR__/logging_file_bind_replace_log.csv');
# Run some statements to trigger QueryLog entries
statement ok
SELECT 1 as a
statement ok
FROM duckdb_logs_parsed('FileSystem');
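# (duckdb_logs_parsed appears to be shorthand for duckdb_logs with the structured message parsed,
# cf. parse_duckdb_log_message in logging_csv.test.)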
statement ok
CALL disable_logging();
query IIIIII nosort describe_duckdb_logs
DESCRIBE FROM duckdb_logs;
----
query I
SELECT message FROM duckdb_logs_parsed('QueryLog') WHERE starts_with(message, 'SELECT 1');
----
SELECT 1 as a
statement ok
CALL truncate_duckdb_logs();
# Enable QueryLog logging to normalized (two-csv-file) storage
statement ok
CALL enable_logging(['QueryLog'], storage='file', storage_config={'path': '__TEST_DIR__/logging_file_bind_replace_normalized/', 'normalize': true});
# Trigger log
statement ok
SELECT 1 as b
statement ok
FROM duckdb_logs
statement ok
CALL disable_logging();
query IIIIII nosort describe_duckdb_logs
DESCRIBE FROM duckdb_logs
----
query I
SELECT message FROM duckdb_logs_parsed('QueryLog') WHERE starts_with(message, 'SELECT 1');
----
SELECT 1 as b
statement ok
CALL truncate_duckdb_logs();
# Now rerun with the in-memory storage to ensure everything matches up
statement ok
CALL enable_logging(['QueryLog'], storage='memory');
statement ok
SELECT 1 as c
statement ok
CALL disable_logging();
query IIIIII nosort describe_duckdb_logs
DESCRIBE FROM duckdb_logs
----
query I
SELECT message FROM duckdb_logs_parsed('QueryLog') WHERE starts_with(message, 'SELECT 1');
----
SELECT 1 as c
statement ok
CALL truncate_duckdb_logs();
# Now rerun with all log types enabled
statement ok
CALL enable_logging(level='trace', storage='memory');
statement ok
SELECT 1 as d
statement ok
CALL disable_logging();
query IIIIII nosort describe_duckdb_logs
DESCRIBE FROM duckdb_logs
----
query I
SELECT message FROM duckdb_logs_parsed('QueryLog') WHERE starts_with(message, 'SELECT 1');
----
SELECT 1 as d

@@ -0,0 +1,37 @@
# name: test/sql/logging/logging_file_persistence.test
# description: Test file backed log storage persistence
# group: [logging]
# Due to different file locking behaviour, this currently fails on Windows
require notwindows
# load the DB from disk
load __TEST_DIR__/logging_file_persistence.test
# Enable FileSystem logging to single csv file
statement ok
CALL enable_logging('FileSystem', storage='file', storage_config={'path': '__TEST_DIR__/logging_file_persistence.csv'});
# Read some data to trigger FileSystem log
statement ok
FROM "data/csv/big_number.csv"
statement ok
CALL disable_logging();
query IIIIIIIII nosort log_output
FROM duckdb_logs;
----
restart
# TODO: configuring log storage config is clunky now
statement ok
CALL enable_logging('FileSystem', storage='file', storage_config={'path': '__TEST_DIR__/logging_file_persistence.csv'});
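# After the restart, the file-backed log should still contain the pre-restart entries: the shared
# 'log_output' label asserts that the result below matches the one above.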
statement ok
CALL disable_logging();
query IIIIIIIII nosort log_output
FROM duckdb_logs;
----

@@ -0,0 +1,42 @@
# name: test/sql/logging/logging_types.test
# group: [logging]
require noforcestorage
statement ok
CALL enable_logging('FileSystem');
statement ok
copy (select 1 as a) to '__TEST_DIR__/par.csv'
statement ok
FROM '__TEST_DIR__/par.csv'
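# duckdb_logs_parsed('FileSystem') exposes the structured message fields (fs, path, op, bytes, pos) as regular columns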
query IIIIIIII
SELECT type, log_level, scope, fs, parse_filename(path), op, bytes, pos FROM duckdb_logs_parsed('FileSystem') ORDER BY timestamp
----
FileSystem TRACE CONNECTION LocalFileSystem par.csv OPEN NULL NULL
FileSystem TRACE CONNECTION LocalFileSystem par.csv WRITE 4 0
FileSystem TRACE CONNECTION LocalFileSystem par.csv CLOSE NULL NULL
FileSystem TRACE CONNECTION LocalFileSystem par.csv OPEN NULL NULL
FileSystem TRACE CONNECTION LocalFileSystem par.csv READ 4 0
FileSystem TRACE CONNECTION LocalFileSystem par.csv READ 0 4
FileSystem TRACE CONNECTION LocalFileSystem par.csv CLOSE NULL NULL
statement ok
CALL truncate_duckdb_logs()
statement ok
CALL enable_logging('QueryLog');
statement ok
SELECT 'hallo' as value;
query IIII
SELECT type, log_level, scope, message FROM duckdb_logs_parsed('QueryLog') ORDER BY timestamp
----
QueryLog INFO CONNECTION SELECT 'hallo' as value;
statement ok
SELECT type, log_level, scope, message FROM duckdb_logs WHERE type='QueryLog';

@@ -0,0 +1,66 @@
# name: test/sql/logging/physical_operator_logging.test_slow
# description: Test physical operator logging
# group: [logging]
require parquet
statement ok
CALL enable_logging('PhysicalOperator')
# force external so we can see the external log message being added
statement ok
set debug_force_external=true
statement ok
set threads=1
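# (a single thread presumably keeps the per-operator log counts below deterministic)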
# trigger external join, order result by i so we have good and predictable zone maps
statement ok
copy (
select t1.i
from range(3_000_000) t1(i)
join range(3_000_000) t2(i)
using (i)
order by i
) to '__TEST_DIR__/physical_operator_logging.parquet'
# this is for row group read/skip logging
statement ok
from '__TEST_DIR__/physical_operator_logging.parquet' where i = 42
statement ok
CALL disable_logging()
# enabling external hash join happens exactly once
query I
select count(*) from duckdb_logs_parsed('PhysicalOperator') where class = 'PhysicalHashJoin' and event = 'Finalize' and info.external
----
1
# 4 radix bits: 2^4 = 16 partitions, so the external hash join should build exactly 16 times
query I
select count(*) from duckdb_logs_parsed('PhysicalOperator') where class = 'JoinHashTable' and event = 'Build'
----
16
# all flushed row groups should be logged, these should be equal
query I
select count(*) = (
select count(distinct row_group_id)
from parquet_metadata('__TEST_DIR__/physical_operator_logging.parquet')
)
from duckdb_logs_parsed('PhysicalOperator') where class = 'ParquetWriter' and event = 'FlushRowGroup'
----
1
# there are many row groups, but only one of them contains the value 42, so we should only have 1 of these
query I
select count(*) from duckdb_logs_parsed('PhysicalOperator') where class = 'ParquetReader' and event = 'ReadRowGroup'
----
1
# all other row groups are skipped (3M rows at the parquet writer's default row group size of 122,880 rows gives ~25 row groups: 1 read, 24 skipped)
query I
select count(*) from duckdb_logs_parsed('PhysicalOperator') where class = 'ParquetReader' and event = 'SkipRowGroup'
----
24

@@ -0,0 +1,73 @@
# name: test/sql/logging/test_logging_function.test
# description: Use test_logging function to test logging
# group: [logging]
require noforcestorage
query IIIIIIIIII
from duckdb_logs
----
statement error
PRAGMA enable_logging;
----
Pragma Function with name enable_logging does not exist, but a table function with the same name exists, try
statement ok
CALL enable_logging();
statement ok
set logging_level='info';
# We use these to offset the ids, which don't start at 0 here due to internal queries/transactions that DuckDB performs
statement ok
set variable base_connection_id = current_connection_id()
statement ok
set variable base_transaction_id = current_transaction_id() + 2
statement ok
set variable base_query_id = current_query_id() + 1
statement ok
SELECT write_log('hello from the global log scope', level := 'info', scope := 'database', log_type := 'global_type' ) from range(0,3);
statement ok
SELECT write_log('hello from the connection log scope', level := 'info', scope := 'connection', log_type := 'client_context_type' ) from range(0,3);
statement ok
SELECT write_log('hello from the file_opener log scope', level := 'info', scope := 'file_opener', log_type := 'opener_type' ) from range(0,3);
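# Note how scopes map in the output below: 'database' surfaces as DATABASE scope, while 'connection' and
# 'file_opener' both surface as CONNECTION scope.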
statement ok
from duckdb_logs
query IIII
SELECT * EXCLUDE (context_id, timestamp, connection_id, transaction_id, query_id, thread_id) FROM duckdb_logs where starts_with(message, 'hello from the') order by query_id
----
CONNECTION client_context_type INFO hello from the connection log scope
CONNECTION client_context_type INFO hello from the connection log scope
CONNECTION client_context_type INFO hello from the connection log scope
CONNECTION opener_type INFO hello from the file_opener log scope
CONNECTION opener_type INFO hello from the file_opener log scope
CONNECTION opener_type INFO hello from the file_opener log scope
DATABASE global_type INFO hello from the global log scope
DATABASE global_type INFO hello from the global log scope
DATABASE global_type INFO hello from the global log scope
query IIII
SELECT
type,
connection_id - getvariable('base_connection_id'),
transaction_id - getvariable('base_transaction_id'),
query_id - getvariable('base_query_id')
FROM duckdb_logs where starts_with(message, 'hello from the') order by query_id
----
client_context_type 0 1 1
client_context_type 0 1 1
client_context_type 0 1 1
opener_type 0 2 2
opener_type 0 2 2
opener_type 0 2 2
global_type NULL NULL NULL
global_type NULL NULL NULL
global_type NULL NULL NULL

@@ -0,0 +1,43 @@
# name: test/sql/logging/test_logging_function_large.test_slow
# description: Use test_logging function with 1 million log entries
# group: [logging]
require noforcestorage
query IIIIIIIIII
from duckdb_logs
----
statement ok
CALL enable_logging();
statement ok
set logging_level='debug';
loop i 0 100
statement ok
SELECT write_log('hi_global', level := 'info', scope := 'database') from range(0,2500);
statement ok
SELECT write_log('hi_client', level := 'info', scope := 'connection') from range(0,2500);
statement ok
SELECT write_log('hi_file', level := 'info', scope := 'file_opener') from range(0,2500);
endloop
query II
SELECT count(*), message FROM duckdb_logs where starts_with(message, 'hi_') group by message order by message
----
250000 hi_client
250000 hi_file
250000 hi_global
statement ok
CALL truncate_duckdb_logs();
query I
SELECT count(*) FROM duckdb_logs;
----
0