should be it
external/duckdb/.github/patches/extensions/README.md (vendored, new file)
# Extension patches

Patches in this directory are used to smooth the process of introducing changes to DuckDB that break compatibility with an
out-of-tree extension. Extensions installed from Git URLs can automatically apply patches found in this directory. The `APPLY_PATCHES`
flag should be used to explicitly enable this feature. For example, let's say our extension config looks like this:

```shell
duckdb_extension_load(spatial
    DONT_LINK
    GIT_URL https://github.com/duckdb/duckdb_spatial
    GIT_TAG f577b9441793f9170403e489f5d3587e023a945f
    APPLY_PATCHES
    )
```

In this example, upon downloading the spatial extension, all patches matching `.github/patches/extensions/spatial/*.patch`
will be applied automatically.
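
For intuition, the effect is roughly equivalent to running `git apply` for each patch inside the freshly cloned extension source.
The sketch below uses illustrative checkout paths and is not the actual build machinery:

```shell
# Rough sketch of what APPLY_PATCHES amounts to (illustrative paths, not the real build scripts)
DUCKDB_ROOT=~/duckdb            # assumed DuckDB checkout containing .github/patches
EXT_SRC=~/duckdb_spatial        # assumed extension checkout at the pinned GIT_TAG
cd "$EXT_SRC"
for patch in "$DUCKDB_ROOT"/.github/patches/extensions/spatial/*.patch; do
    git apply "$patch"          # apply each patch on top of the pinned commit
done
```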

Note that the reason for requiring the `APPLY_PATCHES` flag to be enabled explicitly is to make it easier for developers reading
the extension config to detect that a patch is present. For this reason, the patching mechanism will actually fail if `APPLY_PATCHES`
is set but there are no patches in `.github/patches/extensions/<ext>/*.patch`.

# Workflow

Imagine a change to DuckDB is introduced that breaks compatibility with extension X. The workflow for handling this is as follows:

### PR #1: breaking change to DuckDB
- Commit the breaking change to DuckDB
- Fix the breakage in extension X, producing a patch with the fix (be wary of already existing patches); one way to produce such a patch is sketched below
- Commit the patch in `.github/patches/extensions/x/*.patch` using a descriptive name
- Enable `APPLY_PATCHES` for extension X in `.github/config/out_of_tree_extensions.cmake` (if not already enabled)
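
One hedged way to produce such a patch, using spatial as the example extension (the paths and the patch filename are illustrative):

```shell
# Illustrative: turning a local fix in the extension checkout into a patch file for DuckDB's CI
cd ~/duckdb_spatial                      # extension checkout at the pinned GIT_TAG, with the fix applied locally
git diff > ~/duckdb/.github/patches/extensions/spatial/fix_breaking_change.patch
```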

### PR #2: patch to extension X
- Apply (all) the patch(es) in `.github/patches/extensions/x/*.patch` to extension X; a sketch of this step follows
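
A rough sketch of this step, again with illustrative paths and a hypothetical commit message:

```shell
# Illustrative: upstreaming the accumulated patches into the extension's own repository (PR #2)
cd ~/duckdb_spatial                      # extension checkout, branched from the pinned commit
git apply ~/duckdb/.github/patches/extensions/spatial/*.patch
git commit -am "Fix compatibility with upcoming DuckDB changes"    # then open the PR against the extension repo
```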

### PR #3: update extension X in DuckDB
- Remove the patches in `.github/patches/extensions/x/*.patch` (a rough sketch of this cleanup follows the list)
- Remove the `APPLY_PATCHES` flag from the config
- Update the hash of the extension in the config
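
A minimal sketch of the cleanup, assuming the same illustrative paths as above:

```shell
# Illustrative cleanup once the fix has landed in the extension (PR #3)
cd ~/duckdb
git rm .github/patches/extensions/spatial/*.patch    # drop the now-redundant patches
# Then edit .github/config/out_of_tree_extensions.cmake:
#   - remove the APPLY_PATCHES line for the extension
#   - bump GIT_TAG to the extension commit that contains the fix
```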

external/duckdb/.github/patches/extensions/avro/fix.patch (vendored, new file, 13 lines)
diff --git a/src/field_ids.cpp b/src/field_ids.cpp
index d197f8d..52fb48c 100644
--- a/src/field_ids.cpp
+++ b/src/field_ids.cpp
@@ -5,6 +5,8 @@ namespace duckdb {

namespace avro {

+constexpr const char *FieldID::DUCKDB_FIELD_ID;
+
FieldID::FieldID() : set(false) {
}

external/duckdb/.github/patches/extensions/delta/remove_include.patch (vendored, new file, 12 lines)
diff --git a/src/storage/delta_insert.cpp b/src/storage/delta_insert.cpp
index 93ebf9f..8eea9f5 100644
--- a/src/storage/delta_insert.cpp
+++ b/src/storage/delta_insert.cpp
@@ -1,7 +1,5 @@
#include "storage/delta_insert.hpp"

-#include <duckdb/common/sort/partition_state.hpp>
-
#include "duckdb/catalog/catalog_entry/copy_function_catalog_entry.hpp"
#include "duckdb/main/client_data.hpp"
#include "duckdb/planner/operator/logical_copy_to_file.hpp"

external/duckdb/.github/patches/extensions/httpfs/fix.patch (vendored, new file, 38 lines)
diff --git a/src/s3fs.cpp b/src/s3fs.cpp
index 72eddc3..601ecba 100644
--- a/src/s3fs.cpp
+++ b/src/s3fs.cpp
@@ -895,7 +895,7 @@ void S3FileHandle::Initialize(optional_ptr<FileOpener> opener) {
correct_region = new_region->second;
}
auto extra_text = S3FileSystem::GetS3BadRequestError(auth_params, correct_region);
- throw Exception(error.Type(), error.RawMessage() + extra_text, extra_info);
+ throw Exception(extra_info, error.Type(), error.RawMessage() + extra_text);
}
if (entry->second == "403") {
// 403: FORBIDDEN
@@ -905,7 +905,7 @@ void S3FileHandle::Initialize(optional_ptr<FileOpener> opener) {
} else {
extra_text = S3FileSystem::GetS3AuthError(auth_params);
}
- throw Exception(error.Type(), error.RawMessage() + extra_text, extra_info);
+ throw Exception(extra_info, error.Type(), error.RawMessage() + extra_text);
}
}
throw;
@@ -941,13 +941,13 @@ bool S3FileSystem::CanHandleFile(const string &fpath) {
void S3FileSystem::RemoveFile(const string &path, optional_ptr<FileOpener> opener) {
auto handle = OpenFile(path, FileFlags::FILE_FLAGS_NULL_IF_NOT_EXISTS, opener);
if (!handle) {
- throw IOException("Could not remove file \"%s\": %s", {{"errno", "404"}}, path, "No such file or directory");
+ throw IOException({{"errno", "404"}}, "Could not remove file \"%s\": %s", path, "No such file or directory");
}

auto &s3fh = handle->Cast<S3FileHandle>();
auto res = DeleteRequest(*handle, s3fh.path, {});
if (res->status != HTTPStatusCode::OK_200 && res->status != HTTPStatusCode::NoContent_204) {
- throw IOException("Could not remove file \"%s\": %s", {{"errno", to_string(static_cast<int>(res->status))}},
+ throw IOException({{"errno", to_string(static_cast<int>(res->status))}}, "Could not remove file \"%s\": %s",
path, res->GetError());
}
}

external/duckdb/.github/patches/extensions/iceberg/remove_include.patch (vendored, new file, 12 lines)
diff --git a/src/storage/iceberg_insert.cpp b/src/storage/iceberg_insert.cpp
index aa2371e8..cccc82d6 100644
--- a/src/storage/iceberg_insert.cpp
+++ b/src/storage/iceberg_insert.cpp
@@ -7,7 +7,6 @@

#include "iceberg_multi_file_list.hpp"

-#include "duckdb/common/sort/partition_state.hpp"
#include "duckdb/catalog/catalog_entry/copy_function_catalog_entry.hpp"
#include "duckdb/main/client_data.hpp"
#include "duckdb/planner/operator/logical_copy_to_file.hpp"

external/duckdb/.github/patches/extensions/inet/hugeint_fixes.patch (vendored, new file, 19 lines)
diff --git a/src/inet_functions.cpp b/src/inet_functions.cpp
index da92a4c..afa7446 100644
--- a/src/inet_functions.cpp
+++ b/src/inet_functions.cpp
@@ -185,11 +185,12 @@ static INET_TYPE AddImplementation(INET_TYPE ip, hugeint_t val) {
if (val > 0) {
address_out =
AddOperatorOverflowCheck::Operation<uhugeint_t, uhugeint_t, uhugeint_t>(
- address_in, val);
+ address_in, (uhugeint_t)val);
} else {
+ // TODO: this is off for when val is the minimal uhugeint_t value
address_out =
SubtractOperatorOverflowCheck::Operation<uhugeint_t, uhugeint_t,
- uhugeint_t>(address_in, -val);
+ uhugeint_t>(address_in, (uhugeint_t)(-val));
}

if (addr_type == IPAddressType::IP_ADDRESS_V4 &&

external/duckdb/.github/patches/extensions/spatial/fix.patch (vendored, new file, 16 lines)
diff --git a/src/spatial/modules/main/spatial_functions_scalar.cpp b/src/spatial/modules/main/spatial_functions_scalar.cpp
index 60ca7373ce..a44cfc7a82 100644
--- a/src/spatial/modules/main/spatial_functions_scalar.cpp
+++ b/src/spatial/modules/main/spatial_functions_scalar.cpp
@@ -9243,6 +9243,11 @@ struct ST_MMin : VertexAggFunctionBase<ST_MMin, VertexMinAggOp> {
static constexpr auto ORDINATE = VertexOrdinate::M;
};

+constexpr const char * ST_M::NAME;
+constexpr const char * ST_X::NAME;
+constexpr const char * ST_Y::NAME;
+constexpr const char * ST_Z::NAME;
+
} // namespace

// Helper to access the constant distance from the bind data

external/duckdb/.github/patches/extensions/sqlsmith/fix.patch (vendored, new file, 44 lines)
diff --git a/src/statement_generator.cpp b/src/statement_generator.cpp
index fc34c7c..5defc4e 100644
--- a/src/statement_generator.cpp
+++ b/src/statement_generator.cpp
@@ -373,8 +373,9 @@ unique_ptr<QueryNode> StatementGenerator::GenerateQueryNode() {
GenerateCTEs(*setop);
setop->setop_type = Choose<SetOperationType>({SetOperationType::EXCEPT, SetOperationType::INTERSECT,
SetOperationType::UNION, SetOperationType::UNION_BY_NAME});
- setop->left = GenerateQueryNode();
- setop->right = GenerateQueryNode();
+ for(idx_t i = 0; i < 2; i++) {
+ setop->children.push_back(GenerateQueryNode());
+ }
switch (setop->setop_type) {
case SetOperationType::EXCEPT:
case SetOperationType::INTERSECT:
diff --git a/src/statement_simplifier.cpp b/src/statement_simplifier.cpp
index 2cd7f06..4602928 100644
--- a/src/statement_simplifier.cpp
+++ b/src/statement_simplifier.cpp
@@ -196,8 +196,9 @@ void StatementSimplifier::Simplify(SelectNode &node) {
}

void StatementSimplifier::Simplify(SetOperationNode &node) {
- Simplify(node.left);
- Simplify(node.right);
+ for(auto &child : node.children) {
+ Simplify(child);
+ }
}

void StatementSimplifier::Simplify(CommonTableExpressionMap &cte) {
@@ -218,8 +219,9 @@ void StatementSimplifier::Simplify(unique_ptr<QueryNode> &node) {
break;
case QueryNodeType::SET_OPERATION_NODE: {
auto &setop = node->Cast<SetOperationNode>();
- SimplifyReplace(node, setop.left);
- SimplifyReplace(node, setop.right);
+ for(auto &child : setop.children) {
+ SimplifyReplace(node, child);
+ }
Simplify(setop);
break;
}