This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this
push:
new f04faf08e00 Fix some store errors
f04faf08e00 is described below
commit f04faf08e00d5b49a18934b650d30af1db1595af
Author: Jinbao Chen <[email protected]>
AuthorDate: Fri Nov 7 11:00:13 2025 +0800
Fix some store errors
---
src/backend/catalog/storage.c | 35 ++++++-----
src/backend/optimizer/prep/preptlist.c | 96 +++++++++++++-----------------
src/include/parser/parse_coerce.h | 3 +
src/test/regress/expected/create_index.out | 73 -----------------------
src/test/regress/expected/create_misc.out | 49 ---------------
src/test/regress/serial_schedule | 14 ++---
src/test/regress/sql/create_index.sql | 16 -----
src/test/regress/sql/create_misc.sql | 10 ----
8 files changed, 70 insertions(+), 226 deletions(-)
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 2a9b82d884b..1c23dac5a78 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -386,19 +386,20 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
* to-be-truncated blocks might still exist on disk but have older
* contents than expected, which can cause replay to fail. It's OK for the
* blocks to not exist on disk at all, but not for them to have the wrong
- * contents.
- */
- Assert(!MyProc->delayChkptEnd);
- MyProc->delayChkptEnd = true;
-
- /*
- * We WAL-log the truncation before actually truncating, which means
- * trouble if the truncation fails. If we then crash, the WAL replay
- * likely isn't going to succeed in the truncation either, and cause a
- * PANIC. It's tempting to put a critical section here, but that cure
- * would be worse than the disease. It would turn a usually harmless
- * failure to truncate, that might spell trouble at WAL replay, into a
- * certain PANIC.
+ * contents. For this reason, we need to set DELAY_CHKPT_COMPLETE while
+ * this code executes.
+ *
+ * Second, the call to smgrtruncate() below will in turn call
+ * RegisterSyncRequest(). We need the sync request created by that call to
+ * be processed before the checkpoint completes. CheckPointGuts() will
+ * call ProcessSyncRequests(), but if we register our sync request after
+ * that happens, then the WAL record for the truncation could end up
+ * preceding the checkpoint record, while the actual sync doesn't happen
+ * until the next checkpoint. To prevent that, we need to set
+ * DELAY_CHKPT_START here. That way, if the XLOG_SMGR_TRUNCATE precedes
+ * the redo pointer of a concurrent checkpoint, we're guaranteed that the
+ * corresponding sync request will be processed before the checkpoint
+ * completes.
*/
Assert((MyProc->delayChkptFlags & (DELAY_CHKPT_START | DELAY_CHKPT_COMPLETE)) == 0);
MyProc->delayChkptFlags |= DELAY_CHKPT_START | DELAY_CHKPT_COMPLETE;
@@ -416,6 +417,8 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
*
* (See also pg_visibilitymap.c if changing this code.)
*/
+ START_CRIT_SECTION();
+
if (RelationNeedsWAL(rel))
{
/*
@@ -450,10 +453,12 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
* longer exist after truncation is complete, and then truncate the
* corresponding files on disk.
*/
- smgrtruncate(rel->rd_smgr, forks, nforks, blocks);
+ smgrtruncate2(RelationGetSmgr(rel), forks, nforks, old_blocks, blocks);
+
+ END_CRIT_SECTION();
/* We've done all the critical work, so checkpoints are OK now. */
- MyProc->delayChkptEnd = false;
+ MyProc->delayChkptFlags &= ~(DELAY_CHKPT_START | DELAY_CHKPT_COMPLETE);
/*
* Update upper-level FSM pages to account for the truncation. This is
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index 321d6b202aa..d2a87ae218b 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -458,40 +458,9 @@ expand_insert_targetlist(PlannerInfo *root, List *tlist, Relation rel, Index spl
* Again, code comparing the finished plan to the
target relation
* must account for this.
*/
- Oid atttype = att_tup->atttypid;
- Oid attcollation =
att_tup->attcollation;
Node *new_expr;
- if (!att_tup->attisdropped)
- {
- if (split_update_result_relation)
- {
- new_expr = (Node *)
makeVar(split_update_result_relation,
-
attrno,
-
atttype,
-
att_tup->atttypmod,
-
attcollation,
-
0);
- }
- else
- {
- new_expr = (Node *) makeConst(atttype,
-
-1,
-
attcollation,
-
att_tup->attlen,
-
(Datum) 0,
-
true, /* isnull */
-
att_tup->attbyval);
- new_expr = coerce_to_domain(new_expr,
-
InvalidOid, -1,
-
atttype,
-
COERCION_IMPLICIT,
-
COERCE_IMPLICIT_CAST,
-
-1,
-
false);
- }
- }
- else if (att_tup->attgenerated)
+ if (att_tup->attisdropped)
{
/* Insert NULL for dropped column */
new_expr = (Node *) makeConst(INT4OID,
@@ -502,32 +471,47 @@ expand_insert_targetlist(PlannerInfo *root, List *tlist, Relation rel, Index spl
true, /* isnull */
true /* byval */ );
}
- else if (att_tup->attgenerated)
- {
- /* Generated column, insert a NULL of the base
type */
- Oid baseTypeId =
att_tup->atttypid;
- int32 baseTypeMod =
att_tup->atttypmod;
-
- baseTypeId = getBaseTypeAndTypmod(baseTypeId,
&baseTypeMod);
- new_expr = (Node *) makeConst(baseTypeId,
-
baseTypeMod,
-
att_tup->attcollation,
-
att_tup->attlen,
-
(Datum) 0,
-
true, /* isnull */
-
att_tup->attbyval);
- }
else
{
- /* Normal column, insert a NULL of the column
datatype */
- new_expr =
coerce_null_to_domain(att_tup->atttypid,
-
att_tup->atttypmod,
-
att_tup->attcollation,
-
att_tup->attlen,
-
att_tup->attbyval);
- /* Must run expression preprocessing on any
non-const nodes */
- if (!IsA(new_expr, Const))
- new_expr = eval_const_expressions(root,
new_expr);
+ if (split_update_result_relation)
+ {
+ new_expr = (Node *)
makeVar(split_update_result_relation,
+
attrno,
+
att_tup->atttypid,
+
att_tup->atttypmod,
+
att_tup->attcollation,
+
0);
+ }
+ else
+ {
+ if (att_tup->attgenerated)
+ {
+ /* Generated column, insert a
NULL of the base type */
+ Oid
baseTypeId = att_tup->atttypid;
+ int32 baseTypeMod =
att_tup->atttypmod;
+
+ baseTypeId =
getBaseTypeAndTypmod(baseTypeId, &baseTypeMod);
+ new_expr = (Node *)
makeConst(baseTypeId,
+
baseTypeMod,
+
att_tup->attcollation,
+
att_tup->attlen,
+
(Datum) 0,
+
true, /* isnull */
+
att_tup->attbyval);
+ }
+ else
+ {
+ /* Normal column, insert a NULL
of the column datatype */
+ new_expr =
coerce_null_to_domain(att_tup->atttypid,
+
att_tup->atttypmod,
+
att_tup->attcollation,
+
att_tup->attlen,
+
att_tup->attbyval);
+ /* Must run expression
preprocessing on any non-const nodes */
+ if (!IsA(new_expr, Const))
+ new_expr =
eval_const_expressions(root, new_expr);
+ }
+ }
}
new_tle = makeTargetEntry((Expr *) new_expr,
diff --git a/src/include/parser/parse_coerce.h b/src/include/parser/parse_coerce.h
index 29b21e61e5a..3cf6b349f69 100644
--- a/src/include/parser/parse_coerce.h
+++ b/src/include/parser/parse_coerce.h
@@ -67,6 +67,9 @@ extern Node *coerce_to_specific_type_typmod(ParseState *pstate, Node *node,
Oid targetTypeId, int32 targetTypmod,
const char *constructName);
+extern Node *coerce_null_to_domain(Oid typid, int32 typmod, Oid collation,
+				   int typlen, bool typbyval);
+
extern void parser_coercion_errposition(ParseState *pstate,
int coerce_location,
Node *input_expr);
diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out
index 2c9b13729af..78771f3e715 100644
--- a/src/test/regress/expected/create_index.out
+++ b/src/test/regress/expected/create_index.out
@@ -305,54 +305,6 @@ SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
(1 row)
EXPLAIN (COSTS OFF)
-<<<<<<< HEAD
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
- ORDER BY (poly_center(f1))[0];
- QUERY PLAN
------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3)
- Merge Key: ((poly_center(f1))[0])
- -> Sort
- Sort Key: ((poly_center(f1))[0])
- -> Index Scan using gpolygonind on polygon_tbl
- Index Cond: (f1 @> '((1,1),(2,2),(2,1))'::polygon)
- Optimizer: Postgres query optimizer
-(7 rows)
-
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
- ORDER BY (poly_center(f1))[0];
- f1
----------------------
- ((2,0),(2,4),(0,0))
-(1 row)
-
-EXPLAIN (COSTS OFF)
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
- ORDER BY area(f1);
- QUERY PLAN
---------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3)
- Merge Key: (area(f1))
- -> Sort
- Sort Key: (area(f1))
- -> Index Scan using gcircleind on circle_tbl
- Index Cond: (f1 && '<(1,-2),1>'::circle)
- Optimizer: Postgres query optimizer
-(7 rows)
-
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
- ORDER BY area(f1);
- f1
----------------
- <(1,2),3>
- <(1,3),5>
- <(1,2),100>
- <(100,1),115>
-(4 rows)
-
-EXPLAIN (COSTS OFF)
-=======
->>>>>>> REL_16_9
SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon;
QUERY PLAN
------------------------------------------------------------------------
@@ -697,7 +649,6 @@ SELECT circle_center(f1), round(radius(f1)) as radius FROM
gcircle_tbl ORDER BY
EXPLAIN (COSTS OFF)
SELECT point(x,x), (SELECT f1 FROM gpolygon_tbl ORDER BY f1 <-> point(x,x)
LIMIT 1) as c FROM generate_series(0,10,1) x;
-<<<<<<< HEAD
QUERY PLAN
-----------------------------------------------------------------------------------------------------------
Function Scan on generate_series x
@@ -711,16 +662,6 @@ SELECT point(x,x), (SELECT f1 FROM gpolygon_tbl ORDER BY
f1 <-> point(x,x) LIMIT
-> Seq Scan on gpolygon_tbl
Optimizer: Postgres query optimizer
(10 rows)
-=======
- QUERY PLAN
---------------------------------------------------------------------------------------------
- Function Scan on generate_series x
- SubPlan 1
- -> Limit
- -> Index Scan using ggpolygonind on gpolygon_tbl
- Order By: (f1 <-> point((x.x)::double precision,
(x.x)::double precision))
-(5 rows)
->>>>>>> REL_16_9
SELECT point(x,x), (SELECT f1 FROM gpolygon_tbl ORDER BY f1 <-> point(x,x)
LIMIT 1) as c FROM generate_series(0,10,1) x;
point | c
@@ -1512,16 +1453,6 @@ ALTER TABLE covering_index_heap ADD CONSTRAINT
covering_pkey PRIMARY KEY USING I
covering_pkey;
DROP TABLE covering_index_heap;
--
-<<<<<<< HEAD
--- Also try building functional, expressional, and partial indexes on
--- tables that already contain data.
---
-create index hash_f8_index_1 on hash_f8_heap(abs(random));
-create index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
-create index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
---
-=======
->>>>>>> REL_16_9
-- Try some concurrent index builds
--
-- Unfortunately this only tests about half the code paths because there are
@@ -2427,11 +2358,7 @@ CREATE INDEX concur_appclass_ind on concur_appclass_tab
USING gist (i tsvector_ops (siglen='1000'), j tsvector_ops (siglen='500'));
CREATE INDEX concur_appclass_ind_2 on concur_appclass_tab
USING gist (k tsvector_ops (siglen='300'), j tsvector_ops);
-<<<<<<< HEAD
REINDEX TABLE concur_appclass_tab;
-=======
-REINDEX TABLE CONCURRENTLY concur_appclass_tab;
->>>>>>> REL_16_9
\d concur_appclass_tab
Table "public.concur_appclass_tab"
Column | Type | Collation | Nullable | Default
diff --git a/src/test/regress/expected/create_misc.out b/src/test/regress/expected/create_misc.out
index 6098825179d..5b46ee5f1c6 100644
--- a/src/test/regress/expected/create_misc.out
+++ b/src/test/regress/expected/create_misc.out
@@ -2,54 +2,6 @@
-- CREATE_MISC
--
--
-<<<<<<< HEAD
-INSERT INTO tenk2 SELECT * FROM tenk1;
-ANALYZE tenk2;
-CREATE TABLE onek2 AS SELECT * FROM onek;
-INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000;
-ANALYZE fast_emp4000;
-SELECT *
- INTO TABLE Bprime
- FROM tenk1
- WHERE unique2 < 1000;
-INSERT INTO hobbies_r (name, person)
- SELECT 'posthacking', p.name
- FROM person* p
- WHERE p.name = 'mike' or p.name = 'jeff';
-INSERT INTO hobbies_r (name, person)
- SELECT 'basketball', p.name
- FROM person p
- WHERE p.name = 'joe' or p.name = 'sally';
-INSERT INTO hobbies_r (name) VALUES ('skywalking');
-INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking');
-INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking');
-INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball');
-INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking');
-INSERT INTO city VALUES
-('Podunk', '(1,2),(3,4)', '100,127,1000'),
-('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789');
-TABLE city;
- name | location | budget
---------+----------------------+-----------------------
- Podunk | (3,4),(1,2) | 100,127,1000,0
- Gotham | (1100,334),(1000,34) | 123456,127,-1000,6789
-(2 rows)
-
-SELECT *
- INTO TABLE ramp
- FROM road
- WHERE name ~ '.*Ramp';
-INSERT INTO ihighway
- SELECT *
- FROM road
- WHERE name ~ 'I- .*';
-INSERT INTO shighway
- SELECT *
- FROM road
- WHERE name ~ 'State Hwy.*';
-UPDATE shighway
- SET surface = 'asphalt';
-=======
-- a is the type root
-- b and c inherit from a (one-level single inheritance)
-- d inherits from b and c (two-level multiple inheritance)
@@ -77,7 +29,6 @@ CREATE TABLE e_star (
CREATE TABLE f_star (
f polygon
) INHERITS (e_star);
->>>>>>> REL_16_9
INSERT INTO a_star (class, a) VALUES ('a', 1);
INSERT INTO a_star (class, a) VALUES ('a', 2);
INSERT INTO a_star (class) VALUES ('a');
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index c10ea775bd7..7b992b969e9 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -59,13 +59,13 @@ test: create_table
# test: create_function_2
test: copy
test: copyselect
-# test: copydml
-# test: insert
-# test: insert_conflict
-# test: create_misc
-# test: create_operator
-# test: create_procedure
-# test: create_index
+test: copydml
+test: insert
+test: insert_conflict
+test: create_misc
+test: create_operator
+test: create_procedure
+test: create_index
# test: create_index_spgist
# test: create_view
# test: index_including
diff --git a/src/test/regress/sql/create_index.sql b/src/test/regress/sql/create_index.sql
index bb9416a6352..31af2d85113 100644
--- a/src/test/regress/sql/create_index.sql
+++ b/src/test/regress/sql/create_index.sql
@@ -495,18 +495,6 @@ ALTER TABLE covering_index_heap ADD CONSTRAINT
covering_pkey PRIMARY KEY USING I
covering_pkey;
DROP TABLE covering_index_heap;
-<<<<<<< HEAD
-
---
--- Also try building functional, expressional, and partial indexes on
--- tables that already contain data.
---
-create index hash_f8_index_1 on hash_f8_heap(abs(random));
-create index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
-create index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
-
-=======
->>>>>>> REL_16_9
--
-- Try some concurrent index builds
--
@@ -977,11 +965,7 @@ CREATE INDEX concur_appclass_ind on concur_appclass_tab
USING gist (i tsvector_ops (siglen='1000'), j tsvector_ops (siglen='500'));
CREATE INDEX concur_appclass_ind_2 on concur_appclass_tab
USING gist (k tsvector_ops (siglen='300'), j tsvector_ops);
-<<<<<<< HEAD
REINDEX TABLE concur_appclass_tab;
-=======
-REINDEX TABLE CONCURRENTLY concur_appclass_tab;
->>>>>>> REL_16_9
\d concur_appclass_tab
DROP TABLE concur_appclass_tab;
diff --git a/src/test/regress/sql/create_misc.sql b/src/test/regress/sql/create_misc.sql
index e8d91c5e671..6fb9fdab4c1 100644
--- a/src/test/regress/sql/create_misc.sql
+++ b/src/test/regress/sql/create_misc.sql
@@ -14,27 +14,17 @@ CREATE TABLE a_star (
a int4
);
-<<<<<<< HEAD
-INSERT INTO tenk2 SELECT * FROM tenk1;
-ANALYZE tenk2;
-=======
CREATE TABLE b_star (
b text
) INHERITS (a_star);
->>>>>>> REL_16_9
CREATE TABLE c_star (
c name
) INHERITS (a_star);
-<<<<<<< HEAD
-INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000;
-ANALYZE fast_emp4000;
-=======
CREATE TABLE d_star (
d float8
) INHERITS (b_star, c_star);
->>>>>>> REL_16_9
CREATE TABLE e_star (
e int2
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]