This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
new b6ceb2b2b52 Fix compile errors for resgroup
b6ceb2b2b52 is described below
commit b6ceb2b2b523c3c5f4123be887a87c4f6bc80c69
Author: Jinbao Chen <[email protected]>
AuthorDate: Mon Oct 6 22:41:36 2025 +0800
Fix compile errors for resgroup
---
src/backend/utils/resgroup/cgroup_io_limit.c | 2 +-
src/backend/utils/resgroup/resgroup.c | 124 +++++++++++++--------------
2 files changed, 62 insertions(+), 64 deletions(-)
diff --git a/src/backend/utils/resgroup/cgroup_io_limit.c b/src/backend/utils/resgroup/cgroup_io_limit.c
index f3b1a1e33b2..952764713e7 100644
--- a/src/backend/utils/resgroup/cgroup_io_limit.c
+++ b/src/backend/utils/resgroup/cgroup_io_limit.c
@@ -407,7 +407,7 @@ get_iostat(Oid groupid, List *io_limit)
hash_create("hash table for bdi -> io stat",
list_length(io_limit), &ctl,
HASH_ELEM | HASH_BLOBS| HASH_CONTEXT);
- while (pg_get_line_append(f, line))
+ while (pg_get_line_append(f, line, NULL))
{
uint32 maj, min;
bdi_t bdi;
diff --git a/src/backend/utils/resgroup/resgroup.c b/src/backend/utils/resgroup/resgroup.c
index 6dc5f0c8174..daa7fb2f77e 100644
--- a/src/backend/utils/resgroup/resgroup.c
+++ b/src/backend/utils/resgroup/resgroup.c
@@ -154,7 +154,7 @@ struct ResGroupData
	int64		totalExecuted;		/* total number of executed trans */
	int64		totalQueued;		/* total number of queued trans */
	int64		totalQueuedTimeMs;	/* total queue time, in milliseconds */
-	PROC_QUEUE	waitProcs;		/* list of PGPROC objects waiting on this group */
+	dclist_head	waitProcs;		/* list of PGPROC objects waiting on this group */
	bool		lockedForDrop;		/* true if resource group is dropped but not committed yet */
@@ -247,7 +247,7 @@ static bool procIsWaiting(const PGPROC *proc);
static void procWakeup(PGPROC *proc);
static int slotGetId(const ResGroupSlotData *slot);
static void groupWaitQueueValidate(const ResGroupData *group);
-static void groupWaitProcValidate(PGPROC *proc, PROC_QUEUE *head);
+static void groupWaitProcValidate(PGPROC *proc, dclist_head *head);
static void groupWaitQueuePush(ResGroupData *group, PGPROC *proc);
static PGPROC *groupWaitQueuePop(ResGroupData *group);
static void groupWaitQueueErase(ResGroupData *group, PGPROC *proc);
@@ -260,7 +260,7 @@ static void unlockResGroupForDrop(ResGroupData *group);
static bool groupIsDropped(ResGroupInfo *pGroupInfo);
static void resgroupDumpGroup(StringInfo str, ResGroupData *group);
-static void resgroupDumpWaitQueue(StringInfo str, PROC_QUEUE *queue);
+static void resgroupDumpWaitQueue(StringInfo str, dclist_head *queue);
static void resgroupDumpCaps(StringInfo str, ResGroupCap *caps);
static void resgroupDumpSlots(StringInfo str);
static void resgroupDumpFreeSlots(StringInfo str);
@@ -508,8 +508,10 @@ InitResGroups(void)
}
else
{
- char *cpuset = getCpuSetByRole(caps.cpuset);
- bmsCurrent = CpusetToBitset(cpuset, MaxCpuSetLength);
+ char *cpuset2;
+
+ cpuset2 = getCpuSetByRole(caps.cpuset);
+ bmsCurrent = CpusetToBitset(cpuset2, MaxCpuSetLength);
			Bitmapset *bmsCommon = bms_intersect(bmsCurrent, bmsUnused);
			Bitmapset *bmsMissing = bms_difference(bmsCurrent, bmsCommon);
@@ -522,7 +524,7 @@ InitResGroups(void)
{
ereport(WARNING,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cgroup is not properly
configured to use the cpuset feature"),
+ errmsg("cgroup is not properly
configured to use the cpuset2 feature"),
errhint("Extra cgroup
configurations are required to enable this feature, "
"please refer
to the Cloudberry Documentations for details")));
}
@@ -535,8 +537,8 @@ InitResGroups(void)
* write cpus to corresponding file
* if all the cores are available
*/
- char *cpuset= getCpuSetByRole(caps.cpuset);
- cgroupOpsRoutine->setcpuset(groupId, cpuset);
+ cpuset2= getCpuSetByRole(caps.cpuset);
+ cgroupOpsRoutine->setcpuset(groupId, cpuset2);
				bmsUnused = bms_del_members(bmsUnused, bmsCurrent);
}
else
@@ -548,8 +550,8 @@ InitResGroups(void)
			 * to this group and send a warning message, so the system
* can startup, then DBA can fix it
*/
-				snprintf(cpuset, MaxCpuSetLength, "%d", defaultCore);
-				cgroupOpsRoutine->setcpuset(groupId, cpuset);
+				snprintf(cpuset2, MaxCpuSetLength, "%d", defaultCore);
+				cgroupOpsRoutine->setcpuset(groupId, cpuset2);
				BitsetToCpuset(bmsMissing, cpusetMissing, MaxCpuSetLength);
ereport(WARNING,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -624,7 +626,7 @@ ResGroupCheckForDrop(Oid groupId, char *name)
if (group->nRunning + group->nRunningBypassed > 0)
{
-		int nQuery = group->nRunning + group->nRunningBypassed + group->waitProcs.size;
+		int nQuery = group->nRunning + group->nRunningBypassed + group->waitProcs.count;
Assert(name != NULL);
ereport(ERROR,
@@ -897,7 +899,7 @@ ResGroupGetStat(Oid groupId, ResGroupStatType type)
			result = Int32GetDatum(group->nRunning + group->nRunningBypassed);
break;
case RES_GROUP_STAT_NQUEUEING:
- result = Int32GetDatum(group->waitProcs.size);
+ result = Int32GetDatum(group->waitProcs.count);
break;
case RES_GROUP_STAT_TOTAL_EXECUTED:
result = Int64GetDatum(group->totalExecuted);
@@ -981,7 +983,7 @@ createGroup(Oid groupId, const ResGroupCaps *caps)
group->nRunning = 0;
group->nRunningBypassed = 0;
- ProcQueueInit(&group->waitProcs);
+ dclist_init(&group->waitProcs);
group->totalExecuted = 0;
group->totalQueued = 0;
group->totalQueuedTimeMs = 0;
@@ -1280,7 +1282,8 @@ groupAcquireSlot(ResGroupInfo *pGroupInfo, bool isMoveQuery)
/* got one, lucky */
group->totalExecuted++;
LWLockRelease(ResGroupLock);
- pgstat_report_resgroup(group->groupId);
+ /* MERGE16_FIXME report data to pastat */
+ //pgstat_report_resgroup(group->groupId);
return slot;
}
}
@@ -1317,7 +1320,7 @@ groupAcquireSlot(ResGroupInfo *pGroupInfo, bool isMoveQuery)
group->totalExecuted++;
LWLockRelease(ResGroupLock);
- pgstat_report_resgroup(group->groupId);
+// pgstat_report_resgroup(group->groupId);
return slot;
}
@@ -1556,7 +1559,7 @@ AssignResGroupOnMaster(void)
/* Update pg_stat_activity statistics */
bypassedGroup->totalExecuted++;
- pgstat_report_resgroup(bypassedGroup->groupId);
+// pgstat_report_resgroup(bypassedGroup->groupId);
/* Initialize the fake slot */
bypassedSlot.group = groupInfo.group;
@@ -1622,7 +1625,7 @@ UnassignResGroup(void)
bypassedGroup = NULL;
/* Update pg_stat_activity statistics */
- pgstat_report_resgroup(InvalidOid);
+// pgstat_report_resgroup(InvalidOid);
return;
}
@@ -1657,7 +1660,7 @@ UnassignResGroup(void)
if (Gp_role == GP_ROLE_DISPATCH)
SIMPLE_FAULT_INJECTOR("unassign_resgroup_end_qd");
- pgstat_report_resgroup(InvalidOid);
+// pgstat_report_resgroup(InvalidOid);
}
/*
@@ -1801,7 +1804,7 @@ waitOnGroup(ResGroupData *group, bool isMoveQuery)
* not enough to store a full Oid, so we set groupId out-of-band,
* via the backend entry.
*/
- pgstat_report_resgroup(group->groupId);
+// pgstat_report_resgroup(group->groupId);
/*
* Mark that we are waiting on resource group
@@ -2375,7 +2378,7 @@ groupIsNotDropped(const ResGroupData *group)
static void
groupWaitQueueValidate(const ResGroupData *group)
{
- const PROC_QUEUE *waitQueue;
+ const dclist_head *waitQueue;
Assert(LWLockHeldByMeInMode(ResGroupLock, LW_EXCLUSIVE));
@@ -2383,34 +2386,34 @@ groupWaitQueueValidate(const ResGroupData *group)
if (gp_resgroup_debug_wait_queue)
{
- if (waitQueue->size == 0)
+ if (waitQueue->count == 0)
{
- if (waitQueue->links.next != &waitQueue->links ||
- waitQueue->links.prev != &waitQueue->links)
+			if (waitQueue->dlist.head.next != &waitQueue->dlist.head ||
+			    waitQueue->dlist.head.prev != &waitQueue->dlist.head)
				elog(PANIC, "resource group wait queue is corrupted");
}
else
{
- PGPROC *nextProc = (PGPROC *)waitQueue->links.next;
- PGPROC *prevProc = (PGPROC *)waitQueue->links.prev;
+			PGPROC *nextProc = dclist_container(PGPROC, links, waitQueue->dlist.head.next);
+			PGPROC *prevProc = dclist_container(PGPROC, links, waitQueue->dlist.head.prev);
if (!nextProc->mppIsWriter ||
!prevProc->mppIsWriter ||
- nextProc->links.prev != &waitQueue->links ||
- prevProc->links.next != &waitQueue->links)
+			    nextProc->links.prev != &waitQueue->dlist.head ||
+			    prevProc->links.next != &waitQueue->dlist.head)
				elog(PANIC, "resource group wait queue is corrupted");
}
return;
}
- AssertImply(waitQueue->size == 0,
- waitQueue->links.next == &waitQueue->links &&
- waitQueue->links.prev == &waitQueue->links);
+ AssertImply(waitQueue->count == 0,
+		    waitQueue->dlist.head.next == &waitQueue->dlist.head &&
+		    waitQueue->dlist.head.prev == &waitQueue->dlist.head);
}
static void
-groupWaitProcValidate(PGPROC *proc, PROC_QUEUE *head)
+groupWaitProcValidate(PGPROC *proc, dclist_head *head)
{
PGPROC *nextProc = (PGPROC *)proc->links.next;
PGPROC *prevProc = (PGPROC *)proc->links.prev;
@@ -2421,8 +2424,8 @@ groupWaitProcValidate(PGPROC *proc, PROC_QUEUE *head)
return;
if (!proc->mppIsWriter ||
- ((PROC_QUEUE *)nextProc != head && !nextProc->mppIsWriter) ||
- ((PROC_QUEUE *)prevProc != head && !prevProc->mppIsWriter) ||
+	    ((dlist_node *)nextProc != &head->dlist.head && !nextProc->mppIsWriter) ||
+	    ((dlist_node *)prevProc != &head->dlist.head && !prevProc->mppIsWriter) ||
nextProc->links.prev != &proc->links ||
prevProc->links.next != &proc->links)
elog(PANIC, "resource group wait queue is corrupted");
@@ -2436,7 +2439,7 @@ groupWaitProcValidate(PGPROC *proc, PROC_QUEUE *head)
static void
groupWaitQueuePush(ResGroupData *group, PGPROC *proc)
{
- PROC_QUEUE *waitQueue;
+ dclist_head *waitQueue;
PGPROC *headProc;
Assert(LWLockHeldByMeInMode(ResGroupLock, LW_EXCLUSIVE));
@@ -2446,12 +2449,12 @@ groupWaitQueuePush(ResGroupData *group, PGPROC *proc)
groupWaitQueueValidate(group);
waitQueue = &group->waitProcs;
- headProc = (PGPROC *) &waitQueue->links;
+ headProc = (PGPROC *) &waitQueue->dlist.head;
- SHMQueueInsertBefore(&headProc->links, &proc->links);
+ dclist_insert_before(waitQueue, &headProc->links, &proc->links);
groupWaitProcValidate(proc, waitQueue);
- waitQueue->size++;
+ waitQueue->count++;
Assert(groupWaitQueueFind(group, proc));
}
@@ -2462,7 +2465,7 @@ groupWaitQueuePush(ResGroupData *group, PGPROC *proc)
static PGPROC *
groupWaitQueuePop(ResGroupData *group)
{
- PROC_QUEUE *waitQueue;
+ dclist_head *waitQueue;
PGPROC *proc;
Assert(LWLockHeldByMeInMode(ResGroupLock, LW_EXCLUSIVE));
@@ -2472,14 +2475,14 @@ groupWaitQueuePop(ResGroupData *group)
waitQueue = &group->waitProcs;
- proc = (PGPROC *) waitQueue->links.next;
+ proc = (PGPROC *) waitQueue->dlist.head.next;
groupWaitProcValidate(proc, waitQueue);
Assert(groupWaitQueueFind(group, proc));
Assert(proc->resSlot == NULL);
- SHMQueueDelete(&proc->links);
+ dclist_delete_from(waitQueue, &proc->links);
- waitQueue->size--;
+ waitQueue->count--;
return proc;
}
@@ -2490,7 +2493,7 @@ groupWaitQueuePop(ResGroupData *group)
static void
groupWaitQueueErase(ResGroupData *group, PGPROC *proc)
{
- PROC_QUEUE *waitQueue;
+ dclist_head *waitQueue;
Assert(LWLockHeldByMeInMode(ResGroupLock, LW_EXCLUSIVE));
Assert(!groupWaitQueueIsEmpty(group));
@@ -2502,9 +2505,9 @@ groupWaitQueueErase(ResGroupData *group, PGPROC *proc)
waitQueue = &group->waitProcs;
groupWaitProcValidate(proc, waitQueue);
- SHMQueueDelete(&proc->links);
+ dclist_delete_from(waitQueue, &proc->links);
- waitQueue->size--;
+ waitQueue->count--;
}
/*
@@ -2513,7 +2516,7 @@ groupWaitQueueErase(ResGroupData *group, PGPROC *proc)
static bool
groupWaitQueueIsEmpty(const ResGroupData *group)
{
- const PROC_QUEUE *waitQueue;
+ const dclist_head *waitQueue;
Assert(LWLockHeldByMeInMode(ResGroupLock, LW_EXCLUSIVE));
@@ -2521,7 +2524,7 @@ groupWaitQueueIsEmpty(const ResGroupData *group)
waitQueue = &group->waitProcs;
- return waitQueue->size == 0;
+ return waitQueue->count == 0;
}
#ifdef USE_ASSERT_CHECKING
@@ -2536,23 +2539,22 @@ groupWaitQueueIsEmpty(const ResGroupData *group)
static bool
groupWaitQueueFind(ResGroupData *group, const PGPROC *proc)
{
- PROC_QUEUE *waitQueue;
- SHM_QUEUE *head;
- PGPROC *iter;
+ dclist_head *waitQueue;
Size offset;
+ dlist_iter proc_iter;
Assert(LWLockHeldByMeInMode(ResGroupLock, LW_EXCLUSIVE));
groupWaitQueueValidate(group);
waitQueue = &group->waitProcs;
- head = &waitQueue->links;
offset = offsetof(PGPROC, links);
- for (iter = (PGPROC *) SHMQueueNext(head, head, offset); iter;
- iter = (PGPROC *) SHMQueueNext(head, &iter->links, offset))
+ dclist_foreach(proc_iter, waitQueue)
{
- if (iter == proc)
+		PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
+
+ if (queued_proc == proc)
{
Assert(procIsWaiting(proc));
return true;
@@ -2760,17 +2762,15 @@ resgroupDumpGroup(StringInfo str, ResGroupData *group)
}
static void
-resgroupDumpWaitQueue(StringInfo str, PROC_QUEUE *queue)
+resgroupDumpWaitQueue(StringInfo str, dclist_head *queue)
{
PGPROC *proc;
appendStringInfo(str, "\"wait_queue\":{");
- appendStringInfo(str, "\"wait_queue_size\":%d,", queue->size);
+ appendStringInfo(str, "\"wait_queue_size\":%d,", queue->count);
appendStringInfo(str, "\"wait_queue_content\":[");
-	proc = (PGPROC *)SHMQueueNext(&queue->links,
-								  &queue->links,
-								  offsetof(PGPROC, links));
+ proc = (PGPROC *)dclist_next_node(queue, &queue->dlist.head);
if (!ShmemAddrIsValid(&proc->links))
{
@@ -2786,9 +2786,7 @@ resgroupDumpWaitQueue(StringInfo str, PROC_QUEUE *queue)
procIsWaiting(proc) ? "true" :
"false");
appendStringInfo(str, "\"resSlot\":%d",
slotGetId(proc->resSlot));
appendStringInfo(str, "}");
- proc = (PGPROC *)SHMQueueNext(&queue->links,
- &proc->links,
-									  offsetof(PGPROC, links));
+ proc = (PGPROC *)dclist_next_node(queue, &queue->dlist.head);
if (proc)
appendStringInfo(str, ",");
}
@@ -3323,7 +3321,7 @@ HandleMoveResourceGroup(void)
cgroupOpsRoutine->attachcgroup(self->groupId, MyProcPid,
self->caps.cpuMaxPercent == CPU_MAX_PERCENT_DISABLED);
- pgstat_report_resgroup(self->groupId);
+// pgstat_report_resgroup(self->groupId);
}
/*
@@ -3693,7 +3691,7 @@ check_and_unassign_from_resgroup(PlannedStmt* stmt)
bypassedGroup = groupInfo.group;
bypassedGroup->totalExecuted++;
- pgstat_report_resgroup(bypassedGroup->groupId);
+// pgstat_report_resgroup(bypassedGroup->groupId);
bypassedSlot.group = groupInfo.group;
bypassedSlot.groupId = groupInfo.groupId;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]