On Tue, Aug 23, 2022 at 01:38:40PM +1200, David Rowley wrote:
> On Tue, 23 Aug 2022 at 13:17, Justin Pryzby <pry...@telsasoft.com> wrote:
> > Attached is a squished version.
>
> I see there's some renaming ones snuck in there. e.g:
> ... in fact, there's lots of renaming, so I'll just stop looking.
Actually, they didn't sneak in - what I sent are the patches which are
ready to be reviewed, excluding the set of "this" and "tmp" and other
renames which you disliked.

In the branch (not the squished patch), the first ~15 patches were mostly
for C99 for loops.  I presented them this way deliberately, so you could
review and comment on whatever you're able to bite off, or run with
whatever parts you think are ready.  I've now rewritten it to be more
bite-sized by truncating off the 2nd half of the patches.

> Can you just send a patch that only changes the cases where you can
> remove a variable declaration from an outer scope into a single inner
> scope, or multiple inner scope when the variable can be declared
> inside a for() loop?

> would be to move the found_whole_row declaration into multiple inner
> scopes. That's a net increase in code lines, for which I think
> requires more careful thought if we want that or not.

IMO it doesn't make sense to declare multiple integers for something like
this when they're all ignored.  Nor for "save_errno" nor the third,
similar case, for the reason in the commit message.

-- 
Justin
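To make the discussion concrete, here is a standalone sketch of the
transformation most of these patches apply (illustrative only; the
function and variable names below are made up, not taken from the patch):

    /* Hypothetical example of the C99 for-loop scoping change. */
    #include <stdio.h>

    static void
    count_attrs_old(int natts)
    {
        int     i;          /* counter declared at function scope */

        for (i = 0; i < natts; i++)
            printf("attribute %d\n", i);
    }

    static void
    count_attrs_new(int natts)
    {
        /* counter declared in the loop itself, so its scope ends with it */
        for (int i = 0; i < natts; i++)
            printf("attribute %d\n", i);
    }

    int
    main(void)
    {
        count_attrs_old(3);
        count_attrs_new(3);
        return 0;
    }

The second form confines the counter's lifetime to the loop body, which is
what the attached diff does for keyno, attnum, transno, and similar
variables.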
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index e88f7efa7e4..69f21abfb59 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -353,45 +353,44 @@ brinbeginscan(Relation r, int nkeys, int norderbys) int64 bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm) { Relation idxRel = scan->indexRelation; Buffer buf = InvalidBuffer; BrinDesc *bdesc; Oid heapOid; Relation heapRel; BrinOpaque *opaque; BlockNumber nblocks; BlockNumber heapBlk; int totalpages = 0; FmgrInfo *consistentFn; MemoryContext oldcxt; MemoryContext perRangeCxt; BrinMemTuple *dtup; BrinTuple *btup = NULL; Size btupsz = 0; ScanKey **keys, **nullkeys; int *nkeys, *nnullkeys; - int keyno; char *ptr; Size len; char *tmp PG_USED_FOR_ASSERTS_ONLY; opaque = (BrinOpaque *) scan->opaque; bdesc = opaque->bo_bdesc; pgstat_count_index_scan(idxRel); /* * We need to know the size of the table so that we know how long to * iterate on the revmap. */ heapOid = IndexGetRelation(RelationGetRelid(idxRel), false); heapRel = table_open(heapOid, AccessShareLock); nblocks = RelationGetNumberOfBlocks(heapRel); table_close(heapRel, AccessShareLock); /* * Make room for the consistent support procedures of indexed columns. We * don't look them up here; we do that lazily the first time we see a scan * key reference each of them. We rely on zeroing fn_oid to InvalidOid. */ @@ -435,45 +434,45 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm) nkeys = (int *) ptr; ptr += MAXALIGN(sizeof(int) * bdesc->bd_tupdesc->natts); nnullkeys = (int *) ptr; ptr += MAXALIGN(sizeof(int) * bdesc->bd_tupdesc->natts); for (int i = 0; i < bdesc->bd_tupdesc->natts; i++) { keys[i] = (ScanKey *) ptr; ptr += MAXALIGN(sizeof(ScanKey) * scan->numberOfKeys); nullkeys[i] = (ScanKey *) ptr; ptr += MAXALIGN(sizeof(ScanKey) * scan->numberOfKeys); } Assert(tmp + len == ptr); /* zero the number of keys */ memset(nkeys, 0, sizeof(int) * bdesc->bd_tupdesc->natts); memset(nnullkeys, 0, sizeof(int) * bdesc->bd_tupdesc->natts); /* Preprocess the scan keys - split them into per-attribute arrays. */ - for (keyno = 0; keyno < scan->numberOfKeys; keyno++) + for (int keyno = 0; keyno < scan->numberOfKeys; keyno++) { ScanKey key = &scan->keyData[keyno]; AttrNumber keyattno = key->sk_attno; /* * The collation of the scan key must match the collation used in the * index column (but only if the search is not IS NULL/ IS NOT NULL). * Otherwise we shouldn't be using this index ... */ Assert((key->sk_flags & SK_ISNULL) || (key->sk_collation == TupleDescAttr(bdesc->bd_tupdesc, keyattno - 1)->attcollation)); /* * First time we see this index attribute, so init as needed. * * This is a bit of an overkill - we don't know how many scan keys are * there for this attribute, so we simply allocate the largest number * possible (as if all keys were for this attribute). This may waste a * bit of memory, but we only expect small number of scan keys in * general, so this should be negligible, and repeated repalloc calls diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c index 10d4f17bc6f..524c1846b83 100644 --- a/src/backend/access/brin/brin_minmax_multi.c +++ b/src/backend/access/brin/brin_minmax_multi.c @@ -563,125 +563,120 @@ range_deduplicate_values(Ranges *range) AssertCheckRanges(range, range->cmp, range->colloid); } /* * brin_range_serialize * Serialize the in-memory representation into a compact varlena value. 
* * Simply copy the header and then also the individual values, as stored * in the in-memory value array. */ static SerializedRanges * brin_range_serialize(Ranges *range) { Size len; int nvalues; SerializedRanges *serialized; Oid typid; int typlen; bool typbyval; - int i; char *ptr; /* simple sanity checks */ Assert(range->nranges >= 0); Assert(range->nsorted >= 0); Assert(range->nvalues >= 0); Assert(range->maxvalues > 0); Assert(range->target_maxvalues > 0); /* at this point the range should be compacted to the target size */ Assert(2 * range->nranges + range->nvalues <= range->target_maxvalues); Assert(range->target_maxvalues <= range->maxvalues); /* range boundaries are always sorted */ Assert(range->nvalues >= range->nsorted); /* deduplicate values, if there's unsorted part */ range_deduplicate_values(range); /* see how many Datum values we actually have */ nvalues = 2 * range->nranges + range->nvalues; typid = range->typid; typbyval = get_typbyval(typid); typlen = get_typlen(typid); /* header is always needed */ len = offsetof(SerializedRanges, data); /* * The space needed depends on data type - for fixed-length data types * (by-value and some by-reference) it's pretty simple, just multiply * (attlen * nvalues) and we're done. For variable-length by-reference * types we need to actually walk all the values and sum the lengths. */ if (typlen == -1) /* varlena */ { - int i; - - for (i = 0; i < nvalues; i++) + for (int i = 0; i < nvalues; i++) { len += VARSIZE_ANY(range->values[i]); } } else if (typlen == -2) /* cstring */ { - int i; - - for (i = 0; i < nvalues; i++) + for (int i = 0; i < nvalues; i++) { /* don't forget to include the null terminator ;-) */ len += strlen(DatumGetCString(range->values[i])) + 1; } } else /* fixed-length types (even by-reference) */ { Assert(typlen > 0); len += nvalues * typlen; } /* * Allocate the serialized object, copy the basic information. The * serialized object is a varlena, so update the header. */ serialized = (SerializedRanges *) palloc0(len); SET_VARSIZE(serialized, len); serialized->typid = typid; serialized->nranges = range->nranges; serialized->nvalues = range->nvalues; serialized->maxvalues = range->target_maxvalues; /* * And now copy also the boundary values (like the length calculation this * depends on the particular data type). */ ptr = serialized->data; /* start of the serialized data */ - for (i = 0; i < nvalues; i++) + for (int i = 0; i < nvalues; i++) { if (typbyval) /* simple by-value data types */ { Datum tmp; /* * For byval types, we need to copy just the significant bytes - * we can't use memcpy directly, as that assumes little-endian * behavior. store_att_byval does almost what we need, but it * requires a properly aligned buffer - the output buffer does not * guarantee that. So we simply use a local Datum variable (which * guarantees proper alignment), and then copy the value from it. */ store_att_byval(&tmp, range->values[i], typlen); memcpy(ptr, &tmp, typlen); ptr += typlen; } else if (typlen > 0) /* fixed-length by-ref types */ { memcpy(ptr, DatumGetPointer(range->values[i]), typlen); ptr += typlen; diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 5866c6aaaf7..30069f139c7 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -215,45 +215,44 @@ gistinsert(Relation r, Datum *values, bool *isnull, * * If 'newblkno' is not NULL, returns the block number of page the first * new/updated tuple was inserted to. 
Usually it's the given page, but could * be its right sibling if the page was split. * * Returns 'true' if the page was split, 'false' otherwise. */ bool gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, Buffer buffer, IndexTuple *itup, int ntup, OffsetNumber oldoffnum, BlockNumber *newblkno, Buffer leftchildbuf, List **splitinfo, bool markfollowright, Relation heapRel, bool is_build) { BlockNumber blkno = BufferGetBlockNumber(buffer); Page page = BufferGetPage(buffer); bool is_leaf = (GistPageIsLeaf(page)) ? true : false; XLogRecPtr recptr; - int i; bool is_split; /* * Refuse to modify a page that's incompletely split. This should not * happen because we finish any incomplete splits while we walk down the * tree. However, it's remotely possible that another concurrent inserter * splits a parent page, and errors out before completing the split. We * will just throw an error in that case, and leave any split we had in * progress unfinished too. The next insert that comes along will clean up * the mess. */ if (GistFollowRight(page)) elog(ERROR, "concurrent GiST page split was incomplete"); /* should never try to insert to a deleted page */ Assert(!GistPageIsDeleted(page)); *splitinfo = NIL; /* * if isupdate, remove old key: This node's key has been modified, either * because a child split occurred or because we needed to adjust our key @@ -401,45 +400,45 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate, } else { /* Prepare split-info to be returned to caller */ for (ptr = dist; ptr; ptr = ptr->next) { GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo)); si->buf = ptr->buffer; si->downlink = ptr->itup; *splitinfo = lappend(*splitinfo, si); } } /* * Fill all pages. All the pages are new, ie. freshly allocated empty * pages, or a temporary copy of the old page. */ for (ptr = dist; ptr; ptr = ptr->next) { char *data = (char *) (ptr->list); - for (i = 0; i < ptr->block.num; i++) + for (int i = 0; i < ptr->block.num; i++) { IndexTuple thistup = (IndexTuple) data; if (PageAddItem(ptr->page, (Item) data, IndexTupleSize(thistup), i + FirstOffsetNumber, false, false) == InvalidOffsetNumber) elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(rel)); /* * If this is the first inserted/updated tuple, let the caller * know which page it landed on. */ if (newblkno && ItemPointerEquals(&thistup->t_tid, &(*itup)->t_tid)) *newblkno = ptr->block.blkno; data += IndexTupleSize(thistup); } /* Set up rightlinks */ if (ptr->next && ptr->block.blkno != GIST_ROOT_BLKNO) GistPageGetOpaque(ptr->page)->rightlink = ptr->next->block.blkno; else GistPageGetOpaque(ptr->page)->rightlink = oldrlink; diff --git a/src/backend/commands/copyfrom.c b/src/backend/commands/copyfrom.c index a976008b3d4..e8bb168aea8 100644 --- a/src/backend/commands/copyfrom.c +++ b/src/backend/commands/copyfrom.c @@ -1183,45 +1183,44 @@ CopyFrom(CopyFromState cstate) * 'attnamelist': List of char *, columns to include. NIL selects all cols. * 'options': List of DefElem. See copy_opt_item in gram.y for selections. * * Returns a CopyFromState, to be passed to NextCopyFrom and related functions. 
*/ CopyFromState BeginCopyFrom(ParseState *pstate, Relation rel, Node *whereClause, const char *filename, bool is_program, copy_data_source_cb data_source_cb, List *attnamelist, List *options) { CopyFromState cstate; bool pipe = (filename == NULL); TupleDesc tupDesc; AttrNumber num_phys_attrs, num_defaults; FmgrInfo *in_functions; Oid *typioparams; - int attnum; Oid in_func_oid; int *defmap; ExprState **defexprs; MemoryContext oldcontext; bool volatile_defexprs; const int progress_cols[] = { PROGRESS_COPY_COMMAND, PROGRESS_COPY_TYPE, PROGRESS_COPY_BYTES_TOTAL }; int64 progress_vals[] = { PROGRESS_COPY_COMMAND_FROM, 0, 0 }; /* Allocate workspace and zero all fields */ cstate = (CopyFromStateData *) palloc0(sizeof(CopyFromStateData)); /* * We allocate everything used by a cstate in a new memory context. This * avoids memory leaks during repeated use of COPY in a query. @@ -1382,45 +1381,45 @@ BeginCopyFrom(ParseState *pstate, initStringInfo(&cstate->attribute_buf); /* Assign range table, we'll need it in CopyFrom. */ if (pstate) cstate->range_table = pstate->p_rtable; tupDesc = RelationGetDescr(cstate->rel); num_phys_attrs = tupDesc->natts; num_defaults = 0; volatile_defexprs = false; /* * Pick up the required catalog information for each attribute in the * relation, including the input function, the element type (to pass to * the input function), and info about defaults and constraints. (Which * input function we use depends on text/binary format choice.) */ in_functions = (FmgrInfo *) palloc(num_phys_attrs * sizeof(FmgrInfo)); typioparams = (Oid *) palloc(num_phys_attrs * sizeof(Oid)); defmap = (int *) palloc(num_phys_attrs * sizeof(int)); defexprs = (ExprState **) palloc(num_phys_attrs * sizeof(ExprState *)); - for (attnum = 1; attnum <= num_phys_attrs; attnum++) + for (int attnum = 1; attnum <= num_phys_attrs; attnum++) { Form_pg_attribute att = TupleDescAttr(tupDesc, attnum - 1); /* We don't need info for dropped attributes */ if (att->attisdropped) continue; /* Fetch the input function and typioparam info */ if (cstate->opts.binary) getTypeBinaryInputInfo(att->atttypid, &in_func_oid, &typioparams[attnum - 1]); else getTypeInputInfo(att->atttypid, &in_func_oid, &typioparams[attnum - 1]); fmgr_info(in_func_oid, &in_functions[attnum - 1]); /* Get default info if needed */ if (!list_member_int(cstate->attnumlist, attnum) && !att->attgenerated) { /* attribute is NOT to be copied from input */ /* use default value if one exists */ Expr *defexpr = (Expr *) build_column_default(cstate->rel, diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 667f2a4cd16..3c6e09815e0 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -546,45 +546,44 @@ DefineIndex(Oid relationId, Form_pg_am accessMethodForm; IndexAmRoutine *amRoutine; bool amcanorder; amoptions_function amoptions; bool partitioned; bool safe_index; Datum reloptions; int16 *coloptions; IndexInfo *indexInfo; bits16 flags; bits16 constr_flags; int numberOfAttributes; int numberOfKeyAttributes; TransactionId limitXmin; ObjectAddress address; LockRelId heaprelid; LOCKTAG heaplocktag; LOCKMODE lockmode; Snapshot snapshot; Oid root_save_userid; int root_save_sec_context; int root_save_nestlevel; - int i; root_save_nestlevel = NewGUCNestLevel(); /* * Some callers need us to run with an empty default_tablespace; this is a * necessary hack to be able to reproduce catalog state accurately when * recreating indexes after table-rewriting ALTER TABLE. 
*/ if (stmt->reset_default_tblspc) (void) set_config_option("default_tablespace", "", PGC_USERSET, PGC_S_SESSION, GUC_ACTION_SAVE, true, 0, false); /* * Force non-concurrent build on temporary relations, even if CONCURRENTLY * was requested. Other backends can't access a temporary relation, so * there's no harm in grabbing a stronger lock, and a non-concurrent DROP * is more efficient. Do this before any use of the concurrent option is * done. */ if (stmt->concurrent && get_rel_persistence(relationId) != RELPERSISTENCE_TEMP) concurrent = true; @@ -1028,65 +1027,65 @@ DefineIndex(Oid relationId, if (!found) { Form_pg_attribute att; att = TupleDescAttr(RelationGetDescr(rel), key->partattrs[i] - 1); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unique constraint on partitioned table must include all partitioning columns"), errdetail("%s constraint on table \"%s\" lacks column \"%s\" which is part of the partition key.", constraint_type, RelationGetRelationName(rel), NameStr(att->attname)))); } } } /* * We disallow indexes on system columns. They would not necessarily get * updated correctly, and they don't seem useful anyway. */ - for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++) + for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++) { AttrNumber attno = indexInfo->ii_IndexAttrNumbers[i]; if (attno < 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("index creation on system columns is not supported"))); } /* * Also check for system columns used in expressions or predicates. */ if (indexInfo->ii_Expressions || indexInfo->ii_Predicate) { Bitmapset *indexattrs = NULL; pull_varattnos((Node *) indexInfo->ii_Expressions, 1, &indexattrs); pull_varattnos((Node *) indexInfo->ii_Predicate, 1, &indexattrs); - for (i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++) + for (int i = FirstLowInvalidHeapAttributeNumber + 1; i < 0; i++) { if (bms_is_member(i - FirstLowInvalidHeapAttributeNumber, indexattrs)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("index creation on system columns is not supported"))); } } /* Is index safe for others to ignore? See set_indexsafe_procflags() */ safe_index = indexInfo->ii_Expressions == NIL && indexInfo->ii_Predicate == NIL; /* * Report index creation if appropriate (delay this till after most of the * error checks) */ if (stmt->isconstraint && !quiet) { const char *constraint_type; if (stmt->primary) @@ -1224,45 +1223,45 @@ DefineIndex(Oid relationId, /* * We'll need an IndexInfo describing the parent index. The one * built above is almost good enough, but not quite, because (for * example) its predicate expression if any hasn't been through * expression preprocessing. The most reliable way to get an * IndexInfo that will match those for child indexes is to build * it the same way, using BuildIndexInfo(). */ parentIndex = index_open(indexRelationId, lockmode); indexInfo = BuildIndexInfo(parentIndex); parentDesc = RelationGetDescr(rel); /* * For each partition, scan all existing indexes; if one matches * our index definition and is not already attached to some other * parent index, attach it to the one we just created. * * If none matches, build a new index by calling ourselves * recursively with the same options (except for the index name). 
*/ - for (i = 0; i < nparts; i++) + for (int i = 0; i < nparts; i++) { Oid childRelid = part_oids[i]; Relation childrel; Oid child_save_userid; int child_save_sec_context; int child_save_nestlevel; List *childidxs; ListCell *cell; AttrMap *attmap; bool found = false; childrel = table_open(childRelid, lockmode); GetUserIdAndSecContext(&child_save_userid, &child_save_sec_context); SetUserIdAndSecContext(childrel->rd_rel->relowner, child_save_sec_context | SECURITY_RESTRICTED_OPERATION); child_save_nestlevel = NewGUCNestLevel(); /* * Don't try to create indexes on foreign tables, though. Skip * those if a regular index, or fail if trying to create a diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 96d200e4461..933c3049016 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -1277,51 +1277,50 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet } } } /* * Compute the final value of all aggregates for one group. * * This function handles only one grouping set at a time, which the caller must * have selected. It's also the caller's responsibility to adjust the supplied * pergroup parameter to point to the current set's transvalues. * * Results are stored in the output econtext aggvalues/aggnulls. */ static void finalize_aggregates(AggState *aggstate, AggStatePerAgg peraggs, AggStatePerGroup pergroup) { ExprContext *econtext = aggstate->ss.ps.ps_ExprContext; Datum *aggvalues = econtext->ecxt_aggvalues; bool *aggnulls = econtext->ecxt_aggnulls; int aggno; - int transno; /* * If there were any DISTINCT and/or ORDER BY aggregates, sort their * inputs and run the transition functions. */ - for (transno = 0; transno < aggstate->numtrans; transno++) + for (int transno = 0; transno < aggstate->numtrans; transno++) { AggStatePerTrans pertrans = &aggstate->pertrans[transno]; AggStatePerGroup pergroupstate; pergroupstate = &pergroup[transno]; if (pertrans->aggsortrequired) { Assert(aggstate->aggstrategy != AGG_HASHED && aggstate->aggstrategy != AGG_MIXED); if (pertrans->numInputs == 1) process_ordered_aggregate_single(aggstate, pertrans, pergroupstate); else process_ordered_aggregate_multi(aggstate, pertrans, pergroupstate); } else if (pertrans->numDistinctCols > 0 && pertrans->haslast) { diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 1545ff9f161..f9d40fa1a0d 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -1631,54 +1631,54 @@ interpret_ident_response(const char *ident_response, while (pg_isblank(*cursor)) cursor++; /* skip blanks */ if (strcmp(response_type, "USERID") != 0) return false; else { /* * It's a USERID response. Good. "cursor" should be pointing * to the colon that precedes the operating system type. */ if (*cursor != ':') return false; else { cursor++; /* Go over colon */ /* Skip over operating system field. */ while (*cursor != ':' && *cursor != '\r') cursor++; if (*cursor != ':') return false; else { - int i; /* Index into *ident_user */ + int j; /* Index into *ident_user */ cursor++; /* Go over colon */ while (pg_isblank(*cursor)) cursor++; /* skip blanks */ /* Rest of line is user name. Copy it over. 
*/ - i = 0; + j = 0; while (*cursor != '\r' && i < IDENT_USERNAME_MAX) - ident_user[i++] = *cursor++; - ident_user[i] = '\0'; + ident_user[j++] = *cursor++; + ident_user[j] = '\0'; return true; } } } } } } /* * Talk to the ident server on "remote_addr" and find out who * owns the tcp connection to "local_addr" * If the username is successfully retrieved, check the usermap. * * XXX: Using WaitLatchOrSocket() and doing a CHECK_FOR_INTERRUPTS() if the * latch was set would improve the responsiveness to timeouts/cancellations. */ static int ident_inet(hbaPort *port) { const SockAddr remote_addr = port->raddr; const SockAddr local_addr = port->laddr; diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 1e94c5aa7c4..75acea149c7 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -2428,101 +2428,101 @@ cost_sort(Path *path, PlannerInfo *root, startup_cost += disable_cost; startup_cost += input_cost; path->rows = tuples; path->startup_cost = startup_cost; path->total_cost = startup_cost + run_cost; } /* * append_nonpartial_cost * Estimate the cost of the non-partial paths in a Parallel Append. * The non-partial paths are assumed to be the first "numpaths" paths * from the subpaths list, and to be in order of decreasing cost. */ static Cost append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers) { Cost *costarr; int arrlen; ListCell *l; ListCell *cell; - int i; int path_index; int min_index; int max_index; if (numpaths == 0) return 0; /* * Array length is number of workers or number of relevant paths, * whichever is less. */ arrlen = Min(parallel_workers, numpaths); costarr = (Cost *) palloc(sizeof(Cost) * arrlen); /* The first few paths will each be claimed by a different worker. */ path_index = 0; foreach(cell, subpaths) { Path *subpath = (Path *) lfirst(cell); if (path_index == arrlen) break; costarr[path_index++] = subpath->total_cost; } /* * Since subpaths are sorted by decreasing cost, the last one will have * the minimum cost. */ min_index = arrlen - 1; /* * For each of the remaining subpaths, add its cost to the array element * with minimum cost. */ for_each_cell(l, subpaths, cell) { Path *subpath = (Path *) lfirst(l); - int i; /* Consider only the non-partial paths */ if (path_index++ == numpaths) break; costarr[min_index] += subpath->total_cost; /* Update the new min cost array index */ - for (min_index = i = 0; i < arrlen; i++) + min_index = 0; + for (int i = 0; i < arrlen; i++) { if (costarr[i] < costarr[min_index]) min_index = i; } } /* Return the highest cost from the array */ - for (max_index = i = 0; i < arrlen; i++) + max_index = 0; + for (int i = 0; i < arrlen; i++) { if (costarr[i] > costarr[max_index]) max_index = i; } return costarr[max_index]; } /* * cost_append * Determines and returns the cost of an Append node. */ void cost_append(AppendPath *apath, PlannerInfo *root) { ListCell *l; apath->path.startup_cost = 0; apath->path.total_cost = 0; apath->path.rows = 0; if (apath->subpaths == NIL) diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c index 5410a68bc91..91b9635dc0a 100644 --- a/src/backend/statistics/mcv.c +++ b/src/backend/statistics/mcv.c @@ -1585,45 +1585,44 @@ mcv_match_expression(Node *expr, Bitmapset *keys, List *exprs, Oid *collid) * Evaluate clauses using the MCV list, and update the match bitmap. * * A match bitmap keeps match/mismatch status for each MCV item, and we * update it based on additional clauses. 
We also use it to skip items * that can't possibly match (e.g. item marked as "mismatch" can't change * to "match" when evaluating AND clause list). * * The function also returns a flag indicating whether there was an * equality condition for all attributes, the minimum frequency in the MCV * list, and a total MCV frequency (sum of frequencies for all items). * * XXX Currently the match bitmap uses a bool for each MCV item, which is * somewhat wasteful as we could do with just a single bit, thus reducing * the size to ~1/8. It would also allow us to combine bitmaps simply using * & and |, which should be faster than min/max. The bitmaps are fairly * small, though (thanks to the cap on the MCV list size). */ static bool * mcv_get_match_bitmap(PlannerInfo *root, List *clauses, Bitmapset *keys, List *exprs, MCVList *mcvlist, bool is_or) { - int i; ListCell *l; bool *matches; /* The bitmap may be partially built. */ Assert(clauses != NIL); Assert(mcvlist != NULL); Assert(mcvlist->nitems > 0); Assert(mcvlist->nitems <= STATS_MCVLIST_MAX_ITEMS); matches = palloc(sizeof(bool) * mcvlist->nitems); memset(matches, !is_or, sizeof(bool) * mcvlist->nitems); /* * Loop through the list of clauses, and for each of them evaluate all the * MCV items not yet eliminated by the preceding clauses. */ foreach(l, clauses) { Node *clause = (Node *) lfirst(l); /* if it's a RestrictInfo, then extract the clause */ if (IsA(clause, RestrictInfo)) @@ -1640,45 +1639,45 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses, /* valid only after examine_opclause_args returns true */ Node *clause_expr; Const *cst; bool expronleft; int idx; Oid collid; fmgr_info(get_opcode(expr->opno), &opproc); /* extract the var/expr and const from the expression */ if (!examine_opclause_args(expr->args, &clause_expr, &cst, &expronleft)) elog(ERROR, "incompatible clause"); /* match the attribute/expression to a dimension of the statistic */ idx = mcv_match_expression(clause_expr, keys, exprs, &collid); /* * Walk through the MCV items and evaluate the current clause. We * can skip items that were already ruled out, and terminate if * there are no remaining MCV items that might possibly match. */ - for (i = 0; i < mcvlist->nitems; i++) + for (int i = 0; i < mcvlist->nitems; i++) { bool match = true; MCVItem *item = &mcvlist->items[i]; Assert(idx >= 0); /* * When the MCV item or the Const value is NULL we can treat * this as a mismatch. We must not call the operator because * of strictness. */ if (item->isnull[idx] || cst->constisnull) { matches[i] = RESULT_MERGE(matches[i], is_or, false); continue; } /* * Skip MCV items that can't change result in the bitmap. Once * the value gets false for AND-lists, or true for OR-lists, * we don't need to look at more clauses. */ @@ -1747,45 +1746,45 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses, * Deconstruct the array constant, unless it's NULL (we'll cover * that case below) */ if (!cst->constisnull) { arrayval = DatumGetArrayTypeP(cst->constvalue); get_typlenbyvalalign(ARR_ELEMTYPE(arrayval), &elmlen, &elmbyval, &elmalign); deconstruct_array(arrayval, ARR_ELEMTYPE(arrayval), elmlen, elmbyval, elmalign, &elem_values, &elem_nulls, &num_elems); } /* match the attribute/expression to a dimension of the statistic */ idx = mcv_match_expression(clause_expr, keys, exprs, &collid); /* * Walk through the MCV items and evaluate the current clause. We * can skip items that were already ruled out, and terminate if * there are no remaining MCV items that might possibly match. 
*/ - for (i = 0; i < mcvlist->nitems; i++) + for (int i = 0; i < mcvlist->nitems; i++) { int j; bool match = !expr->useOr; MCVItem *item = &mcvlist->items[i]; /* * When the MCV item or the Const value is NULL we can treat * this as a mismatch. We must not call the operator because * of strictness. */ if (item->isnull[idx] || cst->constisnull) { matches[i] = RESULT_MERGE(matches[i], is_or, false); continue; } /* * Skip MCV items that can't change result in the bitmap. Once * the value gets false for AND-lists, or true for OR-lists, * we don't need to look at more clauses. */ if (RESULT_IS_FINAL(matches[i], is_or)) @@ -1818,164 +1817,162 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses, elem_value)); match = RESULT_MERGE(match, expr->useOr, elem_match); } /* update the match bitmap with the result */ matches[i] = RESULT_MERGE(matches[i], is_or, match); } } else if (IsA(clause, NullTest)) { NullTest *expr = (NullTest *) clause; Node *clause_expr = (Node *) (expr->arg); /* match the attribute/expression to a dimension of the statistic */ int idx = mcv_match_expression(clause_expr, keys, exprs, NULL); /* * Walk through the MCV items and evaluate the current clause. We * can skip items that were already ruled out, and terminate if * there are no remaining MCV items that might possibly match. */ - for (i = 0; i < mcvlist->nitems; i++) + for (int i = 0; i < mcvlist->nitems; i++) { bool match = false; /* assume mismatch */ MCVItem *item = &mcvlist->items[i]; /* if the clause mismatches the MCV item, update the bitmap */ switch (expr->nulltesttype) { case IS_NULL: match = (item->isnull[idx]) ? true : match; break; case IS_NOT_NULL: match = (!item->isnull[idx]) ? true : match; break; } /* now, update the match bitmap, depending on OR/AND type */ matches[i] = RESULT_MERGE(matches[i], is_or, match); } } else if (is_orclause(clause) || is_andclause(clause)) { /* AND/OR clause, with all subclauses being compatible */ - int i; BoolExpr *bool_clause = ((BoolExpr *) clause); List *bool_clauses = bool_clause->args; /* match/mismatch bitmap for each MCV item */ bool *bool_matches = NULL; Assert(bool_clauses != NIL); Assert(list_length(bool_clauses) >= 2); /* build the match bitmap for the OR-clauses */ bool_matches = mcv_get_match_bitmap(root, bool_clauses, keys, exprs, mcvlist, is_orclause(clause)); /* * Merge the bitmap produced by mcv_get_match_bitmap into the * current one. We need to consider if we're evaluating AND or OR * condition when merging the results. */ - for (i = 0; i < mcvlist->nitems; i++) + for (int i = 0; i < mcvlist->nitems; i++) matches[i] = RESULT_MERGE(matches[i], is_or, bool_matches[i]); pfree(bool_matches); } else if (is_notclause(clause)) { /* NOT clause, with all subclauses compatible */ - int i; BoolExpr *not_clause = ((BoolExpr *) clause); List *not_args = not_clause->args; /* match/mismatch bitmap for each MCV item */ bool *not_matches = NULL; Assert(not_args != NIL); Assert(list_length(not_args) == 1); /* build the match bitmap for the NOT-clause */ not_matches = mcv_get_match_bitmap(root, not_args, keys, exprs, mcvlist, false); /* * Merge the bitmap produced by mcv_get_match_bitmap into the * current one. We're handling a NOT clause, so invert the result * before merging it into the global bitmap. 
*/ - for (i = 0; i < mcvlist->nitems; i++) + for (int i = 0; i < mcvlist->nitems; i++) matches[i] = RESULT_MERGE(matches[i], is_or, !not_matches[i]); pfree(not_matches); } else if (IsA(clause, Var)) { /* Var (has to be a boolean Var, possibly from below NOT) */ Var *var = (Var *) (clause); /* match the attribute to a dimension of the statistic */ int idx = bms_member_index(keys, var->varattno); Assert(var->vartype == BOOLOID); /* * Walk through the MCV items and evaluate the current clause. We * can skip items that were already ruled out, and terminate if * there are no remaining MCV items that might possibly match. */ - for (i = 0; i < mcvlist->nitems; i++) + for (int i = 0; i < mcvlist->nitems; i++) { MCVItem *item = &mcvlist->items[i]; bool match = false; /* if the item is NULL, it's a mismatch */ if (!item->isnull[idx] && DatumGetBool(item->values[idx])) match = true; /* update the result bitmap */ matches[i] = RESULT_MERGE(matches[i], is_or, match); } } else { /* Otherwise, it must be a bare boolean-returning expression */ int idx; /* match the expression to a dimension of the statistic */ idx = mcv_match_expression(clause, keys, exprs, NULL); /* * Walk through the MCV items and evaluate the current clause. We * can skip items that were already ruled out, and terminate if * there are no remaining MCV items that might possibly match. */ - for (i = 0; i < mcvlist->nitems; i++) + for (int i = 0; i < mcvlist->nitems; i++) { bool match; MCVItem *item = &mcvlist->items[i]; /* "match" just means it's bool TRUE */ match = !item->isnull[idx] && DatumGetBool(item->values[idx]); /* now, update the match bitmap, depending on OR/AND type */ matches[i] = RESULT_MERGE(matches[i], is_or, match); } } } return matches; } /* * mcv_combine_selectivities * Combine per-column and multi-column MCV selectivity estimates. * * simple_sel is a "simple" selectivity estimate (produced without using any diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 7a1202c6096..49d3b8c9dd0 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -3164,45 +3164,44 @@ DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum, { InvalidateBuffer(bufHdr); /* releases spinlock */ break; } } if (j >= nforks) UnlockBufHdr(bufHdr, buf_state); } } /* --------------------------------------------------------------------- * DropRelationsAllBuffers * * This function removes from the buffer pool all the pages of all * forks of the specified relations. It's equivalent to calling * DropRelationBuffers once per fork per relation with firstDelBlock = 0. * -------------------------------------------------------------------- */ void DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators) { int i; - int j; int n = 0; SMgrRelation *rels; BlockNumber (*block)[MAX_FORKNUM + 1]; uint64 nBlocksToInvalidate = 0; RelFileLocator *locators; bool cached = true; bool use_bsearch; if (nlocators == 0) return; rels = palloc(sizeof(SMgrRelation) * nlocators); /* non-local relations */ /* If it's a local relation, it's localbuf.c's problem. */ for (i = 0; i < nlocators; i++) { if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator)) { if (smgr_reln[i]->smgr_rlocator.backend == MyBackendId) DropRelationAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator); } else @@ -3213,72 +3212,72 @@ DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators) * If there are no non-local relations, then we're done. Release the * memory and return. 
*/ if (n == 0) { pfree(rels); return; } /* * This is used to remember the number of blocks for all the relations * forks. */ block = (BlockNumber (*)[MAX_FORKNUM + 1]) palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1)); /* * We can avoid scanning the entire buffer pool if we know the exact size * of each of the given relation forks. See DropRelationBuffers. */ for (i = 0; i < n && cached; i++) { - for (j = 0; j <= MAX_FORKNUM; j++) + for (int j = 0; j <= MAX_FORKNUM; j++) { /* Get the number of blocks for a relation's fork. */ block[i][j] = smgrnblocks_cached(rels[i], j); /* We need to only consider the relation forks that exists. */ if (block[i][j] == InvalidBlockNumber) { if (!smgrexists(rels[i], j)) continue; cached = false; break; } /* calculate the total number of blocks to be invalidated */ nBlocksToInvalidate += block[i][j]; } } /* * We apply the optimization iff the total number of blocks to invalidate * is below the BUF_DROP_FULL_SCAN_THRESHOLD. */ if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD) { for (i = 0; i < n; i++) { - for (j = 0; j <= MAX_FORKNUM; j++) + for (int j = 0; j <= MAX_FORKNUM; j++) { /* ignore relation forks that doesn't exist */ if (!BlockNumberIsValid(block[i][j])) continue; /* drop all the buffers for a particular relation fork */ FindAndDropRelationBuffers(rels[i]->smgr_rlocator.locator, j, block[i][j], 0); } } pfree(block); pfree(rels); return; } pfree(block); locators = palloc(sizeof(RelFileLocator) * n); /* non-local relations */ for (i = 0; i < n; i++) locators[i] = rels[i]->smgr_rlocator.locator; /* diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 2c689157329..c0d09edf9d0 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -11557,45 +11557,44 @@ dumpFunc(Archive *fout, const FuncInfo *finfo) char *proretset; char *prosrc; char *probin; char *prosqlbody; char *funcargs; char *funciargs; char *funcresult; char *protrftypes; char *prokind; char *provolatile; char *proisstrict; char *prosecdef; char *proleakproof; char *proconfig; char *procost; char *prorows; char *prosupport; char *proparallel; char *lanname; char **configitems = NULL; int nconfigitems = 0; const char *keyword; - int i; /* Do nothing in data-only dump */ if (dopt->dataOnly) return; query = createPQExpBuffer(); q = createPQExpBuffer(); delqry = createPQExpBuffer(); asPart = createPQExpBuffer(); if (!fout->is_prepared[PREPQUERY_DUMPFUNC]) { /* Set up query for function-specific details */ appendPQExpBufferStr(query, "PREPARE dumpFunc(pg_catalog.oid) AS\n"); appendPQExpBufferStr(query, "SELECT\n" "proretset,\n" "prosrc,\n" "probin,\n" "provolatile,\n" @@ -11757,49 +11756,48 @@ dumpFunc(Archive *fout, const FuncInfo *finfo) appendPQExpBuffer(q, "CREATE %s %s.%s", keyword, fmtId(finfo->dobj.namespace->dobj.name), funcfullsig ? funcfullsig : funcsig); if (prokind[0] == PROKIND_PROCEDURE) /* no result type to output */ ; else if (funcresult) appendPQExpBuffer(q, " RETURNS %s", funcresult); else appendPQExpBuffer(q, " RETURNS %s%s", (proretset[0] == 't') ? 
"SETOF " : "", getFormattedTypeName(fout, finfo->prorettype, zeroIsError)); appendPQExpBuffer(q, "\n LANGUAGE %s", fmtId(lanname)); if (*protrftypes) { Oid *typeids = palloc(FUNC_MAX_ARGS * sizeof(Oid)); - int i; appendPQExpBufferStr(q, " TRANSFORM "); parseOidArray(protrftypes, typeids, FUNC_MAX_ARGS); - for (i = 0; typeids[i]; i++) + for (int i = 0; typeids[i]; i++) { if (i != 0) appendPQExpBufferStr(q, ", "); appendPQExpBuffer(q, "FOR TYPE %s", getFormattedTypeName(fout, typeids[i], zeroAsNone)); } } if (prokind[0] == PROKIND_WINDOW) appendPQExpBufferStr(q, " WINDOW"); if (provolatile[0] != PROVOLATILE_VOLATILE) { if (provolatile[0] == PROVOLATILE_IMMUTABLE) appendPQExpBufferStr(q, " IMMUTABLE"); else if (provolatile[0] == PROVOLATILE_STABLE) appendPQExpBufferStr(q, " STABLE"); else if (provolatile[0] != PROVOLATILE_VOLATILE) pg_fatal("unrecognized provolatile value for function \"%s\"", finfo->dobj.name); } @@ -11834,45 +11832,45 @@ dumpFunc(Archive *fout, const FuncInfo *finfo) } if (proretset[0] == 't' && strcmp(prorows, "0") != 0 && strcmp(prorows, "1000") != 0) appendPQExpBuffer(q, " ROWS %s", prorows); if (strcmp(prosupport, "-") != 0) { /* We rely on regprocout to provide quoting and qualification */ appendPQExpBuffer(q, " SUPPORT %s", prosupport); } if (proparallel[0] != PROPARALLEL_UNSAFE) { if (proparallel[0] == PROPARALLEL_SAFE) appendPQExpBufferStr(q, " PARALLEL SAFE"); else if (proparallel[0] == PROPARALLEL_RESTRICTED) appendPQExpBufferStr(q, " PARALLEL RESTRICTED"); else if (proparallel[0] != PROPARALLEL_UNSAFE) pg_fatal("unrecognized proparallel value for function \"%s\"", finfo->dobj.name); } - for (i = 0; i < nconfigitems; i++) + for (int i = 0; i < nconfigitems; i++) { /* we feel free to scribble on configitems[] here */ char *configitem = configitems[i]; char *pos; pos = strchr(configitem, '='); if (pos == NULL) continue; *pos++ = '\0'; appendPQExpBuffer(q, "\n SET %s TO ", fmtId(configitem)); /* * Variables that are marked GUC_LIST_QUOTE were already fully quoted * by flatten_set_variable_args() before they were put into the * proconfig array. However, because the quoting rules used there * aren't exactly like SQL's, we have to break the list value apart * and then quote the elements as string literals. (The elements may * be double-quoted as-is, but we can't just feed them to the SQL * parser; it would do the wrong thing with elements that are * zero-length or longer than NAMEDATALEN.) 
* * Variables that are not so marked should just be emitted as simple diff --git a/src/interfaces/ecpg/pgtypeslib/numeric.c b/src/interfaces/ecpg/pgtypeslib/numeric.c index a97b3300cb8..b666c909084 100644 --- a/src/interfaces/ecpg/pgtypeslib/numeric.c +++ b/src/interfaces/ecpg/pgtypeslib/numeric.c @@ -1043,45 +1043,44 @@ select_div_scale(numeric *var1, numeric *var2, int *rscale) res_dscale = Max(res_dscale, NUMERIC_MIN_DISPLAY_SCALE); res_dscale = Min(res_dscale, NUMERIC_MAX_DISPLAY_SCALE); /* Select result scale */ *rscale = res_dscale + 4; return res_dscale; } int PGTYPESnumeric_div(numeric *var1, numeric *var2, numeric *result) { NumericDigit *res_digits; int res_ndigits; int res_sign; int res_weight; numeric dividend; numeric divisor[10]; int ndigits_tmp; int weight_tmp; int rscale_tmp; int ri; - int i; long guess; long first_have; long first_div; int first_nextdigit; int stat = 0; int rscale; int res_dscale = select_div_scale(var1, var2, &rscale); int err = -1; NumericDigit *tmp_buf; /* * First of all division by zero check */ ndigits_tmp = var2->ndigits + 1; if (ndigits_tmp == 1) { errno = PGTYPES_NUM_DIVIDE_ZERO; return -1; } /* * Determine the result sign, weight and number of digits to calculate @@ -1090,45 +1089,45 @@ PGTYPESnumeric_div(numeric *var1, numeric *var2, numeric *result) res_sign = NUMERIC_POS; else res_sign = NUMERIC_NEG; res_weight = var1->weight - var2->weight + 1; res_ndigits = rscale + res_weight; if (res_ndigits <= 0) res_ndigits = 1; /* * Now result zero check */ if (var1->ndigits == 0) { zero_var(result); result->rscale = rscale; return 0; } /* * Initialize local variables */ init_var(÷nd); - for (i = 1; i < 10; i++) + for (int i = 1; i < 10; i++) init_var(&divisor[i]); /* * Make a copy of the divisor which has one leading zero digit */ divisor[1].ndigits = ndigits_tmp; divisor[1].rscale = var2->ndigits; divisor[1].sign = NUMERIC_POS; divisor[1].buf = digitbuf_alloc(ndigits_tmp); if (divisor[1].buf == NULL) goto done; divisor[1].digits = divisor[1].buf; divisor[1].digits[0] = 0; memcpy(&(divisor[1].digits[1]), var2->digits, ndigits_tmp - 1); /* * Make a copy of the dividend */ dividend.ndigits = var1->ndigits; dividend.weight = 0; dividend.rscale = var1->ndigits; dividend.sign = NUMERIC_POS; @@ -1162,53 +1161,52 @@ PGTYPESnumeric_div(numeric *var1, numeric *var2, numeric *result) first_have = 0; first_nextdigit = 0; weight_tmp = 1; rscale_tmp = divisor[1].rscale; for (ri = 0; ri <= res_ndigits; ri++) { first_have = first_have * 10; if (first_nextdigit >= 0 && first_nextdigit < dividend.ndigits) first_have += dividend.digits[first_nextdigit]; first_nextdigit++; guess = (first_have * 10) / first_div + 1; if (guess > 9) guess = 9; while (guess > 0) { if (divisor[guess].buf == NULL) { - int i; long sum = 0; memcpy(&divisor[guess], &divisor[1], sizeof(numeric)); divisor[guess].buf = digitbuf_alloc(divisor[guess].ndigits); if (divisor[guess].buf == NULL) goto done; divisor[guess].digits = divisor[guess].buf; - for (i = divisor[1].ndigits - 1; i >= 0; i--) + for (int i = divisor[1].ndigits - 1; i >= 0; i--) { sum += divisor[1].digits[i] * guess; divisor[guess].digits[i] = sum % 10; sum /= 10; } } divisor[guess].weight = weight_tmp; divisor[guess].rscale = rscale_tmp; stat = cmp_abs(÷nd, &divisor[guess]); if (stat >= 0) break; guess--; } res_digits[ri + 1] = guess; if (stat == 0) { ri++; break; @@ -1249,45 +1247,45 @@ PGTYPESnumeric_div(numeric *var1, numeric *var2, numeric *result) while (result->ndigits > 0 && *(result->digits) == 0) { (result->digits)++; 
(result->weight)--; (result->ndigits)--; } while (result->ndigits > 0 && result->digits[result->ndigits - 1] == 0) (result->ndigits)--; if (result->ndigits == 0) result->sign = NUMERIC_POS; result->dscale = res_dscale; err = 0; /* if we've made it this far, return success */ done: /* * Tidy up */ if (dividend.buf != NULL) digitbuf_free(dividend.buf); - for (i = 1; i < 10; i++) + for (int i = 1; i < 10; i++) { if (divisor[i].buf != NULL) digitbuf_free(divisor[i].buf); } return err; } int PGTYPESnumeric_cmp(numeric *var1, numeric *var2) { /* use cmp_abs function to calculate the result */ /* both are positive: normal comparison with cmp_abs */ if (var1->sign == NUMERIC_POS && var2->sign == NUMERIC_POS) return cmp_abs(var1, var2); /* both are negative: return the inverse of the normal comparison */ if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG) { /*