pgindent run for 9.6

Robert Haas 2016-06-09 18:02:36 -04:00
parent 9164deea2f
commit 4bc424b968
252 changed files with 2670 additions and 2558 deletions

View File

@@ -209,8 +209,8 @@ static void
 explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
     /*
-     * For rate sampling, randomly choose top-level statement. Either
-     * all nested statements will be explained or none will.
+     * For rate sampling, randomly choose top-level statement. Either all
+     * nested statements will be explained or none will.
     */
     if (auto_explain_log_min_duration >= 0 && nesting_level == 0)
         current_query_sampled = (random() < auto_explain_sample_rate *
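
A minimal standalone sketch of the sampling scheme this comment describes (the GUC plumbing and the MAX_RANDOM_VALUE bound are assumptions here, not the module's actual declarations): the decision is made once per top-level statement, so nested statements are either all explained or all skipped.

    #include <stdlib.h>
    #include <stdbool.h>

    #define MAX_RANDOM_VALUE 0x7FFFFFFF     /* assumed range of random() */

    static int  nesting_level = 0;
    static bool current_query_sampled = true;
    static double sample_rate = 0.1;        /* hypothetical stand-in for
                                             * auto_explain.sample_rate */

    /* Decide once at the top level; nested statements inherit the verdict. */
    static void
    maybe_sample_query(void)
    {
        if (nesting_level == 0)
            current_query_sampled = (random() < sample_rate * MAX_RANDOM_VALUE);
    }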

View File

@@ -33,8 +33,8 @@ PG_MODULE_MAGIC;
 typedef struct
 {
     BloomState  blstate;        /* bloom index state */
-    MemoryContext tmpCtx;       /* temporary memory context reset after
-                                 * each tuple */
+    MemoryContext tmpCtx;       /* temporary memory context reset after each
+                                 * tuple */
     char        data[BLCKSZ];   /* cached page */
     int64       count;          /* number of tuples in cached page */
 } BloomBuildState;
@@ -140,8 +140,8 @@ blbuild(Relation heap, Relation index, IndexInfo *indexInfo)
                                    bloomBuildCallback, (void *) &buildstate);

     /*
-     * There are could be some items in cached page. Flush this page
-     * if needed.
+     * There are could be some items in cached page. Flush this page if
+     * needed.
      */
     if (buildstate.count > 0)
         flushCachedPage(index, &buildstate);

View File

@@ -33,10 +33,9 @@ typedef struct BloomPageOpaqueData
 {
     OffsetNumber maxoff;        /* number of index tuples on page */
     uint16      flags;          /* see bit definitions below */
-    uint16      unused;         /* placeholder to force maxaligning of size
-                                 * of BloomPageOpaqueData and to place
-                                 * bloom_page_id exactly at the end of page
-                                 */
+    uint16      unused;         /* placeholder to force maxaligning of size of
+                                 * BloomPageOpaqueData and to place
+                                 * bloom_page_id exactly at the end of page */
     uint16      bloom_page_id;  /* for identification of BLOOM indexes */
 } BloomPageOpaqueData;
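
The role of the unused field can be demonstrated with a standalone mock (a sketch, not the bloom code itself): four uint16 members give the struct a size of 8 bytes, a multiple of the usual MAXALIGN, so when the struct is placed as the page's special space, bloom_page_id occupies the last two bytes of the page.

    #include <stdint.h>
    #include <stdio.h>

    /* Mock of the layout described in the comment above. */
    typedef struct
    {
        uint16_t maxoff;
        uint16_t flags;
        uint16_t unused;        /* pads the struct to 8 bytes */
        uint16_t bloom_page_id; /* ends up on the page's last two bytes */
    } MockPageOpaque;

    int
    main(void)
    {
        printf("%zu\n", sizeof(MockPageOpaque));    /* prints 8 */
        return 0;
    }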
@@ -102,8 +101,8 @@ typedef struct BloomOptions
 {
     int32       vl_len_;        /* varlena header (do not touch directly!) */
     int         bloomLength;    /* length of signature in words (not bits!) */
-    int         bitSize[INDEX_MAX_KEYS];    /* # of bits generated for each
-                                             * index key */
+    int         bitSize[INDEX_MAX_KEYS];    /* # of bits generated for
+                                             * each index key */
 } BloomOptions;

 /*

View File

@@ -37,6 +37,7 @@ PG_FUNCTION_INFO_V1(blhandler);

 /* Kind of relation options for bloom index */
 static relopt_kind bl_relopt_kind;
+
 /* parse table for fillRelOptions */
 static relopt_parse_elt bl_relopt_tab[INDEX_MAX_KEYS + 1];
@@ -215,7 +216,9 @@ myRand(void)
      * October 1988, p. 1195.
      *----------
      */
-    int32       hi, lo, x;
+    int32       hi,
+                lo,
+                x;

     /* Must be in [1, 0x7ffffffe] range at this point. */
     hi = next / 127773;
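
The hi/lo split being reindented here is Schrage's decomposition of the Park-Miller "minimal standard" generator cited in the comment: x is updated as 16807 * x mod (2^31 - 1), using 2147483647 = 16807 * 127773 + 2836 so the multiplication never overflows a signed 32-bit integer. A self-contained sketch, with the self-test value from the CACM paper:

    #include <stdio.h>
    #include <stdint.h>

    static int32_t seed = 1;    /* must stay in [1, 0x7ffffffe] */

    static int32_t
    minimal_standard_rand(void)
    {
        int32_t hi = seed / 127773;
        int32_t lo = seed % 127773;
        int32_t x = 16807 * lo - 2836 * hi;

        if (x < 0)
            x += 0x7fffffff;    /* wrap modulo 2^31 - 1 */
        return seed = x;
    }

    int
    main(void)
    {
        /* Park and Miller's check: starting from seed 1, the 10000th
         * value must be 1043618065. */
        for (int i = 0; i < 10000; i++)
            minimal_standard_rand();
        printf("%d\n", seed);   /* 1043618065 */
        return 0;
    }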

View File

@@ -109,8 +109,8 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
                             OffsetNumberNext(BloomPageGetMaxOffset(page))));

         /*
-         * Add page to notFullPage list if we will not mark page as deleted and
-         * there is a free space on it
+         * Add page to notFullPage list if we will not mark page as deleted
+         * and there is a free space on it
          */
         if (BloomPageGetMaxOffset(page) != 0 &&
             BloomPageGetFreeSpace(&state, page) > state.sizeOfBloomTuple &&

View File

@@ -444,9 +444,9 @@ ean2ISBN(char *isn)
     unsigned    check;

     /*
-     * The number should come in this format: 978-0-000-00000-0
-     * or may be an ISBN-13 number, 979-..., which does not have a short
-     * representation. Do the short output version if possible.
+     * The number should come in this format: 978-0-000-00000-0 or may be an
+     * ISBN-13 number, 979-..., which does not have a short representation. Do
+     * the short output version if possible.
      */
     if (strncmp("978-", isn, 4) == 0)
     {
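
For reference, a hedged sketch of the conversion this comment is about (not the contrib/isn implementation): a 978-prefixed EAN-13 has a short ISBN-10 form obtained by dropping the prefix and recomputing the mod-11 check digit, while 979-prefixed numbers keep the long form.

    #include <stdio.h>
    #include <string.h>

    /* digits: "978" followed by the nine payload digits. */
    static int
    ean13_to_isbn10(const char *digits, char *out)
    {
        if (strncmp(digits, "978", 3) != 0)
            return 0;           /* e.g. 979-...: no short representation */

        int sum = 0;

        for (int i = 0; i < 9; i++)
        {
            out[i] = digits[3 + i];
            sum += (out[i] - '0') * (10 - i);   /* weights 10 down to 2 */
        }

        int check = (11 - sum % 11) % 11;

        out[9] = (check == 10) ? 'X' : (char) ('0' + check);
        out[10] = '\0';
        return 1;
    }

    int
    main(void)
    {
        char isbn[11];

        if (ean13_to_isbn10("978030640615", isbn))
            printf("%s\n", isbn);   /* 0306406152, i.e. 0-306-40615-2 */
        return 0;
    }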

View File

@@ -318,10 +318,10 @@ tuple_data_split_internal(Oid relid, char *tupdata,
         is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits);

         /*
-         * Tuple header can specify less attributes than tuple descriptor
-         * as ALTER TABLE ADD COLUMN without DEFAULT keyword does not
-         * actually change tuples in pages, so attributes with numbers greater
-         * than (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
+         * Tuple header can specify less attributes than tuple descriptor as
+         * ALTER TABLE ADD COLUMN without DEFAULT keyword does not actually
+         * change tuples in pages, so attributes with numbers greater than
+         * (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
          */
         if (i >= (t_infomask2 & HEAP_NATTS_MASK))
             is_null = true;
@@ -334,6 +334,7 @@ tuple_data_split_internal(Oid relid, char *tupdata,
             {
                 off = att_align_pointer(off, tupdesc->attrs[i]->attalign, -1,
                                         tupdata + off);
+
                 /*
                  * As VARSIZE_ANY throws an exception if it can't properly
                  * detect the type of external storage in macros VARTAG_SIZE,

View File

@@ -293,7 +293,8 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
             }

             /*
-             * See comment in gin_trgm_consistent() about * upper bound formula
+             * See comment in gin_trgm_consistent() about * upper bound
+             * formula
              */
             res = (nkeys == 0)
                 ? GIN_FALSE : (((((float4) ntrue) / ((float4) nkeys)) >= nlimit)
@@ -330,9 +331,9 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
         else
         {
             /*
-             * As trigramsMatchGraph implements a monotonic boolean function,
-             * promoting all GIN_MAYBE keys to GIN_TRUE will give a
-             * conservative result.
+             * As trigramsMatchGraph implements a monotonic boolean
+             * function, promoting all GIN_MAYBE keys to GIN_TRUE will
+             * give a conservative result.
              */
             boolcheck = (bool *) palloc(sizeof(bool) * nkeys);
             for (i = 0; i < nkeys; i++)
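
The monotonicity argument generalizes: for any boolean function built from AND/OR of the keys, evaluating once with every GIN_MAYBE key demoted to false and once promoted to true brackets the exact answer. A toy sketch of that bracketing (at_least_two is a stand-in for trigramsMatchGraph, which is not reproduced here):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { GIN_FALSE, GIN_TRUE, GIN_MAYBE } GinTernaryValue;

    /* Toy monotonic predicate: "at least two keys present". */
    static bool
    at_least_two(const bool *check, int nkeys)
    {
        int n = 0;

        for (int i = 0; i < nkeys; i++)
            n += check[i];
        return n >= 2;
    }

    static GinTernaryValue
    triconsistent(const GinTernaryValue *keys, int nkeys)
    {
        bool lo[8], hi[8];      /* assume nkeys <= 8 in this sketch */

        for (int i = 0; i < nkeys; i++)
        {
            lo[i] = (keys[i] == GIN_TRUE);  /* pessimistic inputs */
            hi[i] = (keys[i] != GIN_FALSE); /* optimistic inputs */
        }
        if (at_least_two(lo, nkeys))
            return GIN_TRUE;    /* true even pessimistically */
        if (!at_least_two(hi, nkeys))
            return GIN_FALSE;   /* false even optimistically */
        return GIN_MAYBE;
    }

    int
    main(void)
    {
        GinTernaryValue keys[3] = {GIN_TRUE, GIN_MAYBE, GIN_FALSE};

        printf("%d\n", triconsistent(keys, 3));     /* 2, i.e. GIN_MAYBE */
        return 0;
    }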

View File

@@ -296,6 +296,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS)

     if (GIST_LEAF(entry))
     {                           /* all leafs contains orig trgm */
+
         /*
          * Prevent gcc optimizing the tmpsml variable using volatile
          * keyword. Otherwise comparison of nlimit and tmpsml may give
@@ -476,12 +477,14 @@ gtrgm_distance(PG_FUNCTION_ARGS)
     *recheck = strategy == WordDistanceStrategyNumber;
     if (GIST_LEAF(entry))
     {                           /* all leafs contains orig trgm */
+
         /*
          * Prevent gcc optimizing the sml variable using volatile
          * keyword. Otherwise res can differ from the
          * word_similarity_dist_op() function.
          */
         float4 volatile sml = cnt_sml(qtrg, key, *recheck);
+
         res = 1.0 - sml;
     }
     else if (ISALLTRUE(key))
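
Both hunks concern the same volatile trick. A hedged illustration of the failure mode it guards against (not the pg_trgm code): on targets that keep float intermediates in wider registers, a freshly computed float4 can carry excess precision, so a threshold comparison may disagree with one made after the value has been rounded to true 32-bit width elsewhere. Routing the value through a volatile variable forces a store and reload, discarding the extra bits.

    #include <stdio.h>

    static int
    meets_threshold(float raw, float threshold)
    {
        float volatile sml = raw;   /* round to genuine float4 precision */

        return sml >= threshold;
    }

    int
    main(void)
    {
        printf("%d\n", meets_threshold(0.6f, 0.6f));    /* 1 */
        return 0;
    }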

View File

@@ -362,7 +362,8 @@ static pos_trgm *
 make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
 {
     pos_trgm   *result;
-    int         i, len = len1 + len2;
+    int         i,
+                len = len1 + len2;

     result = (pos_trgm *) palloc(sizeof(pos_trgm) * len);
@@ -488,10 +489,11 @@ iterate_word_similarity(int *trg2indexes,
                 lower = tmp_lower;
                 count = tmp_count;
             }
+
             /*
              * if we only check that word similarity is greater than
-             * pg_trgm.word_similarity_threshold we do not need to calculate
-             * a maximum similarity.
+             * pg_trgm.word_similarity_threshold we do not need to
+             * calculate a maximum similarity.
              */
             if (check_only && smlr_cur >= word_similarity_threshold)
                 break;
@@ -506,6 +508,7 @@ iterate_word_similarity(int *trg2indexes,
         }
         smlr_max = Max(smlr_max, smlr_cur);
+
         /*
          * if we only check that word similarity is greater than
          * pg_trgm.word_similarity_threshold we do not need to calculate a
@@ -517,6 +520,7 @@ iterate_word_similarity(int *trg2indexes,
             for (tmp_lower = prev_lower; tmp_lower < lower; tmp_lower++)
             {
                 int         tmp_trgindex;
+
                 tmp_trgindex = trg2indexes[tmp_lower];
                 if (lastpos[tmp_trgindex] == tmp_lower)
                     lastpos[tmp_trgindex] = -1;
@@ -595,6 +599,7 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
         if (i > 0)
         {
             int         cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg);
+
             if (cmp != 0)
             {
                 if (found[j])

View File

@@ -320,8 +320,8 @@ collect_visibility_data(Oid relid, bool include_pd)
             info->bits[blkno] |= (1 << 1);

     /*
-     * Page-level data requires reading every block, so only get it if
-     * the caller needs it. Use a buffer access strategy, too, to prevent
+     * Page-level data requires reading every block, so only get it if the
+     * caller needs it. Use a buffer access strategy, too, to prevent
      * cache-trashing.
      */
     if (include_pd)

View File

@@ -675,9 +675,9 @@ pgfdw_xact_callback(XactEvent event, void *arg)
                     /*
                      * If a command has been submitted to the remote server by
                      * using an asynchronous execution function, the command
-                     * might not have yet completed. Check to see if a command
-                     * is still being processed by the remote server, and if so,
-                     * request cancellation of the command.
+                     * might not have yet completed. Check to see if a
+                     * command is still being processed by the remote server,
+                     * and if so, request cancellation of the command.
                      */
                     if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
                     {
@@ -798,11 +798,11 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
                 entry->have_error = true;

             /*
-             * If a command has been submitted to the remote server by using an
-             * asynchronous execution function, the command might not have yet
-             * completed. Check to see if a command is still being processed by
-             * the remote server, and if so, request cancellation of the
-             * command.
+             * If a command has been submitted to the remote server by using
+             * an asynchronous execution function, the command might not have
+             * yet completed. Check to see if a command is still being
+             * processed by the remote server, and if so, request cancellation
+             * of the command.
              */
             if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
             {
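
Both comments describe the same cancellation dance. A sketch using libpq's public cancel API (the wrapper function below is illustrative, not part of postgres_fdw):

    #include <stdio.h>
    #include <libpq-fe.h>

    static void
    cancel_if_active(PGconn *conn)
    {
        /* Is an asynchronously submitted command still running remotely? */
        if (PQtransactionStatus(conn) == PQTRANS_ACTIVE)
        {
            PGcancel   *cancel = PQgetCancel(conn);
            char        errbuf[256];

            if (cancel != NULL)
            {
                if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
                    fprintf(stderr, "could not send cancel: %s\n", errbuf);
                PQfreeCancel(cancel);
            }
        }
    }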

View File

@@ -1583,8 +1583,8 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
         /*
          * All other system attributes are fetched as 0, except for table OID,
          * which is fetched as the local table OID. However, we must be
-         * careful; the table could be beneath an outer join, in which case
-         * it must go to NULL whenever the rest of the row does.
+         * careful; the table could be beneath an outer join, in which case it
+         * must go to NULL whenever the rest of the row does.
          */
         Oid         fetchval = 0;
@@ -1633,10 +1633,10 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
                               0 - FirstLowInvalidHeapAttributeNumber);

         /*
-         * In case the whole-row reference is under an outer join then it has to
-         * go NULL whenver the rest of the row goes NULL. Deparsing a join query
-         * would always involve multiple relations, thus qualify_col would be
-         * true.
+         * In case the whole-row reference is under an outer join then it has
+         * to go NULL whenver the rest of the row goes NULL. Deparsing a join
+         * query would always involve multiple relations, thus qualify_col
+         * would be true.
          */
         if (qualify_col)
         {

View File

@@ -4063,19 +4063,20 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
     /*
      * Pull the other remote conditions from the joining relations into join
-     * clauses or other remote clauses (remote_conds) of this relation wherever
-     * possible. This avoids building subqueries at every join step, which is
-     * not currently supported by the deparser logic.
+     * clauses or other remote clauses (remote_conds) of this relation
+     * wherever possible. This avoids building subqueries at every join step,
+     * which is not currently supported by the deparser logic.
      *
      * For an inner join, clauses from both the relations are added to the
-     * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from the
-     * outer side are added to remote_conds since those can be evaluated after
-     * the join is evaluated. The clauses from inner side are added to the
-     * joinclauses, since they need to evaluated while constructing the join.
+     * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from
+     * the outer side are added to remote_conds since those can be evaluated
+     * after the join is evaluated. The clauses from inner side are added to
+     * the joinclauses, since they need to evaluated while constructing the
+     * join.
      *
-     * For a FULL OUTER JOIN, the other clauses from either relation can not be
-     * added to the joinclauses or remote_conds, since each relation acts as an
-     * outer relation for the other. Consider such full outer join as
+     * For a FULL OUTER JOIN, the other clauses from either relation can not
+     * be added to the joinclauses or remote_conds, since each relation acts
+     * as an outer relation for the other. Consider such full outer join as
      * unshippable because of the reasons mentioned above in this comment.
      *
      * The joining sides can not have local conditions, thus no need to test

View File

@@ -101,7 +101,8 @@ static relopt_int intRelOpts[] =
             "fillfactor",
             "Packs table pages only to this percentage",
             RELOPT_KIND_HEAP,
-            ShareUpdateExclusiveLock    /* since it applies only to later inserts */
+            ShareUpdateExclusiveLock    /* since it applies only to later
+                                         * inserts */
         },
         HEAP_DEFAULT_FILLFACTOR, HEAP_MIN_FILLFACTOR, 100
     },
@@ -110,7 +111,8 @@ static relopt_int intRelOpts[] =
             "fillfactor",
             "Packs btree index pages only to this percentage",
             RELOPT_KIND_BTREE,
-            ShareUpdateExclusiveLock    /* since it applies only to later inserts */
+            ShareUpdateExclusiveLock    /* since it applies only to later
+                                         * inserts */
         },
         BTREE_DEFAULT_FILLFACTOR, BTREE_MIN_FILLFACTOR, 100
     },
@@ -119,7 +121,8 @@ static relopt_int intRelOpts[] =
             "fillfactor",
             "Packs hash index pages only to this percentage",
             RELOPT_KIND_HASH,
-            ShareUpdateExclusiveLock    /* since it applies only to later inserts */
+            ShareUpdateExclusiveLock    /* since it applies only to later
+                                         * inserts */
         },
         HASH_DEFAULT_FILLFACTOR, HASH_MIN_FILLFACTOR, 100
     },
@@ -128,7 +131,8 @@ static relopt_int intRelOpts[] =
             "fillfactor",
             "Packs gist index pages only to this percentage",
             RELOPT_KIND_GIST,
-            ShareUpdateExclusiveLock    /* since it applies only to later inserts */
+            ShareUpdateExclusiveLock    /* since it applies only to later
+                                         * inserts */
         },
         GIST_DEFAULT_FILLFACTOR, GIST_MIN_FILLFACTOR, 100
     },
@@ -137,7 +141,8 @@ static relopt_int intRelOpts[] =
             "fillfactor",
             "Packs spgist index pages only to this percentage",
             RELOPT_KIND_SPGIST,
-            ShareUpdateExclusiveLock    /* since it applies only to later inserts */
+            ShareUpdateExclusiveLock    /* since it applies only to later
+                                         * inserts */
         },
         SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100
     },

View File

@@ -745,18 +745,17 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
     bool        inVacuum = (stats == NULL);

     /*
-     * We would like to prevent concurrent cleanup process. For
-     * that we will lock metapage in exclusive mode using LockPage()
-     * call. Nobody other will use that lock for metapage, so
-     * we keep possibility of concurrent insertion into pending list
+     * We would like to prevent concurrent cleanup process. For that we will
+     * lock metapage in exclusive mode using LockPage() call. Nobody other
+     * will use that lock for metapage, so we keep possibility of concurrent
+     * insertion into pending list
      */
     if (inVacuum)
     {
         /*
-         * We are called from [auto]vacuum/analyze or
-         * gin_clean_pending_list() and we would like to wait
-         * concurrent cleanup to finish.
+         * We are called from [auto]vacuum/analyze or gin_clean_pending_list()
+         * and we would like to wait concurrent cleanup to finish.
          */
         LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
         workMemory =
@@ -766,9 +765,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
     else
     {
         /*
-         * We are called from regular insert and if we see
-         * concurrent cleanup just exit in hope that concurrent
-         * process will clean up pending list.
+         * We are called from regular insert and if we see concurrent cleanup
+         * just exit in hope that concurrent process will clean up pending
+         * list.
          */
         if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock))
             return;
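
The two entry modes read naturally as a blocking versus conditional lock acquisition. A pthreads analogy (not PostgreSQL's LockPage API; the mutex here stands in for the metapage lock):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t pending_cleanup_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Vacuum waits for any concurrent cleanup to finish; a regular insert
     * tries once and gives up if somebody else is already cleaning the
     * pending list.  The caller unlocks when cleanup is done. */
    static bool
    enter_pending_cleanup(bool inVacuum)
    {
        if (inVacuum)
        {
            pthread_mutex_lock(&pending_cleanup_lock);  /* block */
            return true;
        }
        return pthread_mutex_trylock(&pending_cleanup_lock) == 0;
    }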
@@ -829,9 +828,10 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
         Assert(!GinPageIsDeleted(page));

         /*
-         * Are we walk through the page which as we remember was a tail when we
-         * start our cleanup? But if caller asks us to clean up whole pending
-         * list then ignore old tail, we will work until list becomes empty.
+         * Are we walk through the page which as we remember was a tail when
+         * we start our cleanup? But if caller asks us to clean up whole
+         * pending list then ignore old tail, we will work until list becomes
+         * empty.
          */
         if (blkno == blknoFinish && full_clean == false)
             cleanupFinish = true;
@@ -917,8 +917,8 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
                                      * locking */

             /*
-             * remove read pages from pending list, at this point all
-             * content of read pages is in regular structure
+             * remove read pages from pending list, at this point all content
+             * of read pages is in regular structure
              */
             shiftList(index, metabuffer, blkno, fill_fsm, stats);
@@ -961,9 +961,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
     ReleaseBuffer(metabuffer);

     /*
-     * As pending list pages can have a high churn rate, it is
-     * desirable to recycle them immediately to the FreeSpace Map when
-     * ordinary backends clean the list.
+     * As pending list pages can have a high churn rate, it is desirable to
+     * recycle them immediately to the FreeSpace Map when ordinary backends
+     * clean the list.
      */
     if (fsm_vac && fill_fsm)
         IndexFreeSpaceMapVacuum(index);

View File

@@ -540,8 +540,10 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
     {
         /* Yes, so initialize stats to zeroes */
         stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+
         /*
-         * and cleanup any pending inserts */
+         * and cleanup any pending inserts
+         */
         ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
                          false, stats);
     }

View File

@@ -1499,7 +1499,8 @@ gistvacuumpage(Relation rel, Page page, Buffer buffer)
 {
     OffsetNumber deletable[MaxIndexTuplesPerPage];
     int         ndeletable = 0;
-    OffsetNumber offnum, maxoff;
+    OffsetNumber offnum,
+                maxoff;

     Assert(GistPageIsLeaf(page));

View File

@@ -57,8 +57,9 @@ gistkillitems(IndexScanDesc scan)
     page = BufferGetPage(buffer);

     /*
-     * If page LSN differs it means that the page was modified since the last read.
-     * killedItems could be not valid so LP_DEAD hints applying is not safe.
+     * If page LSN differs it means that the page was modified since the last
+     * read. killedItems could be not valid so LP_DEAD hints applying is not
+     * safe.
      */
     if (PageGetLSN(page) != so->curPageLSN)
     {
@@ -70,8 +71,8 @@ gistkillitems(IndexScanDesc scan)
     Assert(GistPageIsLeaf(page));

     /*
-     * Mark all killedItems as dead. We need no additional recheck,
-     * because, if page was modified, pageLSN must have changed.
+     * Mark all killedItems as dead. We need no additional recheck, because,
+     * if page was modified, pageLSN must have changed.
      */
     for (i = 0; i < so->numKilled; i++)
     {
@@ -404,6 +405,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
             continue;

         it = (IndexTuple) PageGetItem(page, iid);
+
         /*
          * Must call gistindex_keytest in tempCxt, and clean up any leftover
          * junk afterward.

View File

@@ -230,8 +230,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
             ScanKey     skey = scan->keyData + i;

             /*
-             * Copy consistent support function to ScanKey structure
-             * instead of function implementing filtering operator.
+             * Copy consistent support function to ScanKey structure instead
+             * of function implementing filtering operator.
              */
             fmgr_info_copy(&(skey->sk_func),
                            &(so->giststate->consistentFn[skey->sk_attno - 1]),
@@ -303,8 +303,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
             so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid);

             /*
-             * Copy distance support function to ScanKey structure
-             * instead of function implementing ordering operator.
+             * Copy distance support function to ScanKey structure instead of
+             * function implementing ordering operator.
              */
             fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);

View File

@@ -191,10 +191,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
         return;

     /*
-     * It might seem like multiplying the number of lock waiters by as much
-     * as 20 is too aggressive, but benchmarking revealed that smaller numbers
-     * were insufficient. 512 is just an arbitrary cap to prevent pathological
-     * results.
+     * It might seem like multiplying the number of lock waiters by as much as
+     * 20 is too aggressive, but benchmarking revealed that smaller numbers
+     * were insufficient. 512 is just an arbitrary cap to prevent
+     * pathological results.
      */
     extraBlocks = Min(512, lockWaiters * 20);
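
A worked instance of the heuristic, using PostgreSQL's Min macro (the wrapper function is illustrative): 10 waiters extend by 200 blocks, and the 512 cap takes over once 26 or more backends queue up (26 * 20 = 520).

    #include <stdio.h>

    #define Min(x, y) ((x) < (y) ? (x) : (y))

    static int
    extra_blocks_for(int lockWaiters)
    {
        return Min(512, lockWaiters * 20);
    }

    int
    main(void)
    {
        printf("%d\n", extra_blocks_for(10));   /* 200 */
        printf("%d\n", extra_blocks_for(40));   /* capped at 512 */
        return 0;
    }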
@@ -225,10 +225,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
     }

     /*
-     * Updating the upper levels of the free space map is too expensive
-     * to do for every block, but it's worth doing once at the end to make
-     * sure that subsequent insertion activity sees all of those nifty free
-     * pages we just inserted.
+     * Updating the upper levels of the free space map is too expensive to do
+     * for every block, but it's worth doing once at the end to make sure that
+     * subsequent insertion activity sees all of those nifty free pages we
+     * just inserted.
      *
      * Note that we're using the freespace value that was reported for the
      * last block we added as if it were the freespace value for every block
@@ -547,8 +547,8 @@ loop:
     }

     /*
-     * In addition to whatever extension we performed above, we always add
-     * at least one block to satisfy our own request.
+     * In addition to whatever extension we performed above, we always add at
+     * least one block to satisfy our own request.
      *
      * XXX This does an lseek - rather expensive - but at the moment it is the
      * only way to accurately determine how many blocks are in a relation. Is

View File

@@ -395,7 +395,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
                      * Check for a conflict-in as we would if we were going to
                      * write to this page. We aren't actually going to write,
                      * but we want a chance to report SSI conflicts that would
-                     * otherwise be masked by this unique constraint violation.
+                     * otherwise be masked by this unique constraint
+                     * violation.
                      */
                     CheckForSerializableConflictIn(rel, NULL, buf);

View File

@@ -813,8 +813,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
     /*
      * Check to see if we need to issue one final WAL record for this index,
-     * which may be needed for correctness on a hot standby node when
-     * non-MVCC index scans could take place.
+     * which may be needed for correctness on a hot standby node when non-MVCC
+     * index scans could take place.
      *
      * If the WAL is replayed in hot standby, the replay process needs to get
      * cleanup locks on all index leaf pages, just as we've been doing here.
@@ -1025,13 +1025,13 @@ restart:
     if (ndeletable > 0)
     {
         /*
-         * Notice that the issued XLOG_BTREE_VACUUM WAL record includes all
-         * information to the replay code to allow it to get a cleanup lock
-         * on all pages between the previous lastBlockVacuumed and this page.
-         * This ensures that WAL replay locks all leaf pages at some point,
-         * which is important should non-MVCC scans be requested.
-         * This is currently unused on standby, but we record it anyway, so
-         * that the WAL contains the required information.
+         * Notice that the issued XLOG_BTREE_VACUUM WAL record includes
+         * all information to the replay code to allow it to get a cleanup
+         * lock on all pages between the previous lastBlockVacuumed and
+         * this page. This ensures that WAL replay locks all leaf pages at
+         * some point, which is important should non-MVCC scans be
+         * requested. This is currently unused on standby, but we record
+         * it anyway, so that the WAL contains the required information.
          *
          * Since we can visit leaf pages out-of-order when recursing,
          * replay might end up locking such pages an extra time, but it

View File

@@ -392,15 +392,15 @@ btree_xlog_vacuum(XLogReaderState *record)
     xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record);

     /*
-     * This section of code is thought to be no longer needed, after
-     * analysis of the calling paths. It is retained to allow the code
-     * to be reinstated if a flaw is revealed in that thinking.
+     * This section of code is thought to be no longer needed, after analysis
+     * of the calling paths. It is retained to allow the code to be reinstated
+     * if a flaw is revealed in that thinking.
      *
      * If we are running non-MVCC scans using this index we need to do some
      * additional work to ensure correctness, which is known as a "pin scan"
      * described in more detail in next paragraphs. We used to do the extra
-     * work in all cases, whereas we now avoid that work in most cases.
-     * If lastBlockVacuumed is set to InvalidBlockNumber then we skip the
+     * work in all cases, whereas we now avoid that work in most cases. If
+     * lastBlockVacuumed is set to InvalidBlockNumber then we skip the
      * additional work required for the pin scan.
      *
      * Avoiding this extra work is important since it requires us to touch

View File

@@ -153,9 +153,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
      * No-op if the module is not active.
      *
      * An unlocked read here is fine, because in a standby (the only place
-     * where the flag can change in flight) this routine is only called by
-     * the recovery process, which is also the only process which can change
-     * the flag.
+     * where the flag can change in flight) this routine is only called by the
+     * recovery process, which is also the only process which can change the
+     * flag.
      */
     if (!commitTsShared->commitTsActive)
         return;
@@ -767,8 +767,8 @@ ExtendCommitTs(TransactionId newestXact)
     int         pageno;

     /*
-     * Nothing to do if module not enabled. Note we do an unlocked read of the
-     * flag here, which is okay because this routine is only called from
+     * Nothing to do if module not enabled. Note we do an unlocked read of
+     * the flag here, which is okay because this routine is only called from
      * GetNewTransactionId, which is never called in a standby.
      */
     Assert(!InRecovery);

View File

@@ -52,9 +52,8 @@ typedef struct
     Buffer      buffer;         /* registered buffer */
     int         flags;          /* flags for this buffer */
     int         deltaLen;       /* space consumed in delta field */
-    char       *image;          /* copy of page image for modification,
-                                 * do not do it in-place to have aligned
-                                 * memory chunk */
+    char       *image;          /* copy of page image for modification, do not
+                                 * do it in-place to have aligned memory chunk */
     char        delta[MAX_DELTA_SIZE];  /* delta between page images */
 } PageData;

View File

@@ -988,8 +988,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
                 char       *oldest_datname = get_database_name(oldest_datoid);

                 /*
-                 * Immediately kick autovacuum into action as we're already
-                 * in ERROR territory.
+                 * Immediately kick autovacuum into action as we're already in
+                 * ERROR territory.
                  */
                 SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);

View File

@@ -134,9 +134,9 @@ CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
         nworkers = 0;

     /*
-     * If we are running under serializable isolation, we can't use
-     * parallel workers, at least not until somebody enhances that mechanism
-     * to be parallel-aware.
+     * If we are running under serializable isolation, we can't use parallel
+     * workers, at least not until somebody enhances that mechanism to be
+     * parallel-aware.
      */
     if (IsolationIsSerializable())
         nworkers = 0;
@@ -646,9 +646,9 @@ DestroyParallelContext(ParallelContext *pcxt)
     }

     /*
-     * We can't finish transaction commit or abort until all of the
-     * workers have exited. This means, in particular, that we can't respond
-     * to interrupts at this stage.
+     * We can't finish transaction commit or abort until all of the workers
+     * have exited. This means, in particular, that we can't respond to
+     * interrupts at this stage.
      */
     HOLD_INTERRUPTS();
     WaitForParallelWorkersToExit(pcxt);
@@ -958,9 +958,9 @@ ParallelWorkerMain(Datum main_arg)
      */

     /*
-     * Join locking group. We must do this before anything that could try
-     * to acquire a heavyweight lock, because any heavyweight locks acquired
-     * to this point could block either directly against the parallel group
+     * Join locking group. We must do this before anything that could try to
+     * acquire a heavyweight lock, because any heavyweight locks acquired to
+     * this point could block either directly against the parallel group
      * leader or against some process which in turn waits for a lock that
      * conflicts with the parallel group leader, causing an undetected
      * deadlock. (If we can't join the lock group, the leader has gone away,

View File

@@ -140,11 +140,11 @@ typedef struct GlobalTransactionData
     TimestampTz prepared_at;    /* time of preparation */

     /*
-     * Note that we need to keep track of two LSNs for each GXACT.
-     * We keep track of the start LSN because this is the address we must
-     * use to read state data back from WAL when committing a prepared GXACT.
-     * We keep track of the end LSN because that is the LSN we need to wait
-     * for prior to commit.
+     * Note that we need to keep track of two LSNs for each GXACT. We keep
+     * track of the start LSN because this is the address we must use to read
+     * state data back from WAL when committing a prepared GXACT. We keep
+     * track of the end LSN because that is the LSN we need to wait for prior
+     * to commit.
      */
     XLogRecPtr  prepare_start_lsn; /* XLOG offset of prepare record start */
     XLogRecPtr  prepare_end_lsn;   /* XLOG offset of prepare record end */
@@ -1347,10 +1347,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
     xid = pgxact->xid;

     /*
-     * Read and validate 2PC state data.
-     * State data will typically be stored in WAL files if the LSN is after the
-     * last checkpoint record, or moved to disk if for some reason they have
-     * lived for a long time.
+     * Read and validate 2PC state data. State data will typically be stored
+     * in WAL files if the LSN is after the last checkpoint record, or moved
+     * to disk if for some reason they have lived for a long time.
      */
     if (gxact->ondisk)
         buf = ReadTwoPhaseFile(xid, true);
@@ -1605,22 +1604,20 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
     TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START();

     /*
-     * We are expecting there to be zero GXACTs that need to be
-     * copied to disk, so we perform all I/O while holding
-     * TwoPhaseStateLock for simplicity. This prevents any new xacts
-     * from preparing while this occurs, which shouldn't be a problem
-     * since the presence of long-lived prepared xacts indicates the
-     * transaction manager isn't active.
+     * We are expecting there to be zero GXACTs that need to be copied to
+     * disk, so we perform all I/O while holding TwoPhaseStateLock for
+     * simplicity. This prevents any new xacts from preparing while this
+     * occurs, which shouldn't be a problem since the presence of long-lived
+     * prepared xacts indicates the transaction manager isn't active.
      *
-     * It's also possible to move I/O out of the lock, but on
-     * every error we should check whether somebody committed our
-     * transaction in different backend. Let's leave this optimisation
-     * for future, if somebody will spot that this place cause
-     * bottleneck.
+     * It's also possible to move I/O out of the lock, but on every error we
+     * should check whether somebody committed our transaction in different
+     * backend. Let's leave this optimisation for future, if somebody will
+     * spot that this place cause bottleneck.
      *
-     * Note that it isn't possible for there to be a GXACT with
-     * a prepare_end_lsn set prior to the last checkpoint yet
-     * is marked invalid, because of the efforts with delayChkpt.
+     * Note that it isn't possible for there to be a GXACT with a
+     * prepare_end_lsn set prior to the last checkpoint yet is marked invalid,
+     * because of the efforts with delayChkpt.
      */
     LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
     for (i = 0; i < TwoPhaseState->numPrepXacts; i++)

View File

@@ -1166,13 +1166,13 @@ RecordTransactionCommit(void)
         /*
          * Transactions without an assigned xid can contain invalidation
          * messages (e.g. explicit relcache invalidations or catcache
-         * invalidations for inplace updates); standbys need to process
-         * those. We can't emit a commit record without an xid, and we don't
-         * want to force assigning an xid, because that'd be problematic for
-         * e.g. vacuum. Hence we emit a bespoke record for the
-         * invalidations. We don't want to use that in case a commit record is
-         * emitted, so they happen synchronously with commits (besides not
-         * wanting to emit more WAL recoreds).
+         * invalidations for inplace updates); standbys need to process those.
+         * We can't emit a commit record without an xid, and we don't want to
+         * force assigning an xid, because that'd be problematic for e.g.
+         * vacuum. Hence we emit a bespoke record for the invalidations. We
+         * don't want to use that in case a commit record is emitted, so they
+         * happen synchronously with commits (besides not wanting to emit more
+         * WAL recoreds).
          */
         if (nmsgs != 0)
         {
@@ -1272,8 +1272,8 @@ RecordTransactionCommit(void)
          * this case, but we don't currently try to do that. It would certainly
          * cause problems at least in Hot Standby mode, where the
          * KnownAssignedXids machinery requires tracking every XID assignment. It
-         * might be OK to skip it only when wal_level < replica, but for now
-         * we don't.)
+         * might be OK to skip it only when wal_level < replica, but for now we
+         * don't.)
          *
          * However, if we're doing cleanup of any non-temp rels or committing any
          * command that wanted to force sync commit, then we must flush XLOG
@@ -5486,8 +5486,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
     /*
      * If asked by the primary (because someone is waiting for a synchronous
-     * commit = remote_apply), we will need to ask walreceiver to send a
-     * reply immediately.
+     * commit = remote_apply), we will need to ask walreceiver to send a reply
+     * immediately.
      */
     if (XactCompletionApplyFeedback(parsed->xinfo))
         XLogRequestWalReceiverReply();

View File

@@ -5880,8 +5880,8 @@ CheckRequiredParameterValues(void)
     }

     /*
-     * For Hot Standby, the WAL must be generated with 'replica' mode, and
-     * we must have at least as many backend slots as the primary.
+     * For Hot Standby, the WAL must be generated with 'replica' mode, and we
+     * must have at least as many backend slots as the primary.
      */
     if (ArchiveRecoveryRequested && EnableHotStandby)
     {
@@ -6163,10 +6163,10 @@ StartupXLOG(void)
          * is no use of such file. There is no harm in retaining it, but it
          * is better to get rid of the map file so that we don't have any
          * redundant file in data directory and it will avoid any sort of
-         * confusion. It seems prudent though to just rename the file out
-         * of the way rather than delete it completely, also we ignore any
-         * error that occurs in rename operation as even if map file is
-         * present without backup_label file, it is harmless.
+         * confusion. It seems prudent though to just rename the file out of
+         * the way rather than delete it completely, also we ignore any error
+         * that occurs in rename operation as even if map file is present
+         * without backup_label file, it is harmless.
          */
         if (stat(TABLESPACE_MAP, &st) == 0)
         {
@@ -6883,8 +6883,8 @@ StartupXLOG(void)
                     SpinLockRelease(&XLogCtl->info_lck);

                     /*
-                     * If rm_redo called XLogRequestWalReceiverReply, then we
-                     * wake up the receiver so that it notices the updated
+                     * If rm_redo called XLogRequestWalReceiverReply, then we wake
+                     * up the receiver so that it notices the updated
                      * lastReplayedEndRecPtr and sends a reply to the master.
                      */
                     if (doRequestWalReceiverReply)

View File

@@ -104,8 +104,8 @@ pg_start_backup(PG_FUNCTION_ARGS)
         MemoryContext oldcontext;

         /*
-         * Label file and tablespace map file need to be long-lived, since they
-         * are read in pg_stop_backup.
+         * Label file and tablespace map file need to be long-lived, since
+         * they are read in pg_stop_backup.
          */
         oldcontext = MemoryContextSwitchTo(TopMemoryContext);
         label_file = makeStringInfo();
@@ -156,10 +156,10 @@ pg_stop_backup(PG_FUNCTION_ARGS)
                  errhint("Did you mean to use pg_stop_backup('f')?")));

     /*
-     * Exclusive backups were typically started in a different connection,
-     * so don't try to verify that exclusive_backup_running is set in this one.
-     * Actual verification that an exclusive backup is in fact running is handled
-     * inside do_pg_stop_backup.
+     * Exclusive backups were typically started in a different connection, so
+     * don't try to verify that exclusive_backup_running is set in this one.
+     * Actual verification that an exclusive backup is in fact running is
+     * handled inside do_pg_stop_backup.
      */
     stoppoint = do_pg_stop_backup(NULL, true, NULL);
@@ -248,9 +248,8 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS)
                  errhint("Did you mean to use pg_stop_backup('t')?")));

         /*
-         * Stop the non-exclusive backup. Return a copy of the backup
-         * label and tablespace map so they can be written to disk by
-         * the caller.
+         * Stop the non-exclusive backup. Return a copy of the backup label
+         * and tablespace map so they can be written to disk by the caller.
          */
         stoppoint = do_pg_stop_backup(label_file->data, true, NULL);
         nonexclusive_backup_running = false;

View File

@@ -410,8 +410,8 @@ AggregateCreate(const char *aggName,
         Oid         combineType;

         /*
-         * Combine function must have 2 argument, each of which is the
-         * trans type
+         * Combine function must have 2 argument, each of which is the trans
+         * type
          */
         fnArgs[0] = aggTransType;
         fnArgs[1] = aggTransType;
@@ -440,8 +440,9 @@ AggregateCreate(const char *aggName,
     }

     /*
-     * Validate the serialization function, if present. We must ensure that the
-     * return type of this function is the same as the specified serialType.
+     * Validate the serialization function, if present. We must ensure that
+     * the return type of this function is the same as the specified
+     * serialType.
      */
     if (aggserialfnName)
     {

View File

@@ -338,8 +338,8 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
         /*
          * There's little point in having a serialization/deserialization
          * function on aggregates that don't have an internal state, so let's
-         * just disallow this as it may help clear up any confusion or needless
-         * authoring of these functions.
+         * just disallow this as it may help clear up any confusion or
+         * needless authoring of these functions.
          */
         if (transTypeId != INTERNALOID)
             ereport(ERROR,
@@ -358,9 +358,9 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
         /*
          * We disallow INTERNAL serialType as the whole point of the
-         * serialized types is to allow the aggregate state to be output,
-         * and we cannot output INTERNAL. This check, combined with the one
-         * above ensures that the trans type and serialization type are not the
+         * serialized types is to allow the aggregate state to be output, and
+         * we cannot output INTERNAL. This check, combined with the one above
+         * ensures that the trans type and serialization type are not the
          * same.
          */
         if (serialTypeId == INTERNALOID)

View File

@@ -409,9 +409,8 @@ ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddre
                              stmt->objargs, &rel, AccessExclusiveLock, false);

     /*
-     * If a relation was involved, it would have been opened and locked.
-     * We don't need the relation here, but we'll retain the lock until
-     * commit.
+     * If a relation was involved, it would have been opened and locked. We
+     * don't need the relation here, but we'll retain the lock until commit.
      */
     if (rel)
         heap_close(rel, NoLock);
@@ -630,8 +629,8 @@ AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid)
     oldNspOid = DatumGetObjectId(namespace);

     /*
-     * If the object is already in the correct namespace, we don't need
-     * to do anything except fire the object access hook.
+     * If the object is already in the correct namespace, we don't need to do
+     * anything except fire the object access hook.
      */
     if (oldNspOid == nspOid)
     {

View File

@@ -217,9 +217,8 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
                        RelationGetRelationName(matviewRel));

     /*
-     * Check that there is a unique index with no WHERE clause on
-     * one or more columns of the materialized view if CONCURRENTLY
-     * is specified.
+     * Check that there is a unique index with no WHERE clause on one or more
+     * columns of the materialized view if CONCURRENTLY is specified.
      */
     if (concurrent)
     {
@@ -745,8 +744,8 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
     /*
      * There must be at least one unique index on the matview.
      *
-     * ExecRefreshMatView() checks that after taking the exclusive lock on
-     * the matview. So at least one unique index is guaranteed to exist here
+     * ExecRefreshMatView() checks that after taking the exclusive lock on the
+     * matview. So at least one unique index is guaranteed to exist here
      * because the lock is still being held.
      */
     Assert(foundUniqueIndex);

View File

@@ -511,7 +511,8 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
      */
     if (!noperm && num_roles > 0)
     {
-        int         i, j;
+        int         i,
+                    j;
         Oid        *roles = (Oid *) ARR_DATA_PTR(policy_roles);
         Datum      *role_oids;
         char       *qual_value;
@@ -536,10 +537,9 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
         /*
          * All of the dependencies will be removed from the policy and then
-         * re-added. In order to get them correct, we need to extract out
-         * the expressions in the policy and construct a parsestate just
-         * enough to build the range table(s) to then pass to
-         * recordDependencyOnExpr().
+         * re-added. In order to get them correct, we need to extract out the
+         * expressions in the policy and construct a parsestate just enough to
+         * build the range table(s) to then pass to recordDependencyOnExpr().
          */

         /* Get policy qual, to update dependencies */
@@ -1035,9 +1035,9 @@ AlterPolicy(AlterPolicyStmt *stmt)
         ArrayType  *policy_roles;

         /*
-         * We need to pull the set of roles this policy applies to from
-         * what's in the catalog, so that we can recreate the dependencies
-         * correctly for the policy.
+         * We need to pull the set of roles this policy applies to from what's
+         * in the catalog, so that we can recreate the dependencies correctly
+         * for the policy.
          */

         roles_datum = heap_getattr(policy_tuple, Anum_pg_policy_polroles,
@@ -1070,8 +1070,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
         /*
          * We need to pull the USING expression and build the range table for
-         * the policy from what's in the catalog, so that we can recreate
-         * the dependencies correctly for the policy.
+         * the policy from what's in the catalog, so that we can recreate the
+         * dependencies correctly for the policy.
          */

         /* Check if the policy has a USING expr */

View File

@@ -532,8 +532,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
      * can skip this for internally generated triggers, since the name
      * modification above should be sufficient.
      *
-     * NOTE that this is cool only because we have ShareRowExclusiveLock on the
-     * relation, so the trigger set won't be changing underneath us.
+     * NOTE that this is cool only because we have ShareRowExclusiveLock on
+     * the relation, so the trigger set won't be changing underneath us.
      */
     if (!isInternal)
     {

View File

@@ -3313,9 +3313,9 @@ AlterTypeOwner_oid(Oid typeOid, Oid newOwnerId, bool hasDependEntry)
     typTup = (Form_pg_type) GETSTRUCT(tup);

     /*
-     * If it's a composite type, invoke ATExecChangeOwner so that we fix up the
-     * pg_class entry properly. That will call back to AlterTypeOwnerInternal
-     * to take care of the pg_type entry(s).
+     * If it's a composite type, invoke ATExecChangeOwner so that we fix up
+     * the pg_class entry properly. That will call back to
+     * AlterTypeOwnerInternal to take care of the pg_type entry(s).
      */
     if (typTup->typtype == TYPTYPE_COMPOSITE)
         ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock);

View File

@@ -1167,8 +1167,8 @@ RenameRole(const char *oldname, const char *newname)
                  errmsg("current user cannot be renamed")));

     /*
-     * Check that the user is not trying to rename a system role and
-     * not trying to rename a role into the reserved "pg_" namespace.
+     * Check that the user is not trying to rename a system role and not
+     * trying to rename a role into the reserved "pg_" namespace.
      */
     if (IsReservedName(NameStr(authform->rolname)))
         ereport(ERROR,

View File

@@ -1192,9 +1192,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
         }

         /*
-         * If the all-visible page is turned out to be all-frozen but not marked,
-         * we should so mark it. Note that all_frozen is only valid if all_visible
-         * is true, so we must check both.
+         * If the all-visible page is turned out to be all-frozen but not
+         * marked, we should so mark it. Note that all_frozen is only valid
+         * if all_visible is true, so we must check both.
          */
         else if (all_visible_according_to_vm && all_visible && all_frozen &&
                  !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))

View File

@@ -880,9 +880,9 @@ check_role(char **newval, void **extra, GucSource source)
         ReleaseSysCache(roleTup);

         /*
-         * Verify that session user is allowed to become this role, but
-         * skip this in parallel mode, where we must blindly recreate the
-         * parallel leader's state.
+         * Verify that session user is allowed to become this role, but skip
+         * this in parallel mode, where we must blindly recreate the parallel
+         * leader's state.
          */
         if (!InitializingParallelWorker &&
             !is_member_of_role(GetSessionUserId(), roleid))

View File

@@ -444,10 +444,9 @@ ExecSupportsBackwardScan(Plan *node)
         return false;

     /*
-     * Parallel-aware nodes return a subset of the tuples in each worker,
-     * and in general we can't expect to have enough bookkeeping state to
-     * know which ones we returned in this worker as opposed to some other
-     * worker.
+     * Parallel-aware nodes return a subset of the tuples in each worker, and
+     * in general we can't expect to have enough bookkeeping state to know
+     * which ones we returned in this worker as opposed to some other worker.
      */
     if (node->parallel_aware)
         return false;

View File

@@ -391,8 +391,8 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 
 	/*
-	 * Give parallel-aware nodes a chance to add to the estimates, and get
-	 * a count of how many PlanState nodes there are.
+	 * Give parallel-aware nodes a chance to add to the estimates, and get a
+	 * count of how many PlanState nodes there are.
 	 */
 	e.pcxt = pcxt;
 	e.nnodes = 0;
@@ -444,9 +444,9 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 	pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
 
 	/*
-	 * If instrumentation options were supplied, allocate space for the
-	 * data. It only gets partially initialized here; the rest happens
-	 * during ExecParallelInitializeDSM.
+	 * If instrumentation options were supplied, allocate space for the data.
+	 * It only gets partially initialized here; the rest happens during
+	 * ExecParallelInitializeDSM.
 	 */
 	if (estate->es_instrument)
 	{
@@ -636,9 +636,9 @@ ExecParallelReportInstrumentation(PlanState *planstate,
 
 	/*
 	 * If we shuffled the plan_node_id values in ps_instrument into sorted
-	 * order, we could use binary search here. This might matter someday
-	 * if we're pushing down sufficiently large plan trees. For now, do it
-	 * the slow, dumb way.
+	 * order, we could use binary search here. This might matter someday if
+	 * we're pushing down sufficiently large plan trees. For now, do it the
+	 * slow, dumb way.
 	 */
 	for (i = 0; i < instrumentation->num_plan_nodes; ++i)
 		if (instrumentation->plan_node_id[i] == plan_node_id)
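For reference, the binary-search alternative that this comment leaves for someday would look roughly like the sketch below. This is a hypothetical illustration, not PostgreSQL code: it assumes the plan_node_id[] array were kept in sorted order, and the function name and arguments are invented for the example.

/* Hypothetical sketch: binary search over a sorted plan_node_id[] array,
 * replacing the linear scan shown above. */
static int
find_plan_node_index(const int *plan_node_id, int num_plan_nodes, int target)
{
	int			lo = 0;
	int			hi = num_plan_nodes - 1;

	while (lo <= hi)
	{
		int			mid = lo + (hi - lo) / 2;

		if (plan_node_id[mid] == target)
			return mid;			/* index of this plan node's slot */
		if (plan_node_id[mid] < target)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return -1;					/* no such plan node */
}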

@@ -981,10 +981,11 @@ combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
 		if (OidIsValid(pertrans->deserialfn_oid))
 		{
 			/*
-			 * Don't call a strict deserialization function with NULL input.
-			 * A strict deserialization function and a null value means we skip
-			 * calling the combine function for this state. We assume that this
-			 * would be a waste of time and effort anyway so just skip it.
+			 * Don't call a strict deserialization function with NULL input. A
+			 * strict deserialization function and a null value means we skip
+			 * calling the combine function for this state. We assume that
+			 * this would be a waste of time and effort anyway so just skip
+			 * it.
 			 */
 			if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0])
 				continue;
@@ -1429,8 +1430,8 @@ finalize_partialaggregate(AggState *aggstate,
 	oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
 
 	/*
-	 * serialfn_oid will be set if we must serialize the input state
-	 * before calling the combine function on the state.
+	 * serialfn_oid will be set if we must serialize the input state before
+	 * calling the combine function on the state.
 	 */
 	if (OidIsValid(pertrans->serialfn_oid))
 	{
@@ -1443,6 +1444,7 @@ finalize_partialaggregate(AggState *aggstate,
 	else
 	{
 		FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo;
+
 		fcinfo->arg[0] = pergroupstate->transValue;
 		fcinfo->argnull[0] = pergroupstate->transValueIsNull;
@@ -3366,9 +3368,9 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg,
 	/*
 	 * The serialization and deserialization functions must match, if
 	 * present, as we're unable to share the trans state for aggregates
-	 * which will serialize or deserialize into different formats. Remember
-	 * that these will be InvalidOid if they're not required for this agg
-	 * node.
+	 * which will serialize or deserialize into different formats.
+	 * Remember that these will be InvalidOid if they're not required for
+	 * this agg node.
 	 */
 	if (aggserialfn != pertrans->serialfn_oid ||
 		aggdeserialfn != pertrans->deserialfn_oid)

@@ -285,8 +285,8 @@ ExecReScanForeignScan(ForeignScanState *node)
 
 	/*
 	 * If chgParam of subnode is not null then plan will be re-scanned by
-	 * first ExecProcNode. outerPlan may also be NULL, in which case there
-	 * is nothing to rescan at all.
+	 * first ExecProcNode. outerPlan may also be NULL, in which case there is
+	 * nothing to rescan at all.
 	 */
 	if (outerPlan != NULL && outerPlan->chgParam == NULL)
 		ExecReScan(outerPlan);

@@ -138,8 +138,8 @@ ExecGather(GatherState *node)
 	/*
 	 * Initialize the parallel context and workers on first execution. We do
 	 * this on first execution rather than during node initialization, as it
-	 * needs to allocate large dynamic segment, so it is better to do if it
-	 * is really needed.
+	 * needs to allocate large dynamic segment, so it is better to do if it is
+	 * really needed.
 	 */
 	if (!node->initialized)
 	{
@@ -147,8 +147,8 @@ ExecGather(GatherState *node)
 		Gather	   *gather = (Gather *) node->ps.plan;
 
 		/*
-		 * Sometimes we might have to run without parallelism; but if
-		 * parallel mode is active then we can try to fire up some workers.
+		 * Sometimes we might have to run without parallelism; but if parallel
+		 * mode is active then we can try to fire up some workers.
 		 */
 		if (gather->num_workers > 0 && IsInParallelMode())
 		{
@@ -330,8 +330,8 @@ gather_readnext(GatherState *gatherstate)
 		tup = TupleQueueReaderNext(reader, true, &readerdone);
 
 		/*
-		 * If this reader is done, remove it. If all readers are done,
-		 * clean up remaining worker state.
+		 * If this reader is done, remove it. If all readers are done, clean
+		 * up remaining worker state.
 		 */
 		if (readerdone)
 		{
@@ -452,10 +452,10 @@ void
 ExecReScanGather(GatherState *node)
 {
 	/*
-	 * Re-initialize the parallel workers to perform rescan of relation.
-	 * We want to gracefully shutdown all the workers so that they
-	 * should be able to propagate any error or other information to master
-	 * backend before dying. Parallel context will be reused for rescan.
+	 * Re-initialize the parallel workers to perform rescan of relation. We
+	 * want to gracefully shutdown all the workers so that they should be able
+	 * to propagate any error or other information to master backend before
+	 * dying. Parallel context will be reused for rescan.
 	 */
 	ExecShutdownGatherWorkers(node);

@@ -1221,10 +1221,10 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
 	/*
 	 * Note that it is possible that the target tuple has been modified in
 	 * this session, after the above heap_lock_tuple. We choose to not error
-	 * out in that case, in line with ExecUpdate's treatment of similar
-	 * cases. This can happen if an UPDATE is triggered from within
-	 * ExecQual(), ExecWithCheckOptions() or ExecProject() above, e.g. by
-	 * selecting from a wCTE in the ON CONFLICT's SET.
+	 * out in that case, in line with ExecUpdate's treatment of similar cases.
+	 * This can happen if an UPDATE is triggered from within ExecQual(),
+	 * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
+	 * wCTE in the ON CONFLICT's SET.
 	 */
 
 	/* Execute UPDATE with projection */

@@ -65,8 +65,8 @@ SeqNext(SeqScanState *node)
 	if (scandesc == NULL)
 	{
 		/*
-		 * We reach here if the scan is not parallel, or if we're executing
-		 * a scan that was intended to be parallel serially.
+		 * We reach here if the scan is not parallel, or if we're executing a
+		 * scan that was intended to be parallel serially.
 		 */
 		scandesc = heap_beginscan(node->ss.ss_currentRelation,
 								  estate->es_snapshot,

@@ -2539,11 +2539,10 @@ CheckRADIUSAuth(Port *port)
 	radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier));
 
 	/*
-	 * RADIUS password attributes are calculated as:
-	 *	 e[0] = p[0] XOR MD5(secret + Request Authenticator)
-	 * for the first group of 16 octets, and then:
-	 *	 e[i] = p[i] XOR MD5(secret + e[i-1])
-	 * for the following ones (if necessary)
+	 * RADIUS password attributes are calculated as: e[0] = p[0] XOR
+	 * MD5(secret + Request Authenticator) for the first group of 16 octets,
+	 * and then: e[i] = p[i] XOR MD5(secret + e[i-1]) for the following ones
+	 * (if necessary)
 	 */
 	encryptedpasswordlen = ((strlen(passwd) + RADIUS_VECTOR_LENGTH - 1) / RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH;
 	cryptvector = palloc(strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH);
@@ -2554,7 +2553,11 @@ CheckRADIUSAuth(Port *port)
 	for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH)
 	{
 		memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer, RADIUS_VECTOR_LENGTH);
-		/* .. and for subsequent iterations the result of the previous XOR (calculated below) */
+
+		/*
+		 * .. and for subsequent iterations the result of the previous XOR
+		 * (calculated below)
+		 */
 		md5trailer = encryptedpassword + i;
 
 		if (!pg_md5_binary(cryptvector, strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH, encryptedpassword + i))
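Spelled out, the chaining scheme in that comment XORs the first 16-octet password block with MD5(secret || Request Authenticator) and each later block with MD5(secret || previous output block). A hedged, self-contained sketch follows: radius_encrypt_password is a hypothetical helper invented for the example, and only pg_md5_binary() is assumed to have the signature PostgreSQL's md5 code exposes.

/*
 * Hypothetical sketch of the RFC 2865 password obfuscation described
 * above; not the actual auth.c code.  p is the padded password, e the
 * output, len a multiple of RADIUS_VECTOR_LENGTH.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define RADIUS_VECTOR_LENGTH 16

extern bool pg_md5_binary(const void *buff, size_t len, void *outbuf);

static bool
radius_encrypt_password(const char *secret, const unsigned char *req_auth,
						const unsigned char *p, unsigned char *e, int len)
{
	size_t		secretlen = strlen(secret);
	unsigned char *work = malloc(secretlen + RADIUS_VECTOR_LENGTH);
	const unsigned char *chain = req_auth;	/* first round: Request Authenticator */
	unsigned char md5buf[RADIUS_VECTOR_LENGTH];
	int			i,
				j;

	if (work == NULL)
		return false;
	memcpy(work, secret, secretlen);
	for (i = 0; i < len; i += RADIUS_VECTOR_LENGTH)
	{
		/* e[i] = p[i] XOR MD5(secret + chain), chain = RA or e[i-1] */
		memcpy(work + secretlen, chain, RADIUS_VECTOR_LENGTH);
		if (!pg_md5_binary(work, secretlen + RADIUS_VECTOR_LENGTH, md5buf))
		{
			free(work);
			return false;
		}
		for (j = 0; j < RADIUS_VECTOR_LENGTH; j++)
			e[i + j] = p[i + j] ^ md5buf[j];
		chain = e + i;			/* next round chains on this output */
	}
	free(work);
	return true;
}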

@@ -377,11 +377,12 @@ be_tls_open_server(Port *port)
 	port->ssl_in_use = true;
 
 aloop:
+
 	/*
 	 * Prepare to call SSL_get_error() by clearing thread's OpenSSL error
 	 * queue. In general, the current thread's error queue must be empty
-	 * before the TLS/SSL I/O operation is attempted, or SSL_get_error()
-	 * will not work reliably. An extension may have failed to clear the
+	 * before the TLS/SSL I/O operation is attempted, or SSL_get_error() will
+	 * not work reliably. An extension may have failed to clear the
 	 * per-thread error queue following another call to an OpenSSL I/O
 	 * routine.
 	 */
@@ -393,12 +394,11 @@ aloop:
 		/*
 		 * Other clients of OpenSSL in the backend may fail to call
-		 * ERR_get_error(), but we always do, so as to not cause problems
-		 * for OpenSSL clients that don't call ERR_clear_error()
-		 * defensively. Be sure that this happens by calling now.
-		 * SSL_get_error() relies on the OpenSSL per-thread error queue
-		 * being intact, so this is the earliest possible point
-		 * ERR_get_error() may be called.
+		 * ERR_get_error(), but we always do, so as to not cause problems for
+		 * OpenSSL clients that don't call ERR_clear_error() defensively. Be
+		 * sure that this happens by calling now. SSL_get_error() relies on
+		 * the OpenSSL per-thread error queue being intact, so this is the
+		 * earliest possible point ERR_get_error() may be called.
 		 */
 		ecode = ERR_get_error();
 		switch (err)
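The discipline these two comments describe, clear the thread's error queue before the TLS call and drain it right after, can be illustrated with the following hedged sketch. It uses only public OpenSSL APIs (ERR_clear_error, SSL_read, SSL_get_error, ERR_get_error); the wrapper function itself is hypothetical.

#include <openssl/err.h>
#include <openssl/ssl.h>

/*
 * Sketch of the error-queue discipline described above: empty the
 * per-thread queue before the I/O call, and fetch from it immediately
 * after, so SSL_get_error() reports on this operation only.
 */
static int
read_with_clean_error_queue(SSL *ssl, void *buf, int len)
{
	int			n;
	int			err;
	unsigned long ecode;

	ERR_clear_error();			/* queue must be empty before the I/O call */
	n = SSL_read(ssl, buf, len);
	err = SSL_get_error(ssl, n);	/* interprets n plus the error queue */
	ecode = (err != SSL_ERROR_NONE) ? ERR_get_error() : 0;

	/* ... handle err/ecode as the caller requires ... */
	(void) ecode;
	return n;
}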

@@ -153,13 +153,13 @@ retry:
 		 * because it is the postmaster's job to kill us if some other backend
 		 * exists uncleanly. Moreover, we won't run very well in this state;
 		 * helper processes like walwriter and the bgwriter will exit, so
-		 * performance may be poor. Finally, if we don't exit, pg_ctl will
-		 * be unable to restart the postmaster without manual intervention,
-		 * so no new connections can be accepted. Exiting clears the deck
-		 * for a postmaster restart.
+		 * performance may be poor. Finally, if we don't exit, pg_ctl will be
+		 * unable to restart the postmaster without manual intervention, so no
+		 * new connections can be accepted. Exiting clears the deck for a
+		 * postmaster restart.
 		 *
-		 * (Note that we only make this check when we would otherwise sleep
-		 * on our latch. We might still continue running for a while if the
+		 * (Note that we only make this check when we would otherwise sleep on
+		 * our latch. We might still continue running for a while if the
 		 * postmaster is killed in mid-query, or even through multiple queries
 		 * if we never have to wait for read. We don't want to burn too many
 		 * cycles checking for this very rare condition, and this should cause

@@ -143,9 +143,9 @@ mq_putmessage(char msgtype, const char *s, size_t len)
 
 	/*
 	 * If the message queue is already gone, just ignore the message. This
-	 * doesn't necessarily indicate a problem; for example, DEBUG messages
-	 * can be generated late in the shutdown sequence, after all DSMs have
-	 * already been detached.
+	 * doesn't necessarily indicate a problem; for example, DEBUG messages can
+	 * be generated late in the shutdown sequence, after all DSMs have already
+	 * been detached.
 	 */
 	if (pq_mq == NULL)
 		return 0;

@@ -270,13 +270,16 @@ startup_hacks(const char *progname)
 		SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
 
 #if defined(_M_AMD64) && _MSC_VER == 1800
 
 		/*
-		 * Avoid crashing in certain floating-point operations if
-		 * we were compiled for x64 with MS Visual Studio 2013 and
-		 * are running on Windows prior to 7/2008R2 SP1 on an
-		 * AVX2-capable CPU.
+		 * Avoid crashing in certain floating-point operations if we were
+		 * compiled for x64 with MS Visual Studio 2013 and are running on
+		 * Windows prior to 7/2008R2 SP1 on an AVX2-capable CPU.
 		 *
-		 * Ref: https://connect.microsoft.com/VisualStudio/feedback/details/811093/visual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instructions
+		 * Ref:
+		 * https://connect.microsoft.com/VisualStudio/feedback/details/811093/v
+		 * isual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instruction
+		 * s
 		 */
 		if (!IsWindows7SP1OrGreater())
 		{

@@ -2228,6 +2228,7 @@ _readExtensibleNode(void)
 	const ExtensibleNodeMethods *methods;
 	ExtensibleNode *local_node;
 	const char *extnodename;
+
 	READ_TEMP_LOCALS();
 
 	token = pg_strtok(&length); /* skip: extnodename */

@@ -163,8 +163,8 @@ make_one_rel(PlannerInfo *root, List *joinlist)
 	set_base_rel_consider_startup(root);
 
 	/*
-	 * Generate access paths for the base rels. set_base_rel_sizes also
-	 * sets the consider_parallel flag for each baserel, if appropriate.
+	 * Generate access paths for the base rels. set_base_rel_sizes also sets
+	 * the consider_parallel flag for each baserel, if appropriate.
 	 */
 	set_base_rel_sizes(root);
 	set_base_rel_pathlists(root);
@@ -509,6 +509,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
 	switch (rte->rtekind)
 	{
 		case RTE_RELATION:
+
 			/*
 			 * Currently, parallel workers can't access the leader's temporary
 			 * tables. We could possibly relax this if the wrote all of its
@@ -557,14 +558,15 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
 			break;
 
 		case RTE_SUBQUERY:
+
 			/*
 			 * Subplans currently aren't passed to workers. Even if they
-			 * were, the subplan might be using parallelism internally, and
-			 * we can't support nested Gather nodes at present. Finally,
-			 * we don't have a good way of knowing whether the subplan
-			 * involves any parallel-restricted operations. It would be
-			 * nice to relax this restriction some day, but it's going to
-			 * take a fair amount of work.
+			 * were, the subplan might be using parallelism internally, and we
+			 * can't support nested Gather nodes at present. Finally, we
+			 * don't have a good way of knowing whether the subplan involves
+			 * any parallel-restricted operations. It would be nice to relax
+			 * this restriction some day, but it's going to take a fair amount
+			 * of work.
 			 */
 			return;
 
@@ -580,6 +582,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
 			break;
 
 		case RTE_VALUES:
+
 			/*
 			 * The data for a VALUES clause is stored in the plan tree itself,
 			 * so scanning it in a worker is fine.
@@ -587,6 +590,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
 			break;
 
 		case RTE_CTE:
+
 			/*
 			 * CTE tuplestores aren't shared among parallel workers, so we
 			 * force all CTE scans to happen in the leader. Also, populating
@@ -598,8 +602,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
 	}
 
 	/*
-	 * If there's anything in baserestrictinfo that's parallel-restricted,
-	 * we give up on parallelizing access to this relation. We could consider
+	 * If there's anything in baserestrictinfo that's parallel-restricted, we
+	 * give up on parallelizing access to this relation. We could consider
 	 * instead postponing application of the restricted quals until we're
 	 * above all the parallelism in the plan tree, but it's not clear that
 	 * this would be a win in very many cases, and it might be tricky to make
@@ -609,8 +613,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
 		return;
 
 	/*
-	 * If the relation's outputs are not parallel-safe, we must give up.
-	 * In the common case where the relation only outputs Vars, this check is
+	 * If the relation's outputs are not parallel-safe, we must give up. In
+	 * the common case where the relation only outputs Vars, this check is
 	 * very cheap; otherwise, we have to do more work.
 	 */
 	if (rel->reltarget_has_non_vars &&
@@ -1251,8 +1255,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 	int			parallel_workers = 0;
 
 	/*
-	 * Decide on the numebr of workers to request for this append path. For
-	 * now, we just use the maximum value from among the members. It
+	 * Decide on the numebr of workers to request for this append path.
+	 * For now, we just use the maximum value from among the members. It
 	 * might be useful to use a higher number if the Append node were
 	 * smart enough to spread out the workers, but it currently isn't.
 	 */
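The "maximum value from among the members" rule amounts to a simple fold over the member paths, roughly as below. This is a hedged sketch: the function and the partial_subpaths list name are assumed for illustration, not taken from the source.

/* Hedged sketch: compute an Append path's worker count as the maximum
 * requested by any member path; list handling uses PostgreSQL's List
 * API, and the function itself is hypothetical. */
static int
append_parallel_workers(List *partial_subpaths)
{
	int			parallel_workers = 0;
	ListCell   *lc;

	foreach(lc, partial_subpaths)
	{
		Path	   *subpath = (Path *) lfirst(lc);

		parallel_workers = Max(parallel_workers, subpath->parallel_workers);
	}
	return parallel_workers;
}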
@@ -2160,8 +2164,8 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
 		 * Run generate_gather_paths() for each just-processed joinrel. We
 		 * could not do this earlier because both regular and partial paths
 		 * can get added to a particular joinrel at multiple times within
-		 * join_search_one_level. After that, we're done creating paths
-		 * for the joinrel, so run set_cheapest().
+		 * join_search_one_level. After that, we're done creating paths for
+		 * the joinrel, so run set_cheapest().
 		 */
 		foreach(lc, root->join_rel_level[lev])
 		{

@@ -1428,15 +1428,14 @@ create_projection_plan(PlannerInfo *root, ProjectionPath *best_path)
 	 * We might not really need a Result node here. There are several ways
 	 * that this can happen. For example, MergeAppend doesn't project, so we
 	 * would have thought that we needed a projection to attach resjunk sort
-	 * columns to its output ... but create_merge_append_plan might have
-	 * added those same resjunk sort columns to both MergeAppend and its
-	 * children. Alternatively, apply_projection_to_path might have created
-	 * a projection path as the subpath of a Gather node even though the
-	 * subpath was projection-capable. So, if the subpath is capable of
-	 * projection or the desired tlist is the same expression-wise as the
-	 * subplan's, just jam it in there. We'll have charged for a Result that
-	 * doesn't actually appear in the plan, but that's better than having a
-	 * Result we don't need.
+	 * columns to its output ... but create_merge_append_plan might have added
+	 * those same resjunk sort columns to both MergeAppend and its children.
+	 * Alternatively, apply_projection_to_path might have created a projection
+	 * path as the subpath of a Gather node even though the subpath was
+	 * projection-capable. So, if the subpath is capable of projection or the
+	 * desired tlist is the same expression-wise as the subplan's, just jam it
+	 * in there. We'll have charged for a Result that doesn't actually appear
+	 * in the plan, but that's better than having a Result we don't need.
 	 */
 	if (is_projection_capable_path(best_path->subpath) ||
 		tlist_same_exprs(tlist, subplan->targetlist))
@@ -3248,8 +3247,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
 	/*
 	 * If a join between foreign relations was pushed down, remember it. The
 	 * push-down safety of the join depends upon the server and user mapping
-	 * being same. That can change between planning and execution time, in which
-	 * case the plan should be invalidated.
+	 * being same. That can change between planning and execution time, in
+	 * which case the plan should be invalidated.
 	 */
 	if (scan_relid == 0)
 		root->glob->hasForeignJoin = true;
@@ -3257,8 +3256,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
 	/*
 	 * Replace any outer-relation variables with nestloop params in the qual,
 	 * fdw_exprs and fdw_recheck_quals expressions. We do this last so that
-	 * the FDW doesn't have to be involved. (Note that parts of fdw_exprs
-	 * or fdw_recheck_quals could have come from join clauses, so doing this
+	 * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
+	 * fdw_recheck_quals could have come from join clauses, so doing this
 	 * beforehand on the scan_clauses wouldn't work.) We assume
 	 * fdw_scan_tlist contains no such variables.
 	 */
@@ -3279,8 +3278,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
 	 * 0, but there can be no Var with relid 0 in the rel's targetlist or the
 	 * restriction clauses, so we skip this in that case. Note that any such
 	 * columns in base relations that were joined are assumed to be contained
-	 * in fdw_scan_tlist.) This is a bit of a kluge and might go away someday,
-	 * so we intentionally leave it out of the API presented to FDWs.
+	 * in fdw_scan_tlist.) This is a bit of a kluge and might go away
+	 * someday, so we intentionally leave it out of the API presented to FDWs.
 	 */
 	scan_plan->fsSystemCol = false;
 	if (scan_relid > 0)

@@ -1777,8 +1777,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 		 * findable from the PlannerInfo struct; anything else the FDW wants
 		 * to know should be obtainable via "root".
 		 *
-		 * Note: CustomScan providers, as well as FDWs that don't want to
-		 * use this hook, can use the create_upper_paths_hook; see below.
+		 * Note: CustomScan providers, as well as FDWs that don't want to use
+		 * this hook, can use the create_upper_paths_hook; see below.
 		 */
 		if (current_rel->fdwroutine &&
 			current_rel->fdwroutine->GetForeignUpperPaths)
@@ -3196,8 +3196,8 @@ set_grouped_rel_consider_parallel(PlannerInfo *root, RelOptInfo *grouped_rel,
 
 	/*
 	 * All that's left to check now is to make sure all aggregate functions
-	 * support partial mode. If there's no aggregates then we can skip checking
-	 * that.
+	 * support partial mode. If there's no aggregates then we can skip
+	 * checking that.
 	 */
 	if (!parse->hasAggs)
 		grouped_rel->consider_parallel = true;
@@ -3370,9 +3370,10 @@ create_grouping_paths(PlannerInfo *root,
 	/*
 	 * Determine whether it's possible to perform sort-based implementations
-	 * of grouping. (Note that if groupClause is empty, grouping_is_sortable()
-	 * is trivially true, and all the pathkeys_contained_in() tests will
-	 * succeed too, so that we'll consider every surviving input path.)
+	 * of grouping. (Note that if groupClause is empty,
+	 * grouping_is_sortable() is trivially true, and all the
+	 * pathkeys_contained_in() tests will succeed too, so that we'll consider
+	 * every surviving input path.)
 	 */
 	can_sort = grouping_is_sortable(parse->groupClause);
 
@@ -3616,8 +3617,8 @@ create_grouping_paths(PlannerInfo *root,
 	/*
 	 * Now generate a complete GroupAgg Path atop of the cheapest partial
-	 * path. We need only bother with the cheapest path here, as the output
-	 * of Gather is never sorted.
+	 * path. We need only bother with the cheapest path here, as the
+	 * output of Gather is never sorted.
 	 */
 	if (grouped_rel->partial_pathlist)
 	{
@@ -3632,9 +3633,9 @@ create_grouping_paths(PlannerInfo *root,
 										 &total_groups);
 
 		/*
-		 * Gather is always unsorted, so we'll need to sort, unless there's
-		 * no GROUP BY clause, in which case there will only be a single
-		 * group.
+		 * Gather is always unsorted, so we'll need to sort, unless
+		 * there's no GROUP BY clause, in which case there will only be a
+		 * single group.
 		 */
 		if (parse->groupClause)
 			path = (Path *) create_sort_path(root,
@@ -3678,15 +3679,15 @@ create_grouping_paths(PlannerInfo *root,
 		/*
 		 * Provided that the estimated size of the hashtable does not exceed
 		 * work_mem, we'll generate a HashAgg Path, although if we were unable
-		 * to sort above, then we'd better generate a Path, so that we at least
-		 * have one.
+		 * to sort above, then we'd better generate a Path, so that we at
+		 * least have one.
 		 */
 		if (hashaggtablesize < work_mem * 1024L ||
 			grouped_rel->pathlist == NIL)
 		{
 			/*
-			 * We just need an Agg over the cheapest-total input path, since input
-			 * order won't matter.
+			 * We just need an Agg over the cheapest-total input path, since
+			 * input order won't matter.
 			 */
 			add_path(grouped_rel, (Path *)
 					 create_agg_path(root, grouped_rel,
@@ -3704,8 +3705,8 @@ create_grouping_paths(PlannerInfo *root,
 		/*
 		 * Generate a HashAgg Path atop of the cheapest partial path. Once
-		 * again, we'll only do this if it looks as though the hash table won't
-		 * exceed work_mem.
+		 * again, we'll only do this if it looks as though the hash table
+		 * won't exceed work_mem.
 		 */
 		if (grouped_rel->partial_pathlist)
 		{

@@ -2100,6 +2100,7 @@ search_indexed_tlist_for_partial_aggref(Aggref *aggref, indexed_tlist *itlist,
 			continue;
 		if (aggref->aggvariadic != tlistaggref->aggvariadic)
 			continue;
+
 		/*
 		 * it would be harmless to compare aggcombine and aggpartial, but
 		 * it's also unnecessary

@@ -1371,11 +1371,11 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
 	 * recurse through Query objects to as to locate parallel-unsafe
 	 * constructs anywhere in the tree.
 	 *
-	 * Later, we'll be called again for specific quals, possibly after
-	 * some planning has been done, we may encounter SubPlan, SubLink,
-	 * or AlternativeSubLink nodes. Currently, there's no need to recurse
-	 * through these; they can't be unsafe, since we've already cleared
-	 * the entire query of unsafe operations, and they're definitely
+	 * Later, we'll be called again for specific quals, possibly after some
+	 * planning has been done, we may encounter SubPlan, SubLink, or
+	 * AlternativeSubLink nodes. Currently, there's no need to recurse
+	 * through these; they can't be unsafe, since we've already cleared the
+	 * entire query of unsafe operations, and they're definitely
 	 * parallel-restricted.
 	 */
 	if (IsA(node, Query))
@@ -1394,8 +1394,8 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
 			 IsA(node, AlternativeSubPlan) ||IsA(node, Param))
 	{
 		/*
-		 * Since we don't have the ability to push subplans down to workers
-		 * at present, we treat subplan references as parallel-restricted.
+		 * Since we don't have the ability to push subplans down to workers at
+		 * present, we treat subplan references as parallel-restricted.
 		 */
 		if (!context->allow_restricted)
 			return true;
@@ -1405,12 +1405,14 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
 	if (IsA(node, RestrictInfo))
 	{
 		RestrictInfo *rinfo = (RestrictInfo *) node;
+
 		return has_parallel_hazard_walker((Node *) rinfo->clause, context);
 	}
 
 	/*
 	 * It is an error for a parallel worker to touch a temporary table in any
-	 * way, so we can't handle nodes whose type is the rowtype of such a table.
+	 * way, so we can't handle nodes whose type is the rowtype of such a
+	 * table.
 	 */
 	if (!context->allow_restricted)
 	{
@@ -1535,6 +1537,7 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
 		foreach(opid, rcexpr->opnos)
 		{
 			Oid			opfuncid = get_opcode(lfirst_oid(opid));
+
 			if (parallel_too_dangerous(func_parallel(opfuncid), context))
 				return true;
 		}
@@ -1870,8 +1873,8 @@ contain_leaked_vars_walker(Node *node, void *context)
 			/*
 			 * WHERE CURRENT OF doesn't contain function calls. Moreover, it
 			 * is important that this can be pushed down into a
-			 * security_barrier view, since the planner must always generate
-			 * a TID scan when CURRENT OF is present -- c.f. cost_tidscan.
+			 * security_barrier view, since the planner must always generate a
+			 * TID scan when CURRENT OF is present -- c.f. cost_tidscan.
 			 */
 			return false;

@@ -287,12 +287,11 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
 	if (like_found)
 	{
 		/*
-		 * To match INHERITS, the existence of any LIKE table with OIDs
-		 * causes the new table to have oids. For the same reason,
-		 * WITH/WITHOUT OIDs is also ignored with LIKE. We prepend
-		 * because the first oid option list entry is honored. Our
-		 * prepended WITHOUT OIDS clause will be overridden if an
-		 * inherited table has oids.
+		 * To match INHERITS, the existence of any LIKE table with OIDs causes
+		 * the new table to have oids. For the same reason, WITH/WITHOUT OIDs
+		 * is also ignored with LIKE. We prepend because the first oid option
+		 * list entry is honored. Our prepended WITHOUT OIDS clause will be
+		 * overridden if an inherited table has oids.
 		 */
 		stmt->options = lcons(makeDefElem("oids",
 						  (Node *) makeInteger(cxt.hasoids)), stmt->options);
@@ -305,6 +304,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
 		if (nodeTag(element) == T_Constraint)
 			transformTableConstraint(&cxt, (Constraint *) element);
 	}
+
 	/*
 	 * transformIndexConstraints wants cxt.alist to contain only index
 	 * statements, so transfer anything we already have into save_alist.
@@ -1949,8 +1949,8 @@ transformCheckConstraints(CreateStmtContext *cxt, bool skipValidation)
 
 	/*
 	 * If creating a new table, we can safely skip validation of check
-	 * constraints, and nonetheless mark them valid. (This will override
-	 * any user-supplied NOT VALID flag.)
+	 * constraints, and nonetheless mark them valid. (This will override any
+	 * user-supplied NOT VALID flag.)
 	 */
 	if (skipValidation)
 	{

@@ -35,8 +35,7 @@ pg_spinlock_barrier(void)
 	 *
 	 * We use kill(0) for the fallback barrier as we assume that kernels on
 	 * systems old enough to require fallback barrier support will include an
-	 * appropriate barrier while checking the existence of the postmaster
-	 * pid.
+	 * appropriate barrier while checking the existence of the postmaster pid.
 	 */
 	(void) kill(PostmasterPid, 0);
 }

@@ -672,9 +672,9 @@ AutoVacLauncherMain(int argc, char *argv[])
 
 		/*
 		 * There are some conditions that we need to check before trying to
-		 * start a worker. First, we need to make sure that there is a
-		 * worker slot available. Second, we need to make sure that no
-		 * other worker failed while starting up.
+		 * start a worker. First, we need to make sure that there is a worker
+		 * slot available. Second, we need to make sure that no other worker
+		 * failed while starting up.
 		 */
 		current_time = GetCurrentTimestamp();

@@ -2727,6 +2727,7 @@ pgstat_bestart(void)
 	beentry->st_activity[pgstat_track_activity_query_size - 1] = '\0';
 	beentry->st_progress_command = PROGRESS_COMMAND_INVALID;
 	beentry->st_progress_command_target = InvalidOid;
+
 	/*
 	 * we don't zero st_progress_param here to save cycles; nobody should
 	 * examine it until st_progress_command has been set to something other

@@ -1182,23 +1182,22 @@ PostmasterMain(int argc, char *argv[])
 	RemovePgTempFiles();
 
 	/*
-	 * Forcibly remove the files signaling a standby promotion
-	 * request. Otherwise, the existence of those files triggers
-	 * a promotion too early, whether a user wants that or not.
+	 * Forcibly remove the files signaling a standby promotion request.
+	 * Otherwise, the existence of those files triggers a promotion too early,
+	 * whether a user wants that or not.
 	 *
-	 * This removal of files is usually unnecessary because they
-	 * can exist only during a few moments during a standby
-	 * promotion. However there is a race condition: if pg_ctl promote
-	 * is executed and creates the files during a promotion,
-	 * the files can stay around even after the server is brought up
-	 * to new master. Then, if new standby starts by using the backup
-	 * taken from that master, the files can exist at the server
+	 * This removal of files is usually unnecessary because they can exist
+	 * only during a few moments during a standby promotion. However there is
+	 * a race condition: if pg_ctl promote is executed and creates the files
+	 * during a promotion, the files can stay around even after the server is
+	 * brought up to new master. Then, if new standby starts by using the
+	 * backup taken from that master, the files can exist at the server
 	 * startup and should be removed in order to avoid an unexpected
 	 * promotion.
 	 *
-	 * Note that promotion signal files need to be removed before
-	 * the startup process is invoked. Because, after that, they can
-	 * be used by postmaster's SIGUSR1 signal handler.
+	 * Note that promotion signal files need to be removed before the startup
+	 * process is invoked. Because, after that, they can be used by
+	 * postmaster's SIGUSR1 signal handler.
 	 */
 	RemovePromoteSignalFiles();
@@ -2607,6 +2606,7 @@ pmdie(SIGNAL_ARGS)
 			if (pmState == PM_RECOVERY)
 			{
 				SignalSomeChildren(SIGTERM, BACKEND_TYPE_BGWORKER);
+
 				/*
 				 * Only startup, bgwriter, walreceiver, possibly bgworkers,
 				 * and/or checkpointer should be active in this state; we just
@@ -3074,9 +3074,9 @@ CleanupBackgroundWorker(int pid,
 		/*
 		 * It's possible that this background worker started some OTHER
-		 * background worker and asked to be notified when that worker
-		 * started or stopped. If so, cancel any notifications destined
-		 * for the now-dead backend.
+		 * background worker and asked to be notified when that worker started
+		 * or stopped. If so, cancel any notifications destined for the
+		 * now-dead backend.
 		 */
 		if (rw->rw_backend->bgworker_notify)
 			BackgroundWorkerStopNotifications(rw->rw_pid);
@@ -5696,9 +5696,8 @@ maybe_start_bgworker(void)
 			rw->rw_crashed_at = 0;
 
 			/*
-			 * Allocate and assign the Backend element. Note we
-			 * must do this before forking, so that we can handle out of
-			 * memory properly.
+			 * Allocate and assign the Backend element. Note we must do this
+			 * before forking, so that we can handle out of memory properly.
 			 */
 			if (!assign_backendlist_entry(rw))
 				return;

@@ -522,7 +522,8 @@ DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 		snapshot = SnapBuildGetOrBuildSnapshot(builder, xid);
 	ReorderBufferQueueMessage(ctx->reorder, xid, snapshot, buf->endptr,
 							  message->transactional,
-							  message->message, /* first part of message is prefix */
+							  message->message,	/* first part of message is
+												 * prefix */
 							  message->message_size,
 							  message->message + message->prefix_size);
 }

@@ -1836,10 +1836,10 @@ ReorderBufferImmediateInvalidation(ReorderBuffer *rb, uint32 ninvalidations,
 		BeginInternalSubTransaction("replay");
 
 	/*
-	 * Force invalidations to happen outside of a valid transaction - that
-	 * way entries will just be marked as invalid without accessing the
-	 * catalog. That's advantageous because we don't need to setup the
-	 * full state necessary for catalog access.
+	 * Force invalidations to happen outside of a valid transaction - that way
+	 * entries will just be marked as invalid without accessing the catalog.
+	 * That's advantageous because we don't need to setup the full state
+	 * necessary for catalog access.
 	 */
 	if (use_subtxn)
 		AbortCurrentTransaction();

@@ -230,11 +230,11 @@ ReplicationSlotCreate(const char *name, bool db_specific,
 	ReplicationSlotValidateName(name, ERROR);
 
 	/*
-	 * If some other backend ran this code concurrently with us, we'd likely both
-	 * allocate the same slot, and that would be bad. We'd also be at risk of
-	 * missing a name collision. Also, we don't want to try to create a new
-	 * slot while somebody's busy cleaning up an old one, because we might
-	 * both be monkeying with the same directory.
+	 * If some other backend ran this code concurrently with us, we'd likely
+	 * both allocate the same slot, and that would be bad. We'd also be at
+	 * risk of missing a name collision. Also, we don't want to try to create
+	 * a new slot while somebody's busy cleaning up an old one, because we
+	 * might both be monkeying with the same directory.
 	 */
 	LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);
@@ -533,6 +533,7 @@ void
 ReplicationSlotMarkDirty(void)
 {
 	ReplicationSlot *slot = MyReplicationSlot;
+
 	Assert(MyReplicationSlot != NULL);
 
 	SpinLockAcquire(&slot->mutex);

@@ -212,8 +212,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
 	/*
 	 * If a wait for synchronous replication is pending, we can neither
 	 * acknowledge the commit nor raise ERROR or FATAL. The latter would
-	 * lead the client to believe that the transaction aborted, which
-	 * is not true: it's already committed locally. The former is no good
+	 * lead the client to believe that the transaction aborted, which is
+	 * not true: it's already committed locally. The former is no good
 	 * either: the client has requested synchronous replication, and is
 	 * entitled to assume that an acknowledged commit is also replicated,
 	 * which might not be true. So in this case we issue a WARNING (which
@@ -400,8 +400,8 @@ SyncRepReleaseWaiters(void)
 	/*
 	 * If this WALSender is serving a standby that is not on the list of
 	 * potential sync standbys then we have nothing to do. If we are still
-	 * starting up, still running base backup or the current flush position
-	 * is still invalid, then leave quickly also.
+	 * starting up, still running base backup or the current flush position is
+	 * still invalid, then leave quickly also.
 	 */
 	if (MyWalSnd->sync_standby_priority == 0 ||
 		MyWalSnd->state < WALSNDSTATE_STREAMING ||
@@ -412,21 +412,21 @@ SyncRepReleaseWaiters(void)
 	}
 
 	/*
-	 * We're a potential sync standby. Release waiters if there are
-	 * enough sync standbys and we are considered as sync.
+	 * We're a potential sync standby. Release waiters if there are enough
+	 * sync standbys and we are considered as sync.
 	 */
 	LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
 
 	/*
-	 * Check whether we are a sync standby or not, and calculate
-	 * the oldest positions among all sync standbys.
+	 * Check whether we are a sync standby or not, and calculate the oldest
+	 * positions among all sync standbys.
 	 */
 	got_oldest = SyncRepGetOldestSyncRecPtr(&writePtr, &flushPtr,
 											&applyPtr, &am_sync);
 
 	/*
-	 * If we are managing a sync standby, though we weren't
-	 * prior to this, then announce we are now a sync standby.
+	 * If we are managing a sync standby, though we weren't prior to this,
+	 * then announce we are now a sync standby.
 	 */
 	if (announce_next_takeover && am_sync)
 	{
@@ -513,8 +513,8 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
 	}
 
 	/*
-	 * Scan through all sync standbys and calculate the oldest
-	 * Write, Flush and Apply positions.
+	 * Scan through all sync standbys and calculate the oldest Write, Flush
+	 * and Apply positions.
 	 */
 	foreach(cell, sync_standbys)
 	{
@@ -562,8 +562,8 @@ SyncRepGetSyncStandbys(bool *am_sync)
 	int			priority;
 	int			i;
 	bool		am_in_pending = false;
-	volatile WalSnd *walsnd;	/* Use volatile pointer to prevent
-								 * code rearrangement */
+	volatile WalSnd *walsnd;	/* Use volatile pointer to prevent code
+								 * rearrangement */
 
 	/* Set default result */
 	if (am_sync != NULL)
@@ -577,9 +577,9 @@ SyncRepGetSyncStandbys(bool *am_sync)
 	next_highest_priority = lowest_priority + 1;
 
 	/*
-	 * Find the sync standbys which have the highest priority (i.e, 1).
-	 * Also store all the other potential sync standbys into the pending list,
-	 * in order to scan it later and find other sync standbys from it quickly.
+	 * Find the sync standbys which have the highest priority (i.e, 1). Also
+	 * store all the other potential sync standbys into the pending list, in
+	 * order to scan it later and find other sync standbys from it quickly.
 	 */
 	for (i = 0; i < max_wal_senders; i++)
 	{
@@ -603,9 +603,9 @@ SyncRepGetSyncStandbys(bool *am_sync)
 			continue;
 
 		/*
-		 * If the priority is equal to 1, consider this standby as sync
-		 * and append it to the result. Otherwise append this standby
-		 * to the pending list to check if it's actually sync or not later.
+		 * If the priority is equal to 1, consider this standby as sync and
+		 * append it to the result. Otherwise append this standby to the
+		 * pending list to check if it's actually sync or not later.
 		 */
 		if (this_priority == 1)
 		{
@@ -626,10 +626,10 @@ SyncRepGetSyncStandbys(bool *am_sync)
 
 			/*
 			 * Track the highest priority among the standbys in the pending
-			 * list, in order to use it as the starting priority for later scan
-			 * of the list. This is useful to find quickly the sync standbys
-			 * from the pending list later because we can skip unnecessary
-			 * scans for the unused priorities.
+			 * list, in order to use it as the starting priority for later
+			 * scan of the list. This is useful to find quickly the sync
+			 * standbys from the pending list later because we can skip
+			 * unnecessary scans for the unused priorities.
 			 */
 			if (this_priority < next_highest_priority)
 				next_highest_priority = this_priority;
@@ -685,8 +685,8 @@ SyncRepGetSyncStandbys(bool *am_sync)
 
 				/*
 				 * We should always exit here after the scan of pending list
-				 * starts because we know that the list has enough elements
-				 * to reach SyncRepConfig->num_sync.
+				 * starts because we know that the list has enough elements to
+				 * reach SyncRepConfig->num_sync.
 				 */
 				if (list_length(result) == SyncRepConfig->num_sync)
 				{
@@ -695,8 +695,8 @@ SyncRepGetSyncStandbys(bool *am_sync)
 				}
 
 				/*
-				 * Remove the entry for this sync standby from the list
-				 * to prevent us from looking at the same entry again.
+				 * Remove the entry for this sync standby from the list to
+				 * prevent us from looking at the same entry again.
 				 */
 				pending = list_delete_cell(pending, cell, prev);
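Stripped of the WalSnd bookkeeping, the selection strategy these comments describe, take every priority-1 standby first, park the rest in a pending list, then sweep the remaining priorities in ascending order until num_sync standbys are chosen, can be sketched as follows. The Standby struct and function are hypothetical simplifications, not the real syncrep.c types.

/*
 * Hedged sketch of priority-based sync standby selection; simplified
 * relative to SyncRepGetSyncStandbys(), which scans a pending list
 * instead of re-walking the whole array.
 */
typedef struct Standby
{
	int			pid;
	int			priority;		/* 1 is highest; 0 means async */
} Standby;

static int
choose_sync_standbys(Standby *all, int n, int num_sync, Standby **out)
{
	int			chosen = 0;
	int			next_priority;
	int			i;

	/* First pass: priority 1 goes straight into the result. */
	for (i = 0; i < n && chosen < num_sync; i++)
		if (all[i].priority == 1)
			out[chosen++] = &all[i];

	/* Later passes: sweep the remaining priorities in ascending order. */
	for (next_priority = 2; chosen < num_sync && next_priority <= n; next_priority++)
		for (i = 0; i < n && chosen < num_sync; i++)
			if (all[i].priority == next_priority)
				out[chosen++] = &all[i];

	return chosen;
}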

@@ -475,8 +475,8 @@ WalReceiverMain(void)
 					/*
 					 * The recovery process has asked us to send apply
 					 * feedback now. Make sure the flag is really set to
-					 * false in shared memory before sending the reply,
-					 * so we don't miss a new request for a reply.
+					 * false in shared memory before sending the reply, so
+					 * we don't miss a new request for a reply.
 					 */
 					walrcv->force_reply = false;
 					pg_memory_barrier();
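The reset-then-fence-then-reply ordering above generalizes to a common pattern: clear the request flag, issue a memory barrier so the store is visible, and only then act, so a request arriving mid-reply re-sets the flag instead of being lost. A hedged sketch, with a hypothetical shared structure and sender:

#include <stdbool.h>
#include "port/atomics.h"		/* pg_memory_barrier() (PostgreSQL header) */

/* Hypothetical stand-in for the shared WalRcvData flag. */
typedef struct SharedState
{
	volatile bool force_reply;	/* set by the process requesting feedback */
} SharedState;

extern void send_apply_feedback(void);	/* hypothetical reply sender */

static void
handle_reply_request(SharedState *shared)
{
	shared->force_reply = false;	/* clear the request first... */
	pg_memory_barrier();		/* ...make the store visible... */
	send_apply_feedback();		/* ...then reply; a request arriving now
								 * sets the flag again */
}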
@@ -1379,8 +1379,8 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS)
 	if (!superuser())
 	{
 		/*
-		 * Only superusers can see details. Other users only get the pid
-		 * value to know whether it is a WAL receiver, but no details.
+		 * Only superusers can see details. Other users only get the pid value
+		 * to know whether it is a WAL receiver, but no details.
 		 */
 		MemSet(&nulls[1], true, PG_STAT_GET_WAL_RECEIVER_COLS - 1);
 	}

@@ -414,8 +414,8 @@ DefineQueryRewrite(char *rulename,
 		 * any triggers, indexes, child tables, policies, or RLS enabled.
 		 * (Note: these tests are too strict, because they will reject
 		 * relations that once had such but don't anymore. But we don't
-		 * really care, because this whole business of converting relations
-		 * to views is just a kluge to allow dump/reload of views that
+		 * really care, because this whole business of converting relations to
+		 * views is just a kluge to allow dump/reload of views that
 		 * participate in circular dependencies.)
 		 */
 		if (event_relation->rd_rel->relkind != RELKIND_VIEW &&

@ -170,22 +170,24 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
* visibility of records) associated with multiple command types (see * visibility of records) associated with multiple command types (see
* specific cases below). * specific cases below).
* *
* When considering the order in which to apply these USING policies, * When considering the order in which to apply these USING policies, we
* we prefer to apply higher privileged policies, those which allow the * prefer to apply higher privileged policies, those which allow the user
* user to lock records (UPDATE and DELETE), first, followed by policies * to lock records (UPDATE and DELETE), first, followed by policies which
* which don't (SELECT). * don't (SELECT).
* *
* Note that the optimizer is free to push down and reorder quals which * Note that the optimizer is free to push down and reorder quals which
* use leakproof functions. * use leakproof functions.
* *
* In all cases, if there are no policy clauses allowing access to rows in
* the table for the specific type of operation, then a single
* always-false clause (a default-deny policy) will be added (see
* add_security_quals).
*/
/*
* For a SELECT, if UPDATE privileges are required (eg: the user has
* specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals
* first.
*
* This way, we filter out any records from the SELECT FOR SHARE/UPDATE
* which the user does not have access to via the UPDATE USING policies,
@ -232,8 +234,8 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
* a WHERE clause which involves columns from the relation), we collect up
* CMD_SELECT policies and add them via add_security_quals first.
*
* This way, we filter out any records which are not visible through an
* ALL or SELECT USING policy.
*/
if ((commandType == CMD_UPDATE || commandType == CMD_DELETE) &&
rte->requiredPerms & ACL_SELECT)
@ -272,9 +274,9 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
hasSubLinks);
/*
* Get and add ALL/SELECT policies, if SELECT rights are required for
* this relation (eg: when RETURNING is used).  These are added as WCO
* policies rather than security quals to ensure that an error is
* raised if a policy is violated; otherwise, we might end up silently
* dropping rows to be added.
*/
@ -324,11 +326,11 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
hasSubLinks);
/*
* Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK WCOs
* to ensure they are considered when taking the UPDATE path of an
* INSERT .. ON CONFLICT DO UPDATE, if SELECT rights are required
* for this relation, also as WCO policies, again, to avoid
* silently dropping data.  See above.
*/
if (rte->requiredPerms & ACL_SELECT)
{
@ -427,8 +429,8 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id,
}
/*
* Add this policy to the list of permissive policies if it applies to
* the specified role.
*/
if (cmd_matches && check_role_for_policy(policy->roles, user_id))
*permissive_policies = lappend(*permissive_policies, policy);
@ -498,6 +500,7 @@ sort_policies_by_name(List *policies)
foreach(item, policies)
{
RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item);

pols[ii++] = *policy;
}
@ -551,8 +554,8 @@ add_security_quals(int rt_index,
Expr *rowsec_expr;
/*
* First collect up the permissive quals.  If we do not find any
* permissive policies then no rows are visible (this is handled below).
*/
foreach(item, permissive_policies)
{
@ -577,8 +580,8 @@ add_security_quals(int rt_index,
/*
* We now know that permissive policies exist, so we can now add
* security quals based on the USING clauses from the restrictive
* policies.  Since these need to be "AND"d together, we can just add
* them one at a time.
*/
foreach(item, restrictive_policies)
{
@ -608,6 +611,7 @@ add_security_quals(int rt_index,
*securityQuals = list_append_unique(*securityQuals, rowsec_expr);
}
else

/*
* A permissive policy must exist for rows to be visible at all.
* Therefore, if there were no permissive policies found, return a
@ -668,11 +672,11 @@ add_with_check_options(Relation rel,
}
/*
* There must be at least one permissive qual found or no rows are allowed
* to be added.  This is the same as in add_security_quals.
*
* If there are no permissive_quals then we fall through and return a
* single 'false' WCO, preventing all new rows.
*/
if (permissive_quals != NIL)
{
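
The permissive/restrictive combination these comments describe reduces to simple boolean algebra: permissive quals are OR'd, restrictive quals are AND'd on top, and an empty permissive set degenerates to default deny. A minimal standalone sketch; row_is_visible() and its pre-evaluated boolean inputs are illustrative stand-ins, not PostgreSQL's Expr/WithCheckOption machinery:

#include <stdbool.h>
#include <stdio.h>

/* Permissive quals are OR'd together, restrictive quals are then AND'd
 * on top; no permissive policy at all means a default-deny result. */
static bool
row_is_visible(const bool *permissive, int nperm,
               const bool *restrictive, int nrestr)
{
    bool    visible = false;
    int     i;

    if (nperm == 0)
        return false;           /* default deny */

    for (i = 0; i < nperm; i++)
        visible = visible || permissive[i];

    for (i = 0; i < nrestr; i++)
        visible = visible && restrictive[i];

    return visible;
}

int
main(void)
{
    bool    perm[] = {false, true}; /* one permissive policy matches */
    bool    restr[] = {true};       /* the restrictive policy passes */

    printf("visible: %d\n", row_is_visible(perm, 2, restr, 1));
    return 0;
}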

View File

@ -187,11 +187,12 @@ BufferShmemSize(void)
/*
* It would be nice to include the I/O locks in the BufferDesc, but that
* would increase the size of a BufferDesc to more than one cache line,
* and benchmarking has shown that keeping every BufferDesc aligned on a
* cache line boundary is important for performance.  So, instead, the
* array of I/O locks is allocated in a separate tranche.  Because those
* locks are not highly contended, we lay out the array with minimal
* padding.
*/
size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
/* to allow aligning the above */
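
The sizing arithmetic here relies on overflow-checked helpers. A self-contained sketch of the pattern, assuming stand-in add_size()/mul_size() implementations (the real ones live in shmem.c and report overflow via ereport()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef size_t Size;

/* Overflow-checked addition: a wrapped sum is smaller than an operand. */
static Size
add_size(Size s1, Size s2)
{
    Size    result = s1 + s2;

    if (result < s1)
    {
        fprintf(stderr, "requested shared memory size overflows size_t\n");
        exit(1);
    }
    return result;
}

/* Overflow-checked multiplication via a division bound test. */
static Size
mul_size(Size s1, Size s2)
{
    if (s1 != 0 && s2 > SIZE_MAX / s1)
    {
        fprintf(stderr, "requested shared memory size overflows size_t\n");
        exit(1);
    }
    return s1 * s2;
}

int
main(void)
{
    Size    size = 0;
    int     NBuffers = 16384;   /* hypothetical shared_buffers */
    Size    lock_size = 16;     /* stand-in for sizeof(LWLockMinimallyPadded) */

    size = add_size(size, mul_size((Size) NBuffers, lock_size));
    printf("I/O lock array: %zu bytes\n", size);
    return 0;
}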

View File

@ -219,9 +219,9 @@ UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum,
fsm_update_recursive(rel, addr, new_cat);
/*
* Get the last block number on this FSM page.  If that's greater than
* or equal to our endBlkNum, we're done.  Otherwise, advance to the
* first block on the next page.
*/
lastBlkOnPage = fsm_get_lastblckno(rel, addr);
if (lastBlkOnPage >= endBlkNum)
@ -841,8 +841,8 @@ fsm_get_lastblckno(Relation rel, FSMAddress addr)
int slot;
/*
* Get the last slot number on the given address and convert that to block
* number
*/
slot = SlotsPerFSMPage - 1;
return fsm_get_heap_blk(addr, slot);
@ -862,8 +862,8 @@ fsm_update_recursive(Relation rel, FSMAddress addr, uint8 new_cat)
return;
/*
* Get the parent page and our slot in the parent page, and update the
* information in that.
*/
parent = fsm_get_parent(addr, &parentslot);
fsm_set_and_search(rel, parent, parentslot, new_cat, 0);
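
fsm_update_recursive() propagates a leaf's new free-space category toward the root so that upper pages keep a usable summary. A toy sketch under the assumption of an in-memory binary max-tree; the real FSM works on on-disk pages with SlotsPerFSMPage children per node and updates the parent slot via fsm_set_and_search():

#include <stdio.h>

#define NLEAVES 8

/* Toy free-space tree: node i has children 2i+1 and 2i+2, and each inner
 * node stores the max category of its children -- a simplified analogue
 * of propagating a new category to parent pages. */
static unsigned char fsm[2 * NLEAVES - 1];

static void
fsm_update(int node, unsigned char new_cat)
{
    fsm[node] = new_cat;
    while (node > 0)
    {
        int     parent = (node - 1) / 2;
        int     l = 2 * parent + 1;
        int     r = 2 * parent + 2;
        unsigned char best = fsm[l] > fsm[r] ? fsm[l] : fsm[r];

        if (fsm[parent] == best)
            break;              /* ancestors already correct */
        fsm[parent] = best;
        node = parent;
    }
}

int
main(void)
{
    fsm_update(NLEAVES - 1 + 3, 200);   /* leaf 3 now has lots of space */
    printf("root category: %u\n", fsm[0]);  /* prints 200 */
    return 0;
}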

View File

@ -245,8 +245,8 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
}
/*
* OK, the control segment looks basically valid, so we can use it to get
* a list of segments that need to be removed.
*/
nitems = old_control->nitems;
for (i = 0; i < nitems; ++i)

View File

@ -642,8 +642,8 @@ ProcArrayInitRecovery(TransactionId initializedUptoXID)
Assert(TransactionIdIsNormal(initializedUptoXID));
/*
* we set latestObservedXid to the xid SUBTRANS has been initialized up
* to, so we can extend it from that point onwards in
* RecordKnownAssignedTransactionIds, and when we get consistent in
* ProcArrayApplyRecoveryInfo().
*/
@ -2591,8 +2591,8 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
/*
* We ignore an invalid pxmin because this means that backend has
* no snapshot currently. We hold a Share lock to avoid contention
* with users taking snapshots.  That is not a problem because the
* current xmin is always at least one higher than the latest
* removed xid, so any new snapshot would never conflict with the
* test here.
*/

View File

@ -370,6 +370,7 @@ ResolveRecoveryConflictWithLock(LOCKTAG locktag)
* We're already behind, so clear a path as quickly as possible.
*/
VirtualTransactionId *backends;

backends = GetLockConflicts(&locktag, AccessExclusiveLock);
ResolveRecoveryConflictWithVirtualXIDs(backends,
PROCSIG_RECOVERY_CONFLICT_LOCK);

View File

@ -1153,13 +1153,13 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
uint32 partition = LockHashPartition(hashcode);
/*
* It might seem unsafe to access proclock->groupLeader without a
* lock, but it's not really.  Either we are initializing a proclock
* on our own behalf, in which case our group leader isn't changing
* because the group leader for a process can only ever be changed by
* the process itself; or else we are transferring a fast-path lock to
* the main lock table, in which case that process can't change its
* lock group leader without first releasing all of its locks (and in
* particular the one we are currently transferring).
*/
proclock->groupLeader = proc->lockGroupLeader != NULL ?
@ -1319,10 +1319,9 @@ LockCheckConflicts(LockMethod lockMethodTable,
}
/*
* Rats.  Something conflicts.  But it could still be my own lock, or a
* lock held by another member of my locking group.  First, figure out how
* many conflicts remain after subtracting out any locks I hold myself.
*/
myLocks = proclock->holdMask;
for (i = 1; i <= numLockModes; i++)
@ -1357,9 +1356,10 @@ LockCheckConflicts(LockMethod lockMethodTable,
/*
* Locks held in conflicting modes by members of our own lock group are
* not real conflicts; we can subtract those out and see if we still have
* a conflict.  This is O(N) in the number of processes holding or
* awaiting locks on this object.  We could improve that by making the
* shared memory state more complex (and larger) but it doesn't seem worth
* it.
*/
procLocks = &(lock->procLocks);
otherproclock = (PROCLOCK *)
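
The conflict test described above is bitmask arithmetic over lock modes plus per-mode hold counts: start from the totals, subtract the holdings of your own group, and see whether any conflicting mode is still held by an outsider. A reduced two-mode sketch; mode_conflicts() and its arrays are illustrative, not the PROCLOCK/LOCK representation:

#include <stdio.h>

/* Simplified two-mode lock table (1 = SHARED, 2 = EXCLUSIVE). */
#define NMODES 2
static const int conflict_mask[NMODES + 1] = {
    0,
    1 << 2,                     /* SHARED conflicts with EXCLUSIVE */
    (1 << 1) | (1 << 2),        /* EXCLUSIVE conflicts with both */
};

/* Does reqmode conflict with anything held outside our lock group? */
static int
mode_conflicts(int reqmode, const int *total, const int *group)
{
    int     m;

    for (m = 1; m <= NMODES; m++)
    {
        if ((conflict_mask[reqmode] & (1 << m)) == 0)
            continue;           /* mode m does not conflict */
        if (total[m] - group[m] > 0)
            return 1;           /* held by someone outside our group */
    }
    return 0;
}

int
main(void)
{
    int     total[NMODES + 1] = {0, 2, 0};  /* two SHARED holders... */
    int     group[NMODES + 1] = {0, 2, 0};  /* ...both in our group */

    printf("conflict: %d\n", mode_conflicts(2, total, group)); /* 0 */
    return 0;
}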
@ -2583,8 +2583,8 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
*
* proc->databaseId is set at backend startup time and never changes
* thereafter, so it might be safe to perform this test before
* acquiring &proc->backendLock.  In particular, it's certainly safe
* to assume that if the target backend holds any fast-path locks, it
* must have performed a memory-fencing operation (in particular, an
* LWLock acquisition) since setting proc->databaseId. However, it's
* less clear that our backend is certain to have performed a memory

View File

@ -760,8 +760,8 @@ GetLWLockIdentifier(uint8 classId, uint16 eventId)
/*
* It is quite possible that a user has registered a tranche in one of the
* backends (e.g. by allocating lwlocks in dynamic shared memory) but not
* all of them, so we can't assume the tranche is registered here.
*/
if (eventId >= LWLockTranchesAllocated ||
LWLockTrancheArray[eventId]->name == NULL)

View File

@ -342,8 +342,8 @@ InitProcess(void)
MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];
/*
* Cross-check that the PGPROC is of the type we expect; if this were not
* the case, it would get returned to the wrong list.
*/
Assert(MyProc->procgloballist == procgloballist);

View File

@ -527,6 +527,7 @@ NIImportDictionary(IspellDict *Conf, const char *filename)
{
char *s,
*pstr;

/* Set of affix flags */
const char *flag;
@ -620,9 +621,9 @@ FindWord(IspellDict *Conf, const char *word, char *affixflag, int flag)
if (flag == 0)
{
/*
* The word can be formed only with another word, and the
* flag parameter gives no indication that we are searching
* for compound words.
*/
if (StopMiddle->compoundflag & FF_COMPOUNDONLY)
return 0;
@ -1161,9 +1162,10 @@ getAffixFlagSet(IspellDict *Conf, char *s)
errmsg("invalid affix alias \"%s\"", s)));
if (curaffix > 0 && curaffix <= Conf->nAffixData)

/*
* Do not subtract 1 from curaffix because empty string was added
* in NIImportOOAffixes
*/
return Conf->AffixData[curaffix];
else
@ -1597,6 +1599,7 @@ static uint32
makeCompoundFlags(IspellDict *Conf, int affix)
{
char *str = Conf->AffixData[affix];

return (getCompoundAffixFlagValue(Conf, str) & FF_COMPOUNDFLAGMASK);
}
@ -1700,8 +1703,8 @@ NISortDictionary(IspellDict *Conf)
/* compress affixes */
/*
* If we use flag aliases then we need to use Conf->AffixData filled in
* the NIImportOOAffixes().
*/
if (Conf->useFlagAliases)
{

View File

@ -295,8 +295,8 @@ pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval,
while (count < prs.curwords)
{
/*
* Were any stop words removed?  If so, fill empty positions with
* placeholders linked by an appropriate operator.
*/
if (pos > 0 && pos + 1 < prs.words[count].pos.pos)
{

View File

@ -267,6 +267,7 @@ datumEstimateSpace(Datum value, bool isnull, bool typByVal, int typLen)
else if (VARATT_IS_EXTERNAL_EXPANDED(value))
{
ExpandedObjectHeader *eoh = DatumGetEOHP(value);

sz += EOH_get_flat_size(eoh);
}
else

View File

@ -442,8 +442,8 @@ spg_box_quad_picksplit(PG_FUNCTION_ARGS)
out->leafTupleDatums = palloc(sizeof(Datum) * in->nTuples);
/*
* Assign ranges to corresponding nodes according to quadrants relative to
* the "centroid" range
*/
for (i = 0; i < in->nTuples; i++)
{
@ -484,8 +484,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
}
/*
* We save the traversal value, or initialize it to an unbounded one if
* we have just begun to walk the tree.
*/
if (in->traversalValue)
rect_box = in->traversalValue;
@ -493,8 +493,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
rect_box = initRectBox();
/*
* We are casting the prefix and queries to RangeBoxes for ease of the
* following operations.
*/
centroid = getRangeBox(DatumGetBoxP(in->prefixDatum));
queries = (RangeBox **) palloc(in->nkeys * sizeof(RangeBox *));
@ -507,9 +507,9 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes);
/*
* We switch memory context, because we want to allocate memory for new
* traversal values (next_rect_box) and pass these pieces of memory to
* further calls of this function.
*/
old_ctx = MemoryContextSwitchTo(in->traversalMemoryContext);
@ -587,8 +587,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
else
{
/*
* If this node is not selected, we don't need to keep the next
* traversal value in the memory context.
*/
pfree(next_rect_box);
}

View File

@ -1002,8 +1002,8 @@ get_array_start(void *state)
{
/*
* Special case: we should match the entire array.  We only need this
* at the outermost level because at nested levels the match will have
* been started by the outer field or array element callback.
*/
_state->result_start = _state->lex->token_start;
}
@ -3368,9 +3368,9 @@ jsonb_concat(PG_FUNCTION_ARGS)
*it2;
/*
* If one of the jsonb values is empty, just return the other if it's not
* scalar and both are of the same kind.  If it's a scalar or they are of
* different kinds we need to perform the concatenation even if one is
* empty.
*/
if (JB_ROOT_IS_OBJECT(jb1) == JB_ROOT_IS_OBJECT(jb2))
@ -3868,8 +3868,8 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
if (level == path_len - 1)
{
/*
* called from jsonb_insert(), it forbids redefining an
* existing value
*/
if (op_type & (JB_PATH_INSERT_BEFORE | JB_PATH_INSERT_AFTER))
ereport(ERROR,
@ -4005,8 +4005,8 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
/*
* We should keep current value only in case of
* JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER because
* otherwise it should be deleted or replaced
*/
if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_INSERT_BEFORE))
(void) pushJsonbValue(st, r, &v);

View File

@ -3397,8 +3397,8 @@ numeric_combine(PG_FUNCTION_ARGS)
state1->NaNcount += state2->NaNcount;
/*
* These are currently only needed for moving aggregates, but let's do
* the right thing anyway...
*/
if (state2->maxScale > state1->maxScale)
{
@ -3485,8 +3485,8 @@ numeric_avg_combine(PG_FUNCTION_ARGS)
state1->NaNcount += state2->NaNcount;
/*
* These are currently only needed for moving aggregates, but let's do
* the right thing anyway...
*/
if (state2->maxScale > state1->maxScale)
{
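
Combine functions like these merge two independently accumulated partial states during parallel aggregation. A standalone sketch with a hypothetical AggState (not PostgreSQL's NumericAggState) showing the same maxScale takeover logic:

#include <stdio.h>

/* Toy partial-aggregate state: each worker accumulates independently,
 * then states are merged pairwise by the combine function. */
typedef struct AggState
{
    long    N;                  /* number of inputs */
    double  sumX;               /* running sum */
    int     maxScale;           /* max scale seen among inputs */
    long    maxScaleCount;      /* # of inputs at that scale */
} AggState;

static void
agg_combine(AggState *s1, const AggState *s2)
{
    s1->N += s2->N;
    s1->sumX += s2->sumX;
    if (s2->maxScale > s1->maxScale)
    {
        /* take over the larger scale and its count */
        s1->maxScale = s2->maxScale;
        s1->maxScaleCount = s2->maxScaleCount;
    }
    else if (s2->maxScale == s1->maxScale)
        s1->maxScaleCount += s2->maxScaleCount;
}

int
main(void)
{
    AggState    a = {2, 3.5, 1, 2};
    AggState    b = {3, 10.0, 2, 1};

    agg_combine(&a, &b);
    printf("N=%ld sum=%.1f maxScale=%d\n", a.N, a.sumX, a.maxScale);
    return 0;
}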

View File

@ -762,7 +762,8 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
{
Datum previousCentroid;

/*
* We know that in->prefixDatum in this place is varlena,
* because it's a range
*/
previousCentroid = datumCopy(in->prefixDatum, false, -1);

View File

@ -184,8 +184,8 @@ checkcondition_gin_internal(GinChkVal *gcv, QueryOperand *val, ExecPhraseData *d
int j;
/*
* if any val requiring a weight is used or caller needs position
* information then set recheck flag
*/
if (val->weight != 0 || data != NULL)
*gcv->need_recheck = true;
@ -236,9 +236,10 @@ TS_execute_ternary(GinChkVal *gcv, QueryItem *curitem)
return !result;
case OP_PHRASE:

/*
* GIN doesn't contain any information about positions, treat
* OP_PHRASE as OP_AND with recheck requirement
*/
*gcv->need_recheck = true;
/* FALL THRU */

View File

@ -696,8 +696,8 @@ parse_tsquery(char *buf,
findoprnd(ptr, query->size, &needcleanup);
/*
* QI_VALSTOP nodes should be cleaned and OP_PHRASE should be pushed
* down
*/
if (needcleanup)
return cleanup_fakeval_and_phrase(query);
@ -852,7 +852,8 @@ infix(INFIX *in, int parentPriority)
in->curpol++;
if (priority < parentPriority ||
(op == OP_PHRASE &&
(priority == parentPriority || /* phrases are not commutative! */
parentPriority == OP_PRIORITY(OP_AND))))
{
needParenthesis = true;

View File

@ -257,7 +257,9 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
else
{
NODE *res = node;
int ndistance,
ldistance = 0,
rdistance = 0;

ndistance = (node->valnode->qoperator.oper == OP_PHRASE) ?
node->valnode->qoperator.distance :
@ -272,8 +274,8 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
ndistance ? &rdistance : NULL);
/*
* ndistance, ldistance and rdistance are greater than zero if their
* corresponding nodes are OP_PHRASE
*/
if (lresult == V_STOP && rresult == V_STOP)
@ -287,9 +289,10 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
else if (lresult == V_STOP)
{
res = node->right;

/*
* propagate distance from current node to the right upper
* subtree.
*/
if (adddistance && ndistance)
*adddistance = rdistance;
@ -298,6 +301,7 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
else if (rresult == V_STOP)
{
res = node->left;

/*
* propagate distance from current node to the upper tree.
*/
@ -417,8 +421,8 @@ normalize_phrase_tree(NODE *node)
return node;
/*
* We can't swap left and right, and we work only with the left child,
* because a <-> b != b <-> a
*/
distance = node->valnode->qoperator.distance;

View File

@ -498,12 +498,16 @@ ts_rank_tt(PG_FUNCTION_ARGS)
typedef struct
{
union
{
struct
{ /* compiled doc representation */
QueryItem **items;
int16 nitem;
} query;
struct
{ /* struct is used for preparing doc representation */
QueryItem *item;
WordEntry *entry;
} map;
@ -537,8 +541,8 @@ compareDocR(const void *va, const void *vb)
typedef struct
{
bool operandexists;
bool reverseinsert; /* indicates insert order, true means descending order */
uint32 npos;
WordEntryPos pos[MAXQROPOS];
} QueryRepresentationOperand;
@ -731,8 +735,8 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen)
doc = (DocRepresentation *) palloc(sizeof(DocRepresentation) * len);
/*
* Iterate through query to make DocRepresentation for words and its
* entries satisfied by query
*/
for (i = 0; i < qr->query->size; i++)
{

View File

@ -276,16 +276,20 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
switch (char_weight)
{
case 'A':
case 'a':
weight = 3;
break;
case 'B':
case 'b':
weight = 2;
break;
case 'C':
case 'c':
weight = 1;
break;
case 'D':
case 'd':
weight = 0;
break;
default:
@ -301,9 +305,9 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
&dlexemes, &nulls, &nlexemes);
/*
* Assuming that the lexemes array is significantly shorter than the
* tsvector, we can iterate through lexemes performing a binary search
* of each lexeme from lexemes in the tsvector.
*/
for (i = 0; i < nlexemes; i++)
{
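
The lookup strategy mentioned in that comment works because the tsvector's entry array is kept sorted by lexeme, so each of the m filter lexemes costs O(log n). A standalone sketch over an array of strings; lexeme_bsearch() is a stand-in for the binary search the real code performs over WordEntry offsets:

#include <stdio.h>
#include <string.h>

/* Binary search in a lexicographically sorted entry array. */
static int
lexeme_bsearch(const char *const *entries, int nentries, const char *key)
{
    int     lo = 0,
            hi = nentries - 1;

    while (lo <= hi)
    {
        int     mid = (lo + hi) / 2;
        int     cmp = strcmp(entries[mid], key);

        if (cmp == 0)
            return mid;
        if (cmp < 0)
            lo = mid + 1;
        else
            hi = mid - 1;
    }
    return -1;                  /* not present */
}

int
main(void)
{
    const char *const entries[] = {"cat", "dog", "fish", "horse"};
    const char *const filter[] = {"dog", "zebra"};

    for (int i = 0; i < 2; i++)
        printf("%s -> %d\n", filter[i],
               lexeme_bsearch(entries, 4, filter[i]));
    return 0;
}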
@ -323,6 +327,7 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
if (lex_pos >= 0 && (j = POSDATALEN(tsout, entry + lex_pos)) != 0)
{
WordEntryPos *p = POSDATAPTR(tsout, entry + lex_pos);

while (j--)
{
WEP_SETWEIGHT(*p, weight);
@ -440,13 +445,15 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
*arrout;
char *data = STRPTR(tsv),
*dataout;
int i,
j,
k,
curoff;
/*
* Here we overestimate tsout size, since we don't know the exact size
* occupied by positions and weights.  We will set the exact size later,
* after a pass through TSVector.
*/
tsout = (TSVector) palloc0(VARSIZE(tsv));
arrout = ARRPTR(tsout);
@ -465,10 +472,11 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
{
/*
* Here we should check whether current i is present in
* indices_to_delete or not.  Since indices_to_delete is already sorted
* we can advance its index only when we have a match.
*/
if (k < indices_count && i == indices_to_delete[k])
{
k++;
continue;
}
@ -483,6 +491,7 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
{
int len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos) +
sizeof(uint16);

curoff = SHORTALIGN(curoff);
memcpy(dataout + curoff,
STRPTR(tsv) + SHORTALIGN(arrin[i].pos + arrin[i].len),
@ -494,9 +503,10 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
}
/*
* After the pass through TSVector, k should equal indices_count exactly.
* If it doesn't, then the caller provided us with indices outside of the
* [0, tsv->size) range and the estimation of tsout's size is wrong.
*/
Assert(k == indices_count);
@ -538,7 +548,8 @@ tsvector_delete_arr(PG_FUNCTION_ARGS)
TSVector tsin = PG_GETARG_TSVECTOR(0),
tsout;
ArrayType *lexemes = PG_GETARG_ARRAYTYPE_P(1);
int i,
nlex,
skip_count,
*skip_indices;
Datum *dlexemes;
@ -548,8 +559,8 @@ tsvector_delete_arr(PG_FUNCTION_ARGS)
&dlexemes, &nulls, &nlex);
/*
* In the typical use case the array of lexemes to delete is relatively
* small, so here we optimize things for that scenario: iterate through
* lexarr performing a binary search of each lexeme from lexarr in the
* tsvector.
*/
skip_indices = palloc0(nlex * sizeof(int));
@ -641,8 +652,8 @@ tsvector_unnest(PG_FUNCTION_ARGS)
/*
* Internally tsvector stores position and weight in the same
* uint16 (2 bits for weight, 14 for position).  Here we extract
* that in two separate arrays.
*/
posv = _POSVECPTR(tsin, arrin + i);
positions = palloc(posv->npos * sizeof(Datum));
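
That 2-bit/14-bit packing is plain mask-and-shift. A runnable sketch of the same layout; the macros mirror the spirit of the WEP_* macros rather than copying ts_type.h verbatim:

#include <stdint.h>
#include <stdio.h>

/* Low 14 bits hold the lexeme position, high 2 bits the weight
 * (0 = D .. 3 = A). */
typedef uint16_t WordEntryPos;

#define WEP_GETWEIGHT(x)    ((x) >> 14)
#define WEP_GETPOS(x)       ((x) & 0x3fff)
#define WEP_SETWEIGHT(x, w) \
    ((x) = (WordEntryPos) (((x) & 0x3fff) | ((uint16_t) (w) << 14)))

int
main(void)
{
    WordEntryPos p = 0;

    WEP_SETWEIGHT(p, 3);        /* weight 'A' */
    p = (WordEntryPos) ((p & 0xc000) | 1234);   /* position 1234 */

    printf("weight=%u pos=%u\n", WEP_GETWEIGHT(p), WEP_GETPOS(p));
    return 0;
}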
@ -772,7 +783,8 @@ tsvector_filter(PG_FUNCTION_ARGS)
Datum *dweights;
bool *nulls;
int nweights;
int i,
j;
int cur_pos = 0;
char mask = 0;
@ -791,16 +803,20 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
char_weight = DatumGetChar(dweights[i]);
switch (char_weight)
{
case 'A':
case 'a':
mask = mask | 8;
break;
case 'B':
case 'b':
mask = mask | 4;
break;
case 'C':
case 'c':
mask = mask | 2;
break;
case 'D':
case 'd':
mask = mask | 1;
break;
default:
@ -1276,6 +1292,7 @@ checkcondition_str(void *checkval, QueryOperand *val, ExecPhraseData *data)
WordEntryPos *allpos = NULL;
int npos = 0,
totalpos = 0;

/*
* there was a failed exact search, so we should scan further to find
* a prefix match. We also need to do so if caller needs position info
@ -1371,22 +1388,24 @@ TS_phrase_execute(QueryItem *curitem,
return false;
/*
* if at least one of the operands has no position information,
* fall back to AND operation.
*/
if (Ldata.npos == 0 || Rdata.npos == 0)
return true;
/*
* Result of the operation is a list of the corresponding positions of
* the RIGHT operand.
*/
if (data)
{
if (!Rdata.allocated)

/*
* OP_PHRASE is based on OP_AND, so the number of
* resulting positions cannot be greater than the total
* amount of operands.
*/
data->pos = palloc(sizeof(WordEntryPos) * Min(Ldata.npos, Rdata.npos));
else
@ -1439,8 +1458,8 @@ TS_phrase_execute(QueryItem *curitem,
else
{
/*
* Go to the next Rpos, because Lpos is ahead of the
* current Rpos
*/
break;
}
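
The Lpos/Rpos walk above is a merge join over two sorted position lists: keep each right-operand position whose left counterpart sits exactly `distance` earlier. A standalone sketch, with phrase_merge() as a hypothetical stand-in for the corresponding loop in TS_phrase_execute():

#include <stdio.h>

/* Keep each rpos for which some lpos equals rpos - distance.
 * Both lists are sorted and scanned once. */
static int
phrase_merge(const int *lpos, int nl, const int *rpos, int nr,
             int distance, int *out)
{
    int     l = 0,
            r = 0,
            n = 0;

    while (l < nl && r < nr)
    {
        int     want = rpos[r] - distance;

        if (lpos[l] == want)
        {
            out[n++] = rpos[r];
            r++;
        }
        else if (lpos[l] < want)
            l++;
        else
            r++;                /* Lpos is ahead of the current Rpos */
    }
    return n;
}

int
main(void)
{
    int     lpos[] = {1, 5, 9};
    int     rpos[] = {2, 7, 10};
    int     out[3];
    int     n = phrase_merge(lpos, 3, rpos, 3, 1, out);

    for (int i = 0; i < n; i++)
        printf("%d ", out[i]);  /* prints "2 10" */
    printf("\n");
    return 0;
}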
@ -1546,6 +1565,7 @@ tsquery_requires_match(QueryItem *curitem)
return false;
case OP_PHRASE:

/*
* Treat OP_PHRASE as OP_AND here
*/

View File

@ -340,8 +340,8 @@ uuid_abbrev_abort(int memtupcount, SortSupport ssup)
/*
* Target minimum cardinality is 1 per ~2k of non-null inputs.  0.5 row
* fudge factor allows us to abort earlier on genuinely pathological data
* where we've had exactly one abbreviated value in the first 2k
* (non-null) rows.
*/
if (abbr_card < uss->input_count / 2000.0 + 0.5)
{
@ -400,9 +400,9 @@ uuid_abbrev_convert(Datum original, SortSupport ssup)
* Byteswap on little-endian machines.
*
* This is needed so that uuid_cmp_abbrev() (an unsigned integer 3-way
* comparator) works correctly on all platforms.  If we didn't do this,
* the comparator would have to call memcmp() with a pair of pointers to
* the first byte of each abbreviated key, which is slower.
*/
res = DatumBigEndianToNative(res);
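
Why the byteswap matters can be shown in a few lines: pack key prefixes big-endian and a plain unsigned integer comparison agrees with memcmp() on the raw bytes. abbrev_key() below is illustrative, not uuid_abbrev_convert() itself:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack the first 8 bytes of a key big-endian into a uint64, so that
 * comparing the packed words orders keys the same way memcmp() would. */
static uint64_t
abbrev_key(const unsigned char *bytes)
{
    uint64_t    k = 0;

    for (int i = 0; i < 8; i++)
        k = (k << 8) | bytes[i];    /* big-endian, regardless of host */
    return k;
}

int
main(void)
{
    unsigned char a[8] = {0x00, 0x01, 0xff, 0, 0, 0, 0, 0};
    unsigned char b[8] = {0x00, 0x02, 0x00, 0, 0, 0, 0, 0};

    printf("memcmp: %d, abbrev: %d\n",
           memcmp(a, b, 8) < 0,
           abbrev_key(a) < abbrev_key(b));  /* both print 1 */
    return 0;
}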

View File

@ -610,10 +610,11 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
plansource->is_valid = false;
/*
* If we have a join pushed down to the foreign server and the current
* user is different from the one for which the plan was created,
* invalidate the generic plan since user mapping for the new user might
* make the join unsafe to push down, or change which user mapping is
* used.
*/
if (plansource->is_valid &&
plansource->gplan &&
@ -1911,9 +1912,9 @@ PlanCacheUserMappingCallback(Datum arg, int cacheid, uint32 hashvalue)
/*
* If the plan has pushed down foreign joins, those joins may become
* unsafe to push down because of user mapping changes.  Invalidate
* only the generic plan, since changes to user mapping do not
* invalidate the parse tree.
*/
if (plansource->gplan && plansource->gplan->has_foreign_join)
plansource->gplan->is_valid = false;

View File

@ -1467,9 +1467,9 @@ EmitErrorReport(void)
* mechanisms.
*
* The log hook has access to both the translated and original English
* error message text, which is passed through to allow it to be used as a
* message identifier.  Note that the original text is not available for
* detail, detail_log, hint and context text elements.
*/
if (edata->output_to_server && emit_log_hook)
(*emit_log_hook) (edata);

View File

@ -192,6 +192,7 @@ PerformAuthentication(Port *port)
* FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
#ifdef EXEC_BACKEND

/*
* load_hba() and load_ident() want to work within the PostmasterContext,
* so create that if it doesn't exist (which it won't).  We'll delete it
@ -870,9 +871,9 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
{
/*
* If this is a background worker not bound to any particular
* database, we're done now.  Everything that follows only makes sense
* if we are bound to a specific database.  We do need to close the
* transaction we started before returning.
*/
if (!bootstrap)
CommitTransactionCommand();

View File

@ -5926,13 +5926,14 @@ set_config_option(const char *name, const char *value,
* don't re-read the config file during backend start.
*
* In EXEC_BACKEND builds, this works differently: we load all
* non-default settings from the CONFIG_EXEC_PARAMS file
* during backend start.  In that case we must accept
* PGC_SIGHUP settings, so as to have the same value as if
* we'd forked from the postmaster.  This can also happen when
* using RestoreGUCState() within a background worker that
* needs to have the same settings as the user backend that
* started it.  is_reload will be true when either situation
* applies.
*/
if (IsUnderPostmaster && !is_reload)
return -1;

View File

@ -91,10 +91,10 @@ pg_config(PG_FUNCTION_ARGS)
/*
* SFRM_Materialize mode expects us to return a NULL Datum.  The actual
* tuples are in our tuplestore and passed back through rsinfo->setResult.
* rsinfo->setDesc is set to the tuple description that we actually used
* to build our tuples with, so the caller can verify we did what it was
* expecting.
*/
rsinfo->setDesc = tupdesc;
MemoryContextSwitchTo(oldcontext);

View File

@ -321,10 +321,10 @@ struct Tuplesortstate
/*
* Memory for tuples is sometimes allocated in batch, rather than
* incrementally.  This implies that incremental memory accounting has
* been abandoned.  Currently, this only happens for the final on-the-fly
* merge step.  Large batch allocations can store tuples (e.g.
* IndexTuples) without palloc() fragmentation and other overhead.
*/
bool batchUsed;
@ -337,8 +337,8 @@ struct Tuplesortstate
/*
* While building initial runs, this is the current output run number
* (starting at RUN_FIRST).  Afterwards, it is the number of initial runs
* we made.
*/
int currentRun;
@ -375,9 +375,9 @@ struct Tuplesortstate
* just a few large allocations.
*
* Aside from the general benefits of performing fewer individual retail
* palloc() calls, this also helps make merging more cache efficient,
* since each tape's tuples must naturally be accessed sequentially (in
* sorted order).
*/
int64 spacePerTape; /* Space (memory) for tuples (not slots) */
char **mergetuples; /* Each tape's memory allocation */
@ -647,10 +647,10 @@ tuplesort_begin_common(int workMem, bool randomAccess)
* Caller tuple (e.g. IndexTuple) memory context.
*
* A dedicated child context used exclusively for caller passed tuples
* eases memory management.  Resetting at key points reduces
* fragmentation.  Note that the memtuples array of SortTuples is allocated
* in the parent context, not this context, because there is no need to
* free memtuples early.
*/
tuplecontext = AllocSetContextCreate(sortcontext,
"Caller tuples",
@ -1042,8 +1042,8 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
* a pass-by-value datatype could have an abbreviated form that is cheaper
* to compare.  In a tuple sort, we could support that, because we can
* always extract the original datum from the tuple if needed.  Here, we
* can't, because a datum sort only stores a single copy of the datum; the
* "tuple" field of each sortTuple is NULL.
*/
state->sortKeys->abbreviate = !typbyval;
@ -1413,8 +1413,7 @@ tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
* ensure a consistent representation (current tuple was just
* handled).  It does not matter if some dumped tuples are already
* sorted on tape, since serialized tuples lack abbreviated keys
* (TSS_BUILDRUNS state prevents control reaching here in any case).
*/
for (i = 0; i < state->memtupcount; i++)
{
@ -1459,8 +1458,8 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
if (isNull || !state->tuples)
{
/*
* Set datum1 to zeroed representation for NULLs (to be consistent,
* and to support cheap inequality tests for NULL abbreviated keys).
*/
stup.datum1 = !isNull ? val : (Datum) 0;
stup.isnull1 = isNull;
@ -1498,10 +1497,10 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
*
* Alter datum1 representation in already-copied tuples, so as to
* ensure a consistent representation (current tuple was just
* handled).  It does not matter if some dumped tuples are already
* sorted on tape, since serialized tuples lack abbreviated keys
* (TSS_BUILDRUNS state prevents control reaching here in any
* case).
*/
for (i = 0; i < state->memtupcount; i++)
{
@ -1965,11 +1964,11 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
SortTuple *newtup;
/*
* Returned tuple is still counted in our memory space most of
* the time.  See mergebatchone() for discussion of why caller
* may occasionally be required to free returned tuple, and
* how preread memory is managed with regard to edge cases
* more generally.
*/
*stup = state->memtuples[0];
tuplesort_heap_siftup(state, false);
@ -2269,8 +2268,8 @@ useselection(Tuplesortstate *state)
/*
* memtupsize might be noticeably higher than memtupcount here in atypical
* cases.  It seems slightly preferable to not allow recent outliers to
* impact this determination.  Note that caller's trace_sort output
* reports memtupcount instead.
*/
if (state->memtupsize <= replacement_sort_tuples)
return true;
@ -2349,9 +2348,9 @@ inittapes(Tuplesortstate *state)
state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
/*
* Give replacement selection a try based on user setting.  There will be
* a switch to a simple hybrid sort-merge strategy after the first run
* (iff we could not output one long run).
*/
state->replaceActive = useselection(state);
@ -2792,8 +2791,8 @@ beginmerge(Tuplesortstate *state, bool finalMergeBatch)
int usedSlots;
/*
* Report how effective batchmemtuples() was in balancing the
* number of slots against the need for memory for the
* underlying tuples (e.g. IndexTuples).  The big preread of
* all tapes when switching to FINALMERGE state should be
* fairly representative of memory utilization during the
@ -2867,8 +2866,8 @@ batchmemtuples(Tuplesortstate *state)
/*
* To establish balanced memory use after refunding palloc overhead,
* temporarily have our accounting indicate that we've allocated all
* memory we're allowed to, less that refund, and call grow_memtuples() to
* have it increase the number of slots.
*/
state->growmemtuples = true;
USEMEM(state, availMemLessRefund);
@ -2970,9 +2969,9 @@ mergebatchone(Tuplesortstate *state, int srcTape, SortTuple *rtup,
/*
* Mark tuple buffer range for reuse, but be careful to move final,
* tail tuple to start of space for next run so that it's available to
* caller when stup is returned, and remains available at least until
* the next tuple is requested.
*/
tupLen = state->mergecurrent[srcTape] - state->mergetail[srcTape];
state->mergecurrent[srcTape] = state->mergetuples[srcTape];
@ -3081,9 +3080,9 @@ mergebatchalloc(Tuplesortstate *state, int tapenum, Size tuplen)
state->mergetuples[tapenum] + state->spacePerTape)
{
/*
* Usual case -- caller is returned pointer into its tape's buffer,
* and an offset from that point is recorded as where tape has
* consumed up to for current round of preloading.
*/
ret = state->mergetail[tapenum] = state->mergecurrent[tapenum];
state->mergecurrent[tapenum] += reserve_tuplen;
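
mergebatchalloc() is essentially a bump allocator over a per-tape buffer: allocation advances a cursor, and the whole buffer is reclaimed at once when the next preload round starts. A toy sketch of that discipline with hypothetical names; the real code additionally tracks "overflow" allocations that signal a new preload round:

#include <stdio.h>
#include <stdlib.h>

/* One tape's batch buffer: base pointer, bump cursor, total size. */
typedef struct TapeBuffer
{
    char   *base;               /* start of the tape's memory */
    char   *current;            /* next free byte */
    size_t  space;              /* total buffer size */
} TapeBuffer;

static void *
tape_alloc(TapeBuffer *tb, size_t len)
{
    void   *ret;

    len = (len + 7) & ~(size_t) 7;  /* keep allocations 8-byte aligned */
    if (tb->current + len > tb->base + tb->space)
        return NULL;            /* caller must trigger a new round */
    ret = tb->current;
    tb->current += len;
    return ret;
}

static void
tape_reset(TapeBuffer *tb)
{
    tb->current = tb->base;     /* mark the whole buffer for reuse */
}

int
main(void)
{
    TapeBuffer  tb;

    tb.base = malloc(64);
    tb.current = tb.base;
    tb.space = 64;

    printf("a=%p b=%p\n", tape_alloc(&tb, 24), tape_alloc(&tb, 24));
    tape_reset(&tb);
    printf("after reset: %p\n", tape_alloc(&tb, 40));
    free(tb.base);
    return 0;
}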
@ -3238,8 +3237,8 @@ dumptuples(Tuplesortstate *state, bool alltuples)
if (state->replaceActive) if (state->replaceActive)
{ {
/* /*
* Still holding out for a case favorable to replacement selection. * Still holding out for a case favorable to replacement
* Still incrementally spilling using heap. * selection. Still incrementally spilling using heap.
* *
* Dump the heap's frontmost entry, and sift up to remove it from * Dump the heap's frontmost entry, and sift up to remove it from
* the heap. * the heap.
@ -3252,17 +3251,15 @@ dumptuples(Tuplesortstate *state, bool alltuples)
else else
{ {
/* /*
* Once committed to quicksorting runs, never incrementally * Once committed to quicksorting runs, never incrementally spill
* spill
*/ */
dumpbatch(state, alltuples); dumpbatch(state, alltuples);
break; break;
} }
/* /*
* If top run number has changed, we've finished the current run * If top run number has changed, we've finished the current run (this
* (this can only be the first run), and will no longer spill * can only be the first run), and will no longer spill incrementally.
* incrementally.
*/ */
if (state->memtupcount == 0 || if (state->memtupcount == 0 ||
state->memtuples[0].tupindex == HEAP_RUN_NEXT) state->memtuples[0].tupindex == HEAP_RUN_NEXT)
@ -3280,6 +3277,7 @@ dumptuples(Tuplesortstate *state, bool alltuples)
state->currentRun, state->destTape, state->currentRun, state->destTape,
pg_rusage_show(&state->ru_start)); pg_rusage_show(&state->ru_start));
#endif #endif
/* /*
* Done if heap is empty, which is possible when there is only one * Done if heap is empty, which is possible when there is only one
* long run. * long run.
@ -3334,19 +3332,19 @@ dumpbatch(Tuplesortstate *state, bool alltuples)
* remaining tuples are loaded into memory, just before input was * remaining tuples are loaded into memory, just before input was
* exhausted. * exhausted.
* *
* In general, short final runs are quite possible. Rather than * In general, short final runs are quite possible. Rather than allowing
* allowing a special case where there was a superfluous * a special case where there was a superfluous selectnewtape() call (i.e.
* selectnewtape() call (i.e. a call with no subsequent run actually * a call with no subsequent run actually written to destTape), we prefer
* written to destTape), we prefer to write out a 0 tuple run. * to write out a 0 tuple run.
* *
* mergepreread()/mergeprereadone() are prepared for 0 tuple runs, and * mergepreread()/mergeprereadone() are prepared for 0 tuple runs, and
* will reliably mark the tape inactive for the merge when called from * will reliably mark the tape inactive for the merge when called from
* beginmerge(). This case is therefore similar to the case where * beginmerge(). This case is therefore similar to the case where
* mergeonerun() finds a dummy run for the tape, and so doesn't need to * mergeonerun() finds a dummy run for the tape, and so doesn't need to
* merge a run from the tape (or conceptually "merges" the dummy run, * merge a run from the tape (or conceptually "merges" the dummy run, if
* if you prefer). According to Knuth, Algorithm D "isn't strictly * you prefer). According to Knuth, Algorithm D "isn't strictly optimal"
* optimal" in its method of distribution and dummy run assignment; * in its method of distribution and dummy run assignment; this edge case
* this edge case seems very unlikely to make that appreciably worse. * seems very unlikely to make that appreciably worse.
*/ */
Assert(state->status == TSS_BUILDRUNS); Assert(state->status == TSS_BUILDRUNS);
@ -3369,8 +3367,8 @@ dumpbatch(Tuplesortstate *state, bool alltuples)
#endif #endif
/* /*
* Sort all tuples accumulated within the allowed amount of memory for this * Sort all tuples accumulated within the allowed amount of memory for
* run using quicksort * this run using quicksort
*/ */
tuplesort_sort_memtuples(state); tuplesort_sort_memtuples(state);
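
[Note] For reference, the quicksort entry point invoked above is tiny; this is its approximate 9.6 shape (qsort_ssup() and qsort_tuple() are the specialized qsort instances generated inside tuplesort.c, and onlyKey is set when a single-attribute comparison suffices; details are best-effort, not quoted from this commit):

    static void
    tuplesort_sort_memtuples(Tuplesortstate *state)
    {
        if (state->memtupcount > 1)
        {
            /* Can we use the single-key sort function? */
            if (state->onlyKey != NULL)
                qsort_ssup(state->memtuples, state->memtupcount,
                           state->onlyKey);
            else
                qsort_tuple(state->memtuples, state->memtupcount,
                            state->comparetup, state);
        }
    }
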
@@ -3848,11 +3846,11 @@ readtup_alloc(Tuplesortstate *state, int tapenum, Size tuplen)
if (state->batchUsed) if (state->batchUsed)
{ {
/* /*
* No USEMEM() call, because during final on-the-fly merge * No USEMEM() call, because during final on-the-fly merge accounting
* accounting is based on tape-private state. ("Overflow" * is based on tape-private state. ("Overflow" allocations are
* allocations are detected as an indication that a new round * detected as an indication that a new round or preloading is
* or preloading is required. Preloading marks existing * required. Preloading marks existing contents of tape's batch buffer
* contents of tape's batch buffer for reuse.) * for reuse.)
*/ */
return mergebatchalloc(state, tapenum, tuplen); return mergebatchalloc(state, tapenum, tuplen);
} }
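
[Note] The branch this hunk omits is the ordinary path: when no final on-the-fly merge is in progress, the allocation is a plain palloc() charged against the sort's memory budget. A sketch of that else branch, with the USEMEM()/GetMemoryChunkSpace() accounting assumed from the usual tuplesort idiom:

    else
    {
        char       *ret;

        /* Batch allocation not yet in use: charge this tuple's space
         * against availMem as usual. */
        ret = palloc(tuplen);
        USEMEM(state, GetMemoryChunkSpace(ret));
        return ret;
    }
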
@@ -3993,8 +3991,7 @@ copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
* ensure a consistent representation (current tuple was just * ensure a consistent representation (current tuple was just
* handled). It does not matter if some dumped tuples are already * handled). It does not matter if some dumped tuples are already
* sorted on tape, since serialized tuples lack abbreviated keys * sorted on tape, since serialized tuples lack abbreviated keys
* (TSS_BUILDRUNS state prevents control reaching here in any * (TSS_BUILDRUNS state prevents control reaching here in any case).
* case).
*/ */
for (i = 0; i < state->memtupcount; i++) for (i = 0; i < state->memtupcount; i++)
{ {
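
[Note] The truncated loop body is the abbreviated-key reset the comment describes: each in-memory tuple's cached datum1 is replaced by the full, unabbreviated leading key so that in-memory and on-tape tuples compare consistently. A hedged sketch of the heap-tuple variant (the same pattern recurs in the copytup_cluster and copytup_index hunks below; names follow the 9.6 sources but are assumptions here):

    for (i = 0; i < state->memtupcount; i++)
    {
        SortTuple  *mtup = &state->memtuples[i];
        HeapTupleData htup;

        /* Reconstruct a HeapTuple header around the stored MinimalTuple. */
        htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
            MINIMAL_TUPLE_OFFSET;
        htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
                                         MINIMAL_TUPLE_OFFSET);

        /* Re-fetch the leading key in full, overwriting the abbreviation. */
        mtup->datum1 = heap_getattr(&htup,
                                    state->sortKeys[0].ssup_attno,
                                    state->tupDesc,
                                    &mtup->isnull1);
    }
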
@@ -4238,8 +4235,7 @@ copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
* ensure a consistent representation (current tuple was just * ensure a consistent representation (current tuple was just
* handled). It does not matter if some dumped tuples are already * handled). It does not matter if some dumped tuples are already
* sorted on tape, since serialized tuples lack abbreviated keys * sorted on tape, since serialized tuples lack abbreviated keys
* (TSS_BUILDRUNS state prevents control reaching here in any * (TSS_BUILDRUNS state prevents control reaching here in any case).
* case).
*/ */
for (i = 0; i < state->memtupcount; i++) for (i = 0; i < state->memtupcount; i++)
{ {
@@ -4544,8 +4540,7 @@ copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
* ensure a consistent representation (current tuple was just * ensure a consistent representation (current tuple was just
* handled). It does not matter if some dumped tuples are already * handled). It does not matter if some dumped tuples are already
* sorted on tape, since serialized tuples lack abbreviated keys * sorted on tape, since serialized tuples lack abbreviated keys
* (TSS_BUILDRUNS state prevents control reaching here in any * (TSS_BUILDRUNS state prevents control reaching here in any case).
* case).
*/ */
for (i = 0; i < state->memtupcount; i++) for (i = 0; i < state->memtupcount; i++)
{ {

View File

@@ -80,9 +80,8 @@ typedef struct OldSnapshotControlData
*/ */
slock_t mutex_current; /* protect current_timestamp */ slock_t mutex_current; /* protect current_timestamp */
int64 current_timestamp; /* latest snapshot timestamp */ int64 current_timestamp; /* latest snapshot timestamp */
slock_t mutex_latest_xmin; /* protect latest_xmin slock_t mutex_latest_xmin; /* protect latest_xmin and
* and next_map_update * next_map_update */
*/
TransactionId latest_xmin; /* latest snapshot xmin */ TransactionId latest_xmin; /* latest snapshot xmin */
int64 next_map_update; /* latest snapshot valid up to */ int64 next_map_update; /* latest snapshot valid up to */
slock_t mutex_threshold; /* protect threshold fields */ slock_t mutex_threshold; /* protect threshold fields */
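
[Note] Each field group in this struct is guarded by its own spinlock, so readers take only the lock covering the fields they touch. A minimal sketch of the access idiom (variable names are from this hunk; the surrounding function context is assumed):

    int64       now;

    /* Read the latest snapshot timestamp under its dedicated mutex. */
    SpinLockAcquire(&oldSnapshotControl->mutex_current);
    now = oldSnapshotControl->current_timestamp;
    SpinLockRelease(&oldSnapshotControl->mutex_current);
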
@@ -709,8 +708,8 @@ UpdateActiveSnapshotCommandId(void)
/* /*
* Don't allow modification of the active snapshot during parallel * Don't allow modification of the active snapshot during parallel
* operation. We share the snapshot to worker backends at the beginning of * operation. We share the snapshot to worker backends at the beginning
* parallel operation, so any change to the snapshot can lead to * of parallel operation, so any change to the snapshot can lead to
* inconsistencies. We have other defenses against * inconsistencies. We have other defenses against
* CommandCounterIncrement, but there are a few places that call this * CommandCounterIncrement, but there are a few places that call this
* directly, so we put an additional guard here. * directly, so we put an additional guard here.
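
[Note] The "additional guard" mentioned here is plausibly just an error raised in parallel mode before the command ID is touched; a sketch of that check, assuming the standard IsInParallelMode() test used elsewhere in 9.6:

    if (IsInParallelMode())
        elog(ERROR,
             "cannot modify commandid in active snapshot during a parallel operation");
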
@@ -1715,8 +1714,8 @@ MaintainOldSnapshotTimeMapping(int64 whenTaken, TransactionId xmin)
ts = AlignTimestampToMinuteBoundary(whenTaken); ts = AlignTimestampToMinuteBoundary(whenTaken);
/* /*
* Keep track of the latest xmin seen by any process. Update mapping * Keep track of the latest xmin seen by any process. Update mapping with
* with a new value when we have crossed a bucket boundary. * a new value when we have crossed a bucket boundary.
*/ */
SpinLockAcquire(&oldSnapshotControl->mutex_latest_xmin); SpinLockAcquire(&oldSnapshotControl->mutex_latest_xmin);
latest_xmin = oldSnapshotControl->latest_xmin; latest_xmin = oldSnapshotControl->latest_xmin;
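
[Note] The hunk ends mid-critical-section; for context, the update presumably continues in the usual way, advancing the bucket-boundary timestamp and the latest xmin before releasing the lock. A hedged sketch of that continuation (assumed, not shown in the diff; ts and latest_xmin are the locals already visible above):

    int64       update_ts;

    update_ts = oldSnapshotControl->next_map_update;
    if (ts > update_ts)
        oldSnapshotControl->next_map_update = ts;   /* crossed a bucket boundary */
    if (TransactionIdFollows(xmin, latest_xmin))
        oldSnapshotControl->latest_xmin = xmin;     /* new high-water mark */
    SpinLockRelease(&oldSnapshotControl->mutex_latest_xmin);
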

Some files were not shown because too many files have changed in this diff.