pgindent run for 9.6

Robert Haas 2016-06-09 18:02:36 -04:00
parent 9164deea2f
commit 4bc424b968
252 changed files with 2670 additions and 2558 deletions

View File

@@ -209,8 +209,8 @@ static void
 explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
     /*
-     * For rate sampling, randomly choose top-level statement. Either
-     * all nested statements will be explained or none will.
+     * For rate sampling, randomly choose top-level statement. Either all
+     * nested statements will be explained or none will.
      */
     if (auto_explain_log_min_duration >= 0 && nesting_level == 0)
         current_query_sampled = (random() < auto_explain_sample_rate *

View File

@@ -33,8 +33,8 @@ PG_MODULE_MAGIC;
 typedef struct
 {
     BloomState  blstate;        /* bloom index state */
-    MemoryContext tmpCtx;       /* temporary memory context reset after
-                                 * each tuple */
+    MemoryContext tmpCtx;       /* temporary memory context reset after each
+                                 * tuple */
     char        data[BLCKSZ];   /* cached page */
     int64       count;          /* number of tuples in cached page */
 } BloomBuildState;
@@ -140,8 +140,8 @@ blbuild(Relation heap, Relation index, IndexInfo *indexInfo)
                    bloomBuildCallback, (void *) &buildstate);

     /*
-     * There are could be some items in cached page. Flush this page
-     * if needed.
+     * There are could be some items in cached page. Flush this page if
+     * needed.
      */
     if (buildstate.count > 0)
         flushCachedPage(index, &buildstate);

View File

@@ -33,10 +33,9 @@ typedef struct BloomPageOpaqueData
 {
     OffsetNumber maxoff;        /* number of index tuples on page */
     uint16      flags;          /* see bit definitions below */
-    uint16      unused;         /* placeholder to force maxaligning of size
-                                 * of BloomPageOpaqueData and to place
-                                 * bloom_page_id exactly at the end of page
-                                 */
+    uint16      unused;         /* placeholder to force maxaligning of size of
+                                 * BloomPageOpaqueData and to place
+                                 * bloom_page_id exactly at the end of page */
     uint16      bloom_page_id;  /* for identification of BLOOM indexes */
 } BloomPageOpaqueData;

@@ -102,8 +101,8 @@ typedef struct BloomOptions
 {
     int32       vl_len_;        /* varlena header (do not touch directly!) */
     int         bloomLength;    /* length of signature in words (not bits!) */
-    int         bitSize[INDEX_MAX_KEYS];    /* # of bits generated for each
-                                             * index key */
+    int         bitSize[INDEX_MAX_KEYS];    /* # of bits generated for
+                                             * each index key */
 } BloomOptions;

 /*
@@ -176,14 +175,14 @@ typedef BloomScanOpaqueData *BloomScanOpaque;
 /* blutils.c */
 extern void _PG_init(void);
 extern Datum blhandler(PG_FUNCTION_ARGS);
-extern void initBloomState(BloomState * state, Relation index);
+extern void initBloomState(BloomState *state, Relation index);
 extern void BloomFillMetapage(Relation index, Page metaPage);
 extern void BloomInitMetapage(Relation index);
 extern void BloomInitPage(Page page, uint16 flags);
 extern Buffer BloomNewBuffer(Relation index);
-extern void signValue(BloomState * state, BloomSignatureWord * sign, Datum value, int attno);
-extern BloomTuple *BloomFormTuple(BloomState * state, ItemPointer iptr, Datum *values, bool *isnull);
-extern bool BloomPageAddItem(BloomState * state, Page page, BloomTuple * tuple);
+extern void signValue(BloomState *state, BloomSignatureWord *sign, Datum value, int attno);
+extern BloomTuple *BloomFormTuple(BloomState *state, ItemPointer iptr, Datum *values, bool *isnull);
+extern bool BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple);

 /* blvalidate.c */
 extern bool blvalidate(Oid opclassoid);

View File

@@ -37,6 +37,7 @@ PG_FUNCTION_INFO_V1(blhandler);
 /* Kind of relation options for bloom index */
 static relopt_kind bl_relopt_kind;
+
 /* parse table for fillRelOptions */
 static relopt_parse_elt bl_relopt_tab[INDEX_MAX_KEYS + 1];
@@ -215,7 +216,9 @@ myRand(void)
      * October 1988, p. 1195.
      *----------
      */
-    int32       hi, lo, x;
+    int32       hi,
+                lo,
+                x;

     /* Must be in [1, 0x7ffffffe] range at this point. */
     hi = next / 127773;

View File

@@ -109,8 +109,8 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
                                OffsetNumberNext(BloomPageGetMaxOffset(page))));

         /*
-         * Add page to notFullPage list if we will not mark page as deleted and
-         * there is a free space on it
+         * Add page to notFullPage list if we will not mark page as deleted
+         * and there is a free space on it
          */
         if (BloomPageGetMaxOffset(page) != 0 &&
             BloomPageGetFreeSpace(&state, page) > state.sizeOfBloomTuple &&

View File

@@ -444,9 +444,9 @@ ean2ISBN(char *isn)
     unsigned    check;

     /*
-     * The number should come in this format: 978-0-000-00000-0
-     * or may be an ISBN-13 number, 979-..., which does not have a short
-     * representation. Do the short output version if possible.
+     * The number should come in this format: 978-0-000-00000-0 or may be an
+     * ISBN-13 number, 979-..., which does not have a short representation. Do
+     * the short output version if possible.
      */
     if (strncmp("978-", isn, 4) == 0)
     {

View File

@@ -318,10 +318,10 @@ tuple_data_split_internal(Oid relid, char *tupdata,
         is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits);

         /*
-         * Tuple header can specify less attributes than tuple descriptor
-         * as ALTER TABLE ADD COLUMN without DEFAULT keyword does not
-         * actually change tuples in pages, so attributes with numbers greater
-         * than (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
+         * Tuple header can specify less attributes than tuple descriptor as
+         * ALTER TABLE ADD COLUMN without DEFAULT keyword does not actually
+         * change tuples in pages, so attributes with numbers greater than
+         * (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
          */
         if (i >= (t_infomask2 & HEAP_NATTS_MASK))
             is_null = true;
@@ -334,6 +334,7 @@ tuple_data_split_internal(Oid relid, char *tupdata,
         {
             off = att_align_pointer(off, tupdesc->attrs[i]->attalign, -1,
                                     tupdata + off);
+
             /*
              * As VARSIZE_ANY throws an exception if it can't properly
              * detect the type of external storage in macros VARTAG_SIZE,

View File

@@ -293,7 +293,8 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
             }

             /*
-             * See comment in gin_trgm_consistent() about * upper bound formula
+             * See comment in gin_trgm_consistent() about * upper bound
+             * formula
              */
             res = (nkeys == 0)
                 ? GIN_FALSE : (((((float4) ntrue) / ((float4) nkeys)) >= nlimit)
@@ -330,9 +331,9 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
         else
         {
             /*
-             * As trigramsMatchGraph implements a monotonic boolean function,
-             * promoting all GIN_MAYBE keys to GIN_TRUE will give a
-             * conservative result.
+             * As trigramsMatchGraph implements a monotonic boolean
+             * function, promoting all GIN_MAYBE keys to GIN_TRUE will
+             * give a conservative result.
              */
             boolcheck = (bool *) palloc(sizeof(bool) * nkeys);
             for (i = 0; i < nkeys; i++)

View File

@@ -296,6 +296,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
     if (GIST_LEAF(entry))
     {                           /* all leafs contains orig trgm */
+
         /*
          * Prevent gcc optimizing the tmpsml variable using volatile
          * keyword. Otherwise comparison of nlimit and tmpsml may give
@@ -476,12 +477,14 @@ gtrgm_distance(PG_FUNCTION_ARGS)
     *recheck = strategy == WordDistanceStrategyNumber;
     if (GIST_LEAF(entry))
     {                           /* all leafs contains orig trgm */
+
         /*
          * Prevent gcc optimizing the sml variable using volatile
          * keyword. Otherwise res can differ from the
          * word_similarity_dist_op() function.
          */
         float4 volatile sml = cnt_sml(qtrg, key, *recheck);
+
         res = 1.0 - sml;
     }
     else if (ISALLTRUE(key))

View File

@@ -362,7 +362,8 @@ static pos_trgm *
 make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
 {
     pos_trgm   *result;
-    int         i, len = len1 + len2;
+    int         i,
+                len = len1 + len2;

     result = (pos_trgm *) palloc(sizeof(pos_trgm) * len);
@@ -387,8 +388,8 @@ make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
 static int
 comp_ptrgm(const void *v1, const void *v2)
 {
-    const pos_trgm *p1 = (const pos_trgm *)v1;
-    const pos_trgm *p2 = (const pos_trgm *)v2;
+    const pos_trgm *p1 = (const pos_trgm *) v1;
+    const pos_trgm *p2 = (const pos_trgm *) v2;
     int         cmp;

     cmp = CMPTRGM(p1->trg, p2->trg);
@@ -488,10 +489,11 @@ iterate_word_similarity(int *trg2indexes,
                 lower = tmp_lower;
                 count = tmp_count;
             }
+
             /*
              * if we only check that word similarity is greater than
-             * pg_trgm.word_similarity_threshold we do not need to calculate
-             * a maximum similarity.
+             * pg_trgm.word_similarity_threshold we do not need to
+             * calculate a maximum similarity.
              */
             if (check_only && smlr_cur >= word_similarity_threshold)
                 break;
@@ -506,6 +508,7 @@ iterate_word_similarity(int *trg2indexes,
         }

         smlr_max = Max(smlr_max, smlr_cur);
+
         /*
          * if we only check that word similarity is greater than
          * pg_trgm.word_similarity_threshold we do not need to calculate a
@@ -517,6 +520,7 @@ iterate_word_similarity(int *trg2indexes,
             for (tmp_lower = prev_lower; tmp_lower < lower; tmp_lower++)
             {
                 int         tmp_trgindex;
+
                 tmp_trgindex = trg2indexes[tmp_lower];
                 if (lastpos[tmp_trgindex] == tmp_lower)
                     lastpos[tmp_trgindex] = -1;
@@ -568,8 +572,8 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
     protect_out_of_mem(slen1 + slen2);

     /* Make positional trigrams */
-    trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) * 3);
-    trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) * 3);
+    trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) *3);
+    trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) *3);
     len1 = generate_trgm_only(trg1, str1, slen1);
     len2 = generate_trgm_only(trg2, str2, slen2);
@@ -595,6 +599,7 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
         if (i > 0)
         {
             int         cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg);
+
             if (cmp != 0)
             {
                 if (found[j])

View File

@@ -301,7 +301,7 @@ collect_visibility_data(Oid relid, bool include_pd)
     rel = relation_open(relid, AccessShareLock);
     nblocks = RelationGetNumberOfBlocks(rel);
-    info = palloc0(offsetof(vbits, bits) + nblocks);
+    info = palloc0(offsetof(vbits, bits) +nblocks);
     info->next = 0;
     info->count = nblocks;
@@ -320,8 +320,8 @@ collect_visibility_data(Oid relid, bool include_pd)
             info->bits[blkno] |= (1 << 1);

     /*
-     * Page-level data requires reading every block, so only get it if
-     * the caller needs it. Use a buffer access strategy, too, to prevent
+     * Page-level data requires reading every block, so only get it if the
+     * caller needs it. Use a buffer access strategy, too, to prevent
      * cache-trashing.
      */
     if (include_pd)

View File

@@ -675,9 +675,9 @@ pgfdw_xact_callback(XactEvent event, void *arg)
                     /*
                      * If a command has been submitted to the remote server by
                      * using an asynchronous execution function, the command
-                     * might not have yet completed. Check to see if a command
-                     * is still being processed by the remote server, and if so,
-                     * request cancellation of the command.
+                     * might not have yet completed. Check to see if a
+                     * command is still being processed by the remote server,
+                     * and if so, request cancellation of the command.
                      */
                     if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
                     {
@@ -798,11 +798,11 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
                 entry->have_error = true;

             /*
-             * If a command has been submitted to the remote server by using an
-             * asynchronous execution function, the command might not have yet
-             * completed. Check to see if a command is still being processed by
-             * the remote server, and if so, request cancellation of the
-             * command.
+             * If a command has been submitted to the remote server by using
+             * an asynchronous execution function, the command might not have
+             * yet completed. Check to see if a command is still being
+             * processed by the remote server, and if so, request cancellation
+             * of the command.
              */
             if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
             {

View File

@@ -1583,8 +1583,8 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
         /*
          * All other system attributes are fetched as 0, except for table OID,
          * which is fetched as the local table OID. However, we must be
-         * careful; the table could be beneath an outer join, in which case
-         * it must go to NULL whenever the rest of the row does.
+         * careful; the table could be beneath an outer join, in which case it
+         * must go to NULL whenever the rest of the row does.
          */
         Oid         fetchval = 0;
@@ -1633,10 +1633,10 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
                                   0 - FirstLowInvalidHeapAttributeNumber);

         /*
-         * In case the whole-row reference is under an outer join then it has to
-         * go NULL whenver the rest of the row goes NULL. Deparsing a join query
-         * would always involve multiple relations, thus qualify_col would be
-         * true.
+         * In case the whole-row reference is under an outer join then it has
+         * to go NULL whenver the rest of the row goes NULL. Deparsing a join
+         * query would always involve multiple relations, thus qualify_col
+         * would be true.
          */
         if (qualify_col)
         {
@@ -1652,7 +1652,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
         /* Complete the CASE WHEN statement started above. */
         if (qualify_col)
-            appendStringInfo(buf," END");
+            appendStringInfo(buf, " END");

         heap_close(rel, NoLock);
         bms_free(attrs_used);

View File

@@ -135,7 +135,7 @@ postgres_fdw_validator(PG_FUNCTION_ARGS)
         {
             int         fetch_size;

-            fetch_size = strtol(defGetString(def), NULL,10);
+            fetch_size = strtol(defGetString(def), NULL, 10);
             if (fetch_size <= 0)
                 ereport(ERROR,
                         (errcode(ERRCODE_SYNTAX_ERROR),

View File

@@ -4063,19 +4063,20 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
     /*
      * Pull the other remote conditions from the joining relations into join
-     * clauses or other remote clauses (remote_conds) of this relation wherever
-     * possible. This avoids building subqueries at every join step, which is
-     * not currently supported by the deparser logic.
+     * clauses or other remote clauses (remote_conds) of this relation
+     * wherever possible. This avoids building subqueries at every join step,
+     * which is not currently supported by the deparser logic.
      *
      * For an inner join, clauses from both the relations are added to the
-     * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from the
-     * outer side are added to remote_conds since those can be evaluated after
-     * the join is evaluated. The clauses from inner side are added to the
-     * joinclauses, since they need to evaluated while constructing the join.
+     * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from
+     * the outer side are added to remote_conds since those can be evaluated
+     * after the join is evaluated. The clauses from inner side are added to
+     * the joinclauses, since they need to evaluated while constructing the
+     * join.
      *
-     * For a FULL OUTER JOIN, the other clauses from either relation can not be
-     * added to the joinclauses or remote_conds, since each relation acts as an
-     * outer relation for the other. Consider such full outer join as
+     * For a FULL OUTER JOIN, the other clauses from either relation can not
+     * be added to the joinclauses or remote_conds, since each relation acts
+     * as an outer relation for the other. Consider such full outer join as
      * unshippable because of the reasons mentioned above in this comment.
      *
      * The joining sides can not have local conditions, thus no need to test

View File

@@ -101,7 +101,8 @@ static relopt_int intRelOpts[] =
             "fillfactor",
             "Packs table pages only to this percentage",
             RELOPT_KIND_HEAP,
-            ShareUpdateExclusiveLock    /* since it applies only to later inserts */
+            ShareUpdateExclusiveLock    /* since it applies only to later
+                                         * inserts */
         },
         HEAP_DEFAULT_FILLFACTOR, HEAP_MIN_FILLFACTOR, 100
     },
@@ -110,7 +111,8 @@ static relopt_int intRelOpts[] =
             "fillfactor",
             "Packs btree index pages only to this percentage",
             RELOPT_KIND_BTREE,
-            ShareUpdateExclusiveLock    /* since it applies only to later inserts */
+            ShareUpdateExclusiveLock    /* since it applies only to later
+                                         * inserts */
         },
         BTREE_DEFAULT_FILLFACTOR, BTREE_MIN_FILLFACTOR, 100
     },
@@ -119,7 +121,8 @@ static relopt_int intRelOpts[] =
             "fillfactor",
             "Packs hash index pages only to this percentage",
             RELOPT_KIND_HASH,
-            ShareUpdateExclusiveLock    /* since it applies only to later inserts */
+            ShareUpdateExclusiveLock    /* since it applies only to later
+                                         * inserts */
         },
         HASH_DEFAULT_FILLFACTOR, HASH_MIN_FILLFACTOR, 100
     },
@@ -128,7 +131,8 @@ static relopt_int intRelOpts[] =
             "fillfactor",
             "Packs gist index pages only to this percentage",
             RELOPT_KIND_GIST,
-            ShareUpdateExclusiveLock    /* since it applies only to later inserts */
+            ShareUpdateExclusiveLock    /* since it applies only to later
+                                         * inserts */
         },
         GIST_DEFAULT_FILLFACTOR, GIST_MIN_FILLFACTOR, 100
     },
@@ -137,7 +141,8 @@ static relopt_int intRelOpts[] =
             "fillfactor",
             "Packs spgist index pages only to this percentage",
             RELOPT_KIND_SPGIST,
-            ShareUpdateExclusiveLock    /* since it applies only to later inserts */
+            ShareUpdateExclusiveLock    /* since it applies only to later
+                                         * inserts */
         },
         SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100
     },

View File

@@ -745,18 +745,17 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
     bool        inVacuum = (stats == NULL);

     /*
-     * We would like to prevent concurrent cleanup process. For
-     * that we will lock metapage in exclusive mode using LockPage()
-     * call. Nobody other will use that lock for metapage, so
-     * we keep possibility of concurrent insertion into pending list
+     * We would like to prevent concurrent cleanup process. For that we will
+     * lock metapage in exclusive mode using LockPage() call. Nobody other
+     * will use that lock for metapage, so we keep possibility of concurrent
+     * insertion into pending list
      */
     if (inVacuum)
     {
         /*
-         * We are called from [auto]vacuum/analyze or
-         * gin_clean_pending_list() and we would like to wait
-         * concurrent cleanup to finish.
+         * We are called from [auto]vacuum/analyze or gin_clean_pending_list()
+         * and we would like to wait concurrent cleanup to finish.
          */
         LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
         workMemory =
@@ -766,9 +765,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
     else
     {
         /*
-         * We are called from regular insert and if we see
-         * concurrent cleanup just exit in hope that concurrent
-         * process will clean up pending list.
+         * We are called from regular insert and if we see concurrent cleanup
+         * just exit in hope that concurrent process will clean up pending
+         * list.
          */
         if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock))
             return;
@@ -829,9 +828,10 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
         Assert(!GinPageIsDeleted(page));

         /*
-         * Are we walk through the page which as we remember was a tail when we
-         * start our cleanup? But if caller asks us to clean up whole pending
-         * list then ignore old tail, we will work until list becomes empty.
+         * Are we walk through the page which as we remember was a tail when
+         * we start our cleanup? But if caller asks us to clean up whole
+         * pending list then ignore old tail, we will work until list becomes
+         * empty.
          */
         if (blkno == blknoFinish && full_clean == false)
             cleanupFinish = true;
@@ -917,8 +917,8 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
                                  * locking */

             /*
-             * remove read pages from pending list, at this point all
-             * content of read pages is in regular structure
+             * remove read pages from pending list, at this point all content
+             * of read pages is in regular structure
              */
             shiftList(index, metabuffer, blkno, fill_fsm, stats);
@@ -961,9 +961,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
     ReleaseBuffer(metabuffer);

     /*
-     * As pending list pages can have a high churn rate, it is
-     * desirable to recycle them immediately to the FreeSpace Map when
-     * ordinary backends clean the list.
+     * As pending list pages can have a high churn rate, it is desirable to
+     * recycle them immediately to the FreeSpace Map when ordinary backends
+     * clean the list.
      */
     if (fsm_vac && fill_fsm)
         IndexFreeSpaceMapVacuum(index);

View File

@@ -281,7 +281,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
                            &htup->t_self);

     /* If we've maxed out our available memory, dump everything to the index */
-    if (buildstate->accum.allocatedMemory >= (Size)maintenance_work_mem * 1024L)
+    if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L)
     {
         ItemPointerData *list;
         Datum       key;

View File

@@ -540,8 +540,10 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
     {
         /* Yes, so initialize stats to zeroes */
         stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+
         /*
-         * and cleanup any pending inserts */
+         * and cleanup any pending inserts
+         */
         ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
                          false, stats);
     }

View File

@@ -1499,7 +1499,8 @@ gistvacuumpage(Relation rel, Page page, Buffer buffer)
 {
     OffsetNumber deletable[MaxIndexTuplesPerPage];
     int         ndeletable = 0;
-    OffsetNumber offnum, maxoff;
+    OffsetNumber offnum,
+                maxoff;

     Assert(GistPageIsLeaf(page));

View File

@@ -57,10 +57,11 @@ gistkillitems(IndexScanDesc scan)
     page = BufferGetPage(buffer);

     /*
-     * If page LSN differs it means that the page was modified since the last read.
-     * killedItems could be not valid so LP_DEAD hints applying is not safe.
+     * If page LSN differs it means that the page was modified since the last
+     * read. killedItems could be not valid so LP_DEAD hints applying is not
+     * safe.
      */
-    if(PageGetLSN(page) != so->curPageLSN)
+    if (PageGetLSN(page) != so->curPageLSN)
     {
         UnlockReleaseBuffer(buffer);
         so->numKilled = 0;      /* reset counter */
@@ -70,8 +71,8 @@ gistkillitems(IndexScanDesc scan)
     Assert(GistPageIsLeaf(page));

     /*
-     * Mark all killedItems as dead. We need no additional recheck,
-     * because, if page was modified, pageLSN must have changed.
+     * Mark all killedItems as dead. We need no additional recheck, because,
+     * if page was modified, pageLSN must have changed.
      */
     for (i = 0; i < so->numKilled; i++)
     {
@@ -400,10 +401,11 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
          * If the scan specifies not to return killed tuples, then we treat a
          * killed tuple as not passing the qual.
          */
-        if(scan->ignore_killed_tuples && ItemIdIsDead(iid))
+        if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
             continue;

         it = (IndexTuple) PageGetItem(page, iid);
+
         /*
          * Must call gistindex_keytest in tempCxt, and clean up any leftover
          * junk afterward.

View File

@@ -230,8 +230,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
             ScanKey     skey = scan->keyData + i;

             /*
-             * Copy consistent support function to ScanKey structure
-             * instead of function implementing filtering operator.
+             * Copy consistent support function to ScanKey structure instead
+             * of function implementing filtering operator.
              */
             fmgr_info_copy(&(skey->sk_func),
                            &(so->giststate->consistentFn[skey->sk_attno - 1]),
@@ -303,8 +303,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
             so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid);

             /*
-             * Copy distance support function to ScanKey structure
-             * instead of function implementing ordering operator.
+             * Copy distance support function to ScanKey structure instead of
+             * function implementing ordering operator.
              */
             fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);

View File

@@ -191,10 +191,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
         return;

     /*
-     * It might seem like multiplying the number of lock waiters by as much
-     * as 20 is too aggressive, but benchmarking revealed that smaller numbers
-     * were insufficient. 512 is just an arbitrary cap to prevent pathological
-     * results.
+     * It might seem like multiplying the number of lock waiters by as much as
+     * 20 is too aggressive, but benchmarking revealed that smaller numbers
+     * were insufficient. 512 is just an arbitrary cap to prevent
+     * pathological results.
      */
     extraBlocks = Min(512, lockWaiters * 20);
@@ -225,10 +225,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
     }

     /*
-     * Updating the upper levels of the free space map is too expensive
-     * to do for every block, but it's worth doing once at the end to make
-     * sure that subsequent insertion activity sees all of those nifty free
-     * pages we just inserted.
+     * Updating the upper levels of the free space map is too expensive to do
+     * for every block, but it's worth doing once at the end to make sure that
+     * subsequent insertion activity sees all of those nifty free pages we
+     * just inserted.
      *
      * Note that we're using the freespace value that was reported for the
      * last block we added as if it were the freespace value for every block
@@ -547,8 +547,8 @@ loop:
     }

     /*
-     * In addition to whatever extension we performed above, we always add
-     * at least one block to satisfy our own request.
+     * In addition to whatever extension we performed above, we always add at
+     * least one block to satisfy our own request.
      *
      * XXX This does an lseek - rather expensive - but at the moment it is the
      * only way to accurately determine how many blocks are in a relation. Is

View File

@@ -291,7 +291,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
         elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

     page = BufferGetPage(vmBuf);
-    map = (uint8 *)PageGetContents(page);
+    map = (uint8 *) PageGetContents(page);
     LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);

     if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))

View File

@@ -395,7 +395,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
                      * Check for a conflict-in as we would if we were going to
                      * write to this page. We aren't actually going to write,
                      * but we want a chance to report SSI conflicts that would
-                     * otherwise be masked by this unique constraint violation.
+                     * otherwise be masked by this unique constraint
+                     * violation.
                      */
                     CheckForSerializableConflictIn(rel, NULL, buf);

View File

@@ -813,8 +813,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
     /*
      * Check to see if we need to issue one final WAL record for this index,
-     * which may be needed for correctness on a hot standby node when
-     * non-MVCC index scans could take place.
+     * which may be needed for correctness on a hot standby node when non-MVCC
+     * index scans could take place.
      *
      * If the WAL is replayed in hot standby, the replay process needs to get
      * cleanup locks on all index leaf pages, just as we've been doing here.
@@ -1025,13 +1025,13 @@ restart:
         if (ndeletable > 0)
         {
             /*
-             * Notice that the issued XLOG_BTREE_VACUUM WAL record includes all
-             * information to the replay code to allow it to get a cleanup lock
-             * on all pages between the previous lastBlockVacuumed and this page.
-             * This ensures that WAL replay locks all leaf pages at some point,
-             * which is important should non-MVCC scans be requested.
-             * This is currently unused on standby, but we record it anyway, so
-             * that the WAL contains the required information.
+             * Notice that the issued XLOG_BTREE_VACUUM WAL record includes
+             * all information to the replay code to allow it to get a cleanup
+             * lock on all pages between the previous lastBlockVacuumed and
+             * this page. This ensures that WAL replay locks all leaf pages at
+             * some point, which is important should non-MVCC scans be
+             * requested. This is currently unused on standby, but we record
+             * it anyway, so that the WAL contains the required information.
              *
              * Since we can visit leaf pages out-of-order when recursing,
              * replay might end up locking such pages an extra time, but it

View File

@@ -392,15 +392,15 @@ btree_xlog_vacuum(XLogReaderState *record)
     xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record);

     /*
-     * This section of code is thought to be no longer needed, after
-     * analysis of the calling paths. It is retained to allow the code
-     * to be reinstated if a flaw is revealed in that thinking.
+     * This section of code is thought to be no longer needed, after analysis
+     * of the calling paths. It is retained to allow the code to be reinstated
+     * if a flaw is revealed in that thinking.
      *
      * If we are running non-MVCC scans using this index we need to do some
      * additional work to ensure correctness, which is known as a "pin scan"
      * described in more detail in next paragraphs. We used to do the extra
-     * work in all cases, whereas we now avoid that work in most cases.
-     * If lastBlockVacuumed is set to InvalidBlockNumber then we skip the
+     * work in all cases, whereas we now avoid that work in most cases. If
+     * lastBlockVacuumed is set to InvalidBlockNumber then we skip the
      * additional work required for the pin scan.
      *
      * Avoiding this extra work is important since it requires us to touch

View File

@@ -153,9 +153,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
      * No-op if the module is not active.
      *
      * An unlocked read here is fine, because in a standby (the only place
-     * where the flag can change in flight) this routine is only called by
-     * the recovery process, which is also the only process which can change
-     * the flag.
+     * where the flag can change in flight) this routine is only called by the
+     * recovery process, which is also the only process which can change the
+     * flag.
      */
     if (!commitTsShared->commitTsActive)
         return;
@@ -767,8 +767,8 @@ ExtendCommitTs(TransactionId newestXact)
     int         pageno;

     /*
-     * Nothing to do if module not enabled. Note we do an unlocked read of the
-     * flag here, which is okay because this routine is only called from
+     * Nothing to do if module not enabled. Note we do an unlocked read of
+     * the flag here, which is okay because this routine is only called from
      * GetNewTransactionId, which is never called in a standby.
      */
     Assert(!InRecovery);

View File

@@ -52,9 +52,8 @@ typedef struct
     Buffer      buffer;         /* registered buffer */
     int         flags;          /* flags for this buffer */
     int         deltaLen;       /* space consumed in delta field */
-    char       *image;          /* copy of page image for modification,
-                                 * do not do it in-place to have aligned
-                                 * memory chunk */
+    char       *image;          /* copy of page image for modification, do not
+                                 * do it in-place to have aligned memory chunk */
     char        delta[MAX_DELTA_SIZE];  /* delta between page images */
 } PageData;

View File

@@ -988,8 +988,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
             char       *oldest_datname = get_database_name(oldest_datoid);

             /*
-             * Immediately kick autovacuum into action as we're already
-             * in ERROR territory.
+             * Immediately kick autovacuum into action as we're already in
+             * ERROR territory.
              */
             SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);

View File

@@ -134,9 +134,9 @@ CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
         nworkers = 0;

     /*
-     * If we are running under serializable isolation, we can't use
-     * parallel workers, at least not until somebody enhances that mechanism
-     * to be parallel-aware.
+     * If we are running under serializable isolation, we can't use parallel
+     * workers, at least not until somebody enhances that mechanism to be
+     * parallel-aware.
      */
     if (IsolationIsSerializable())
         nworkers = 0;
@@ -646,9 +646,9 @@ DestroyParallelContext(ParallelContext *pcxt)
     }

     /*
-     * We can't finish transaction commit or abort until all of the
-     * workers have exited. This means, in particular, that we can't respond
-     * to interrupts at this stage.
+     * We can't finish transaction commit or abort until all of the workers
+     * have exited. This means, in particular, that we can't respond to
+     * interrupts at this stage.
      */
     HOLD_INTERRUPTS();
     WaitForParallelWorkersToExit(pcxt);
@@ -958,9 +958,9 @@ ParallelWorkerMain(Datum main_arg)
      */

     /*
-     * Join locking group. We must do this before anything that could try
-     * to acquire a heavyweight lock, because any heavyweight locks acquired
-     * to this point could block either directly against the parallel group
+     * Join locking group. We must do this before anything that could try to
+     * acquire a heavyweight lock, because any heavyweight locks acquired to
+     * this point could block either directly against the parallel group
      * leader or against some process which in turn waits for a lock that
      * conflicts with the parallel group leader, causing an undetected
      * deadlock. (If we can't join the lock group, the leader has gone away,

View File

@@ -257,7 +257,7 @@ StartupSUBTRANS(TransactionId oldestActiveXID)
             startPage++;
             /* must account for wraparound */
             if (startPage > TransactionIdToPage(MaxTransactionId))
-                startPage=0;
+                startPage = 0;
         }
         (void) ZeroSUBTRANSPage(startPage);

View File

@@ -140,11 +140,11 @@ typedef struct GlobalTransactionData
     TimestampTz prepared_at;    /* time of preparation */

     /*
-     * Note that we need to keep track of two LSNs for each GXACT.
-     * We keep track of the start LSN because this is the address we must
-     * use to read state data back from WAL when committing a prepared GXACT.
-     * We keep track of the end LSN because that is the LSN we need to wait
-     * for prior to commit.
+     * Note that we need to keep track of two LSNs for each GXACT. We keep
+     * track of the start LSN because this is the address we must use to read
+     * state data back from WAL when committing a prepared GXACT. We keep
+     * track of the end LSN because that is the LSN we need to wait for prior
+     * to commit.
      */
     XLogRecPtr  prepare_start_lsn;      /* XLOG offset of prepare record start */
     XLogRecPtr  prepare_end_lsn;        /* XLOG offset of prepare record end */
@@ -1280,7 +1280,7 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
     if (len != NULL)
         *len = XLogRecGetDataLen(xlogreader);

-    *buf = palloc(sizeof(char)*XLogRecGetDataLen(xlogreader));
+    *buf = palloc(sizeof(char) * XLogRecGetDataLen(xlogreader));
     memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader));

     XLogReaderFree(xlogreader);
@@ -1347,10 +1347,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
     xid = pgxact->xid;

     /*
-     * Read and validate 2PC state data.
-     * State data will typically be stored in WAL files if the LSN is after the
-     * last checkpoint record, or moved to disk if for some reason they have
-     * lived for a long time.
+     * Read and validate 2PC state data. State data will typically be stored
+     * in WAL files if the LSN is after the last checkpoint record, or moved
+     * to disk if for some reason they have lived for a long time.
      */
     if (gxact->ondisk)
         buf = ReadTwoPhaseFile(xid, true);
@@ -1605,22 +1604,20 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
     TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START();

     /*
-     * We are expecting there to be zero GXACTs that need to be
-     * copied to disk, so we perform all I/O while holding
-     * TwoPhaseStateLock for simplicity. This prevents any new xacts
-     * from preparing while this occurs, which shouldn't be a problem
-     * since the presence of long-lived prepared xacts indicates the
-     * transaction manager isn't active.
+     * We are expecting there to be zero GXACTs that need to be copied to
+     * disk, so we perform all I/O while holding TwoPhaseStateLock for
+     * simplicity. This prevents any new xacts from preparing while this
+     * occurs, which shouldn't be a problem since the presence of long-lived
+     * prepared xacts indicates the transaction manager isn't active.
      *
-     * It's also possible to move I/O out of the lock, but on
-     * every error we should check whether somebody committed our
-     * transaction in different backend. Let's leave this optimisation
-     * for future, if somebody will spot that this place cause
-     * bottleneck.
+     * It's also possible to move I/O out of the lock, but on every error we
+     * should check whether somebody committed our transaction in different
+     * backend. Let's leave this optimisation for future, if somebody will
+     * spot that this place cause bottleneck.
      *
-     * Note that it isn't possible for there to be a GXACT with
-     * a prepare_end_lsn set prior to the last checkpoint yet
-     * is marked invalid, because of the efforts with delayChkpt.
+     * Note that it isn't possible for there to be a GXACT with a
+     * prepare_end_lsn set prior to the last checkpoint yet is marked invalid,
+     * because of the efforts with delayChkpt.
      */
     LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
     for (i = 0; i < TwoPhaseState->numPrepXacts; i++)

View File

@@ -1166,13 +1166,13 @@ RecordTransactionCommit(void)
         /*
          * Transactions without an assigned xid can contain invalidation
          * messages (e.g. explicit relcache invalidations or catcache
-         * invalidations for inplace updates); standbys need to process
-         * those. We can't emit a commit record without an xid, and we don't
-         * want to force assigning an xid, because that'd be problematic for
-         * e.g. vacuum. Hence we emit a bespoke record for the
-         * invalidations. We don't want to use that in case a commit record is
-         * emitted, so they happen synchronously with commits (besides not
-         * wanting to emit more WAL recoreds).
+         * invalidations for inplace updates); standbys need to process those.
+         * We can't emit a commit record without an xid, and we don't want to
+         * force assigning an xid, because that'd be problematic for e.g.
+         * vacuum. Hence we emit a bespoke record for the invalidations. We
+         * don't want to use that in case a commit record is emitted, so they
+         * happen synchronously with commits (besides not wanting to emit more
+         * WAL recoreds).
          */
         if (nmsgs != 0)
         {
@@ -1272,8 +1272,8 @@ RecordTransactionCommit(void)
      * this case, but we don't currently try to do that. It would certainly
      * cause problems at least in Hot Standby mode, where the
      * KnownAssignedXids machinery requires tracking every XID assignment. It
-     * might be OK to skip it only when wal_level < replica, but for now
-     * we don't.)
+     * might be OK to skip it only when wal_level < replica, but for now we
+     * don't.)
      *
      * However, if we're doing cleanup of any non-temp rels or committing any
      * command that wanted to force sync commit, then we must flush XLOG
@@ -5486,8 +5486,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
     /*
      * If asked by the primary (because someone is waiting for a synchronous
-     * commit = remote_apply), we will need to ask walreceiver to send a
-     * reply immediately.
+     * commit = remote_apply), we will need to ask walreceiver to send a reply
+     * immediately.
      */
     if (XactCompletionApplyFeedback(parsed->xinfo))
         XLogRequestWalReceiverReply();

View File

@@ -5880,8 +5880,8 @@ CheckRequiredParameterValues(void)
     }

     /*
-     * For Hot Standby, the WAL must be generated with 'replica' mode, and
-     * we must have at least as many backend slots as the primary.
+     * For Hot Standby, the WAL must be generated with 'replica' mode, and we
+     * must have at least as many backend slots as the primary.
      */
     if (ArchiveRecoveryRequested && EnableHotStandby)
     {
@@ -6163,10 +6163,10 @@ StartupXLOG(void)
          * is no use of such file. There is no harm in retaining it, but it
          * is better to get rid of the map file so that we don't have any
          * redundant file in data directory and it will avoid any sort of
-         * confusion. It seems prudent though to just rename the file out
-         * of the way rather than delete it completely, also we ignore any
-         * error that occurs in rename operation as even if map file is
-         * present without backup_label file, it is harmless.
+         * confusion. It seems prudent though to just rename the file out of
+         * the way rather than delete it completely, also we ignore any error
+         * that occurs in rename operation as even if map file is present
+         * without backup_label file, it is harmless.
          */
         if (stat(TABLESPACE_MAP, &st) == 0)
         {
@@ -6883,8 +6883,8 @@ StartupXLOG(void)
                     SpinLockRelease(&XLogCtl->info_lck);

                     /*
-                     * If rm_redo called XLogRequestWalReceiverReply, then we
-                     * wake up the receiver so that it notices the updated
+                     * If rm_redo called XLogRequestWalReceiverReply, then we wake
+                     * up the receiver so that it notices the updated
                      * lastReplayedEndRecPtr and sends a reply to the master.
                      */
                     if (doRequestWalReceiverReply)

View File

@@ -104,8 +104,8 @@ pg_start_backup(PG_FUNCTION_ARGS)
         MemoryContext oldcontext;

         /*
-         * Label file and tablespace map file need to be long-lived, since they
-         * are read in pg_stop_backup.
+         * Label file and tablespace map file need to be long-lived, since
+         * they are read in pg_stop_backup.
          */
         oldcontext = MemoryContextSwitchTo(TopMemoryContext);
         label_file = makeStringInfo();
@@ -156,10 +156,10 @@ pg_stop_backup(PG_FUNCTION_ARGS)
                  errhint("Did you mean to use pg_stop_backup('f')?")));

     /*
-     * Exclusive backups were typically started in a different connection,
-     * so don't try to verify that exclusive_backup_running is set in this one.
-     * Actual verification that an exclusive backup is in fact running is handled
-     * inside do_pg_stop_backup.
+     * Exclusive backups were typically started in a different connection, so
+     * don't try to verify that exclusive_backup_running is set in this one.
+     * Actual verification that an exclusive backup is in fact running is
+     * handled inside do_pg_stop_backup.
      */
     stoppoint = do_pg_stop_backup(NULL, true, NULL);
@@ -248,9 +248,8 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS)
                      errhint("Did you mean to use pg_stop_backup('t')?")));

         /*
-         * Stop the non-exclusive backup. Return a copy of the backup
-         * label and tablespace map so they can be written to disk by
-         * the caller.
+         * Stop the non-exclusive backup. Return a copy of the backup label
+         * and tablespace map so they can be written to disk by the caller.
          */
         stoppoint = do_pg_stop_backup(label_file->data, true, NULL);
         nonexclusive_backup_running = false;

View File

@@ -410,8 +410,8 @@ AggregateCreate(const char *aggName,
         Oid         combineType;

         /*
-         * Combine function must have 2 argument, each of which is the
-         * trans type
+         * Combine function must have 2 argument, each of which is the trans
+         * type
          */
         fnArgs[0] = aggTransType;
         fnArgs[1] = aggTransType;
@@ -440,8 +440,9 @@ AggregateCreate(const char *aggName,
     }

     /*
-     * Validate the serialization function, if present. We must ensure that the
-     * return type of this function is the same as the specified serialType.
+     * Validate the serialization function, if present. We must ensure that
+     * the return type of this function is the same as the specified
+     * serialType.
      */
     if (aggserialfnName)
     {

View File

@@ -338,8 +338,8 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
         /*
          * There's little point in having a serialization/deserialization
          * function on aggregates that don't have an internal state, so let's
-         * just disallow this as it may help clear up any confusion or needless
-         * authoring of these functions.
+         * just disallow this as it may help clear up any confusion or
+         * needless authoring of these functions.
          */
         if (transTypeId != INTERNALOID)
             ereport(ERROR,
@@ -358,9 +358,9 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
         /*
          * We disallow INTERNAL serialType as the whole point of the
-         * serialized types is to allow the aggregate state to be output,
-         * and we cannot output INTERNAL. This check, combined with the one
-         * above ensures that the trans type and serialization type are not the
+         * serialized types is to allow the aggregate state to be output, and
+         * we cannot output INTERNAL. This check, combined with the one above
+         * ensures that the trans type and serialization type are not the
          * same.
          */
         if (serialTypeId == INTERNALOID)

View File

@@ -409,9 +409,8 @@ ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddre
                                stmt->objargs, &rel, AccessExclusiveLock, false);

     /*
-     * If a relation was involved, it would have been opened and locked.
-     * We don't need the relation here, but we'll retain the lock until
-     * commit.
+     * If a relation was involved, it would have been opened and locked. We
+     * don't need the relation here, but we'll retain the lock until commit.
      */
     if (rel)
         heap_close(rel, NoLock);
@@ -630,8 +629,8 @@ AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid)
     oldNspOid = DatumGetObjectId(namespace);

     /*
-     * If the object is already in the correct namespace, we don't need
-     * to do anything except fire the object access hook.
+     * If the object is already in the correct namespace, we don't need to do
+     * anything except fire the object access hook.
      */
     if (oldNspOid == nspOid)
     {

View File

@@ -217,9 +217,8 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
                    RelationGetRelationName(matviewRel));

     /*
-     * Check that there is a unique index with no WHERE clause on
-     * one or more columns of the materialized view if CONCURRENTLY
-     * is specified.
+     * Check that there is a unique index with no WHERE clause on one or more
+     * columns of the materialized view if CONCURRENTLY is specified.
      */
     if (concurrent)
     {
@@ -745,8 +744,8 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
     /*
      * There must be at least one unique index on the matview.
      *
-     * ExecRefreshMatView() checks that after taking the exclusive lock on
-     * the matview. So at least one unique index is guaranteed to exist here
+     * ExecRefreshMatView() checks that after taking the exclusive lock on the
+     * matview. So at least one unique index is guaranteed to exist here
      * because the lock is still being held.
      */
     Assert(foundUniqueIndex);

View File

@@ -511,7 +511,8 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
      */
     if (!noperm && num_roles > 0)
     {
-        int         i, j;
+        int         i,
+                    j;
         Oid        *roles = (Oid *) ARR_DATA_PTR(policy_roles);
         Datum      *role_oids;
         char       *qual_value;
@@ -536,10 +537,9 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
         /*
          * All of the dependencies will be removed from the policy and then
-         * re-added. In order to get them correct, we need to extract out
-         * the expressions in the policy and construct a parsestate just
-         * enough to build the range table(s) to then pass to
-         * recordDependencyOnExpr().
+         * re-added. In order to get them correct, we need to extract out the
+         * expressions in the policy and construct a parsestate just enough to
+         * build the range table(s) to then pass to recordDependencyOnExpr().
          */

         /* Get policy qual, to update dependencies */
@@ -665,7 +665,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
     heap_close(pg_policy_rel, RowExclusiveLock);

-    return(noperm || num_roles > 0);
+    return (noperm || num_roles > 0);
 }

 /*
@@ -1035,9 +1035,9 @@ AlterPolicy(AlterPolicyStmt *stmt)
         ArrayType  *policy_roles;

         /*
-         * We need to pull the set of roles this policy applies to from
-         * what's in the catalog, so that we can recreate the dependencies
-         * correctly for the policy.
+         * We need to pull the set of roles this policy applies to from what's
+         * in the catalog, so that we can recreate the dependencies correctly
+         * for the policy.
          */
         roles_datum = heap_getattr(policy_tuple, Anum_pg_policy_polroles,
@@ -1070,8 +1070,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
         /*
          * We need to pull the USING expression and build the range table for
-         * the policy from what's in the catalog, so that we can recreate
-         * the dependencies correctly for the policy.
+         * the policy from what's in the catalog, so that we can recreate the
+         * dependencies correctly for the policy.
          */

         /* Check if the policy has a USING expr */

View File

@ -532,8 +532,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* can skip this for internally generated triggers, since the name * can skip this for internally generated triggers, since the name
* modification above should be sufficient. * modification above should be sufficient.
* *
* NOTE that this is cool only because we have ShareRowExclusiveLock on the * NOTE that this is cool only because we have ShareRowExclusiveLock on
* relation, so the trigger set won't be changing underneath us. * the relation, so the trigger set won't be changing underneath us.
*/ */
if (!isInternal) if (!isInternal)
{ {

View File

@ -3313,9 +3313,9 @@ AlterTypeOwner_oid(Oid typeOid, Oid newOwnerId, bool hasDependEntry)
typTup = (Form_pg_type) GETSTRUCT(tup); typTup = (Form_pg_type) GETSTRUCT(tup);
/* /*
* If it's a composite type, invoke ATExecChangeOwner so that we fix up the * If it's a composite type, invoke ATExecChangeOwner so that we fix up
* pg_class entry properly. That will call back to AlterTypeOwnerInternal * the pg_class entry properly. That will call back to
* to take care of the pg_type entry(s). * AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/ */
if (typTup->typtype == TYPTYPE_COMPOSITE) if (typTup->typtype == TYPTYPE_COMPOSITE)
ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock); ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock);

View File

@ -1167,8 +1167,8 @@ RenameRole(const char *oldname, const char *newname)
errmsg("current user cannot be renamed"))); errmsg("current user cannot be renamed")));
/* /*
* Check that the user is not trying to rename a system role and * Check that the user is not trying to rename a system role and not
* not trying to rename a role into the reserved "pg_" namespace. * trying to rename a role into the reserved "pg_" namespace.
*/ */
if (IsReservedName(NameStr(authform->rolname))) if (IsReservedName(NameStr(authform->rolname)))
ereport(ERROR, ereport(ERROR,

View File

@ -1192,9 +1192,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
} }
/* /*
* If the all-visible page turns out to be all-frozen but not marked, * If the all-visible page turns out to be all-frozen but not
* we should so mark it. Note that all_frozen is only valid if all_visible * marked, we should so mark it. Note that all_frozen is only valid
* is true, so we must check both. * if all_visible is true, so we must check both.
*/ */
else if (all_visible_according_to_vm && all_visible && all_frozen && else if (all_visible_according_to_vm && all_visible && all_frozen &&
!VM_ALL_FROZEN(onerel, blkno, &vmbuffer)) !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))

View File

@ -880,9 +880,9 @@ check_role(char **newval, void **extra, GucSource source)
ReleaseSysCache(roleTup); ReleaseSysCache(roleTup);
/* /*
* Verify that session user is allowed to become this role, but * Verify that session user is allowed to become this role, but skip
* skip this in parallel mode, where we must blindly recreate the * this in parallel mode, where we must blindly recreate the parallel
* parallel leader's state. * leader's state.
*/ */
if (!InitializingParallelWorker && if (!InitializingParallelWorker &&
!is_member_of_role(GetSessionUserId(), roleid)) !is_member_of_role(GetSessionUserId(), roleid))

View File

@ -444,10 +444,9 @@ ExecSupportsBackwardScan(Plan *node)
return false; return false;
/* /*
* Parallel-aware nodes return a subset of the tuples in each worker, * Parallel-aware nodes return a subset of the tuples in each worker, and
* and in general we can't expect to have enough bookkeeping state to * in general we can't expect to have enough bookkeeping state to know
* know which ones we returned in this worker as opposed to some other * which ones we returned in this worker as opposed to some other worker.
* worker.
*/ */
if (node->parallel_aware) if (node->parallel_aware)
return false; return false;

View File

@ -391,8 +391,8 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
shm_toc_estimate_keys(&pcxt->estimator, 1); shm_toc_estimate_keys(&pcxt->estimator, 1);
/* /*
* Give parallel-aware nodes a chance to add to the estimates, and get * Give parallel-aware nodes a chance to add to the estimates, and get a
* a count of how many PlanState nodes there are. * count of how many PlanState nodes there are.
*/ */
e.pcxt = pcxt; e.pcxt = pcxt;
e.nnodes = 0; e.nnodes = 0;
@ -444,9 +444,9 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false); pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
/* /*
* If instrumentation options were supplied, allocate space for the * If instrumentation options were supplied, allocate space for the data.
* data. It only gets partially initialized here; the rest happens * It only gets partially initialized here; the rest happens during
* during ExecParallelInitializeDSM. * ExecParallelInitializeDSM.
*/ */
if (estate->es_instrument) if (estate->es_instrument)
{ {
@ -636,9 +636,9 @@ ExecParallelReportInstrumentation(PlanState *planstate,
/* /*
* If we shuffled the plan_node_id values in ps_instrument into sorted * If we shuffled the plan_node_id values in ps_instrument into sorted
* order, we could use binary search here. This might matter someday * order, we could use binary search here. This might matter someday if
* if we're pushing down sufficiently large plan trees. For now, do it * we're pushing down sufficiently large plan trees. For now, do it the
* the slow, dumb way. * slow, dumb way.
*/ */
for (i = 0; i < instrumentation->num_plan_nodes; ++i) for (i = 0; i < instrumentation->num_plan_nodes; ++i)
if (instrumentation->plan_node_id[i] == plan_node_id) if (instrumentation->plan_node_id[i] == plan_node_id)

View File

@ -981,10 +981,11 @@ combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
if (OidIsValid(pertrans->deserialfn_oid)) if (OidIsValid(pertrans->deserialfn_oid))
{ {
/* /*
* Don't call a strict deserialization function with NULL input. * Don't call a strict deserialization function with NULL input. A
* A strict deserialization function and a null value means we skip * strict deserialization function and a null value means we skip
* calling the combine function for this state. We assume that this * calling the combine function for this state. We assume that
* would be a waste of time and effort anyway so just skip it. * this would be a waste of time and effort anyway so just skip
* it.
*/ */
if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0]) if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0])
continue; continue;
@ -1429,8 +1430,8 @@ finalize_partialaggregate(AggState *aggstate,
oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory); oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
/* /*
* serialfn_oid will be set if we must serialize the input state * serialfn_oid will be set if we must serialize the input state before
* before calling the combine function on the state. * calling the combine function on the state.
*/ */
if (OidIsValid(pertrans->serialfn_oid)) if (OidIsValid(pertrans->serialfn_oid))
{ {
@ -1443,6 +1444,7 @@ finalize_partialaggregate(AggState *aggstate,
else else
{ {
FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo; FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo;
fcinfo->arg[0] = pergroupstate->transValue; fcinfo->arg[0] = pergroupstate->transValue;
fcinfo->argnull[0] = pergroupstate->transValueIsNull; fcinfo->argnull[0] = pergroupstate->transValueIsNull;
@ -3366,9 +3368,9 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg,
/* /*
* The serialization and deserialization functions must match, if * The serialization and deserialization functions must match, if
* present, as we're unable to share the trans state for aggregates * present, as we're unable to share the trans state for aggregates
* which will serialize or deserialize into different formats. Remember * which will serialize or deserialize into different formats.
* that these will be InvalidOid if they're not required for this agg * Remember that these will be InvalidOid if they're not required for
* node. * this agg node.
*/ */
if (aggserialfn != pertrans->serialfn_oid || if (aggserialfn != pertrans->serialfn_oid ||
aggdeserialfn != pertrans->deserialfn_oid) aggdeserialfn != pertrans->deserialfn_oid)

View File

@ -285,8 +285,8 @@ ExecReScanForeignScan(ForeignScanState *node)
/* /*
* If chgParam of subnode is not null then plan will be re-scanned by * If chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode. outerPlan may also be NULL, in which case there * first ExecProcNode. outerPlan may also be NULL, in which case there is
* is nothing to rescan at all. * nothing to rescan at all.
*/ */
if (outerPlan != NULL && outerPlan->chgParam == NULL) if (outerPlan != NULL && outerPlan->chgParam == NULL)
ExecReScan(outerPlan); ExecReScan(outerPlan);

View File

@ -138,8 +138,8 @@ ExecGather(GatherState *node)
/* /*
* Initialize the parallel context and workers on first execution. We do * Initialize the parallel context and workers on first execution. We do
* this on first execution rather than during node initialization, as it * this on first execution rather than during node initialization, as it
* needs to allocate a large dynamic segment, so it is better to do so if it * needs to allocate a large dynamic segment, so it is better to do so if it is
* is really needed. * really needed.
*/ */
if (!node->initialized) if (!node->initialized)
{ {
@ -147,8 +147,8 @@ ExecGather(GatherState *node)
Gather *gather = (Gather *) node->ps.plan; Gather *gather = (Gather *) node->ps.plan;
/* /*
* Sometimes we might have to run without parallelism; but if * Sometimes we might have to run without parallelism; but if parallel
* parallel mode is active then we can try to fire up some workers. * mode is active then we can try to fire up some workers.
*/ */
if (gather->num_workers > 0 && IsInParallelMode()) if (gather->num_workers > 0 && IsInParallelMode())
{ {
@ -330,8 +330,8 @@ gather_readnext(GatherState *gatherstate)
tup = TupleQueueReaderNext(reader, true, &readerdone); tup = TupleQueueReaderNext(reader, true, &readerdone);
/* /*
* If this reader is done, remove it. If all readers are done, * If this reader is done, remove it. If all readers are done, clean
* clean up remaining worker state. * up remaining worker state.
*/ */
if (readerdone) if (readerdone)
{ {
@ -452,10 +452,10 @@ void
ExecReScanGather(GatherState *node) ExecReScanGather(GatherState *node)
{ {
/* /*
* Re-initialize the parallel workers to perform rescan of relation. * Re-initialize the parallel workers to perform rescan of relation. We
* We want to gracefully shutdown all the workers so that they * want to gracefully shutdown all the workers so that they should be able
* should be able to propagate any error or other information to master * to propagate any error or other information to master backend before
* backend before dying. Parallel context will be reused for rescan. * dying. Parallel context will be reused for rescan.
*/ */
ExecShutdownGatherWorkers(node); ExecShutdownGatherWorkers(node);

View File

@ -1221,10 +1221,10 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
/* /*
* Note that it is possible that the target tuple has been modified in * Note that it is possible that the target tuple has been modified in
* this session, after the above heap_lock_tuple. We choose to not error * this session, after the above heap_lock_tuple. We choose to not error
* out in that case, in line with ExecUpdate's treatment of similar * out in that case, in line with ExecUpdate's treatment of similar cases.
* cases. This can happen if an UPDATE is triggered from within * This can happen if an UPDATE is triggered from within ExecQual(),
* ExecQual(), ExecWithCheckOptions() or ExecProject() above, e.g. by * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
* selecting from a wCTE in the ON CONFLICT's SET. * wCTE in the ON CONFLICT's SET.
*/ */
/* Execute UPDATE with projection */ /* Execute UPDATE with projection */

View File

@ -65,8 +65,8 @@ SeqNext(SeqScanState *node)
if (scandesc == NULL) if (scandesc == NULL)
{ {
/* /*
* We reach here if the scan is not parallel, or if we're executing * We reach here if the scan is not parallel, or if we're executing a
* a scan that was intended to be parallel serially. * scan that was intended to be parallel serially.
*/ */
scandesc = heap_beginscan(node->ss.ss_currentRelation, scandesc = heap_beginscan(node->ss.ss_currentRelation,
estate->es_snapshot, estate->es_snapshot,

View File

@ -81,19 +81,19 @@ struct TupleQueueReader
#define TUPLE_QUEUE_MODE_CONTROL 'c' #define TUPLE_QUEUE_MODE_CONTROL 'c'
#define TUPLE_QUEUE_MODE_DATA 'd' #define TUPLE_QUEUE_MODE_DATA 'd'
static void tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, static void tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype,
Datum value); Datum value);
static void tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value); static void tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value);
static void tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value); static void tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value);
static void tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value); static void tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value);
static void tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod, static void tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc); TupleDesc tupledesc);
static void TupleQueueHandleControlMessage(TupleQueueReader *reader, static void TupleQueueHandleControlMessage(TupleQueueReader *reader,
Size nbytes, char *data); Size nbytes, char *data);
static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader, static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader,
Size nbytes, HeapTupleHeader data); Size nbytes, HeapTupleHeader data);
static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader, static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader,
TupleDesc tupledesc, RemapInfo * remapinfo, TupleDesc tupledesc, RemapInfo *remapinfo,
HeapTuple tuple); HeapTuple tuple);
static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass, static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass,
Datum value); Datum value);
@ -212,7 +212,7 @@ tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self)
* Invoke the appropriate walker function based on the given RemapClass. * Invoke the appropriate walker function based on the given RemapClass.
*/ */
static void static void
tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value) tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value)
{ {
check_stack_depth(); check_stack_depth();
@ -237,7 +237,7 @@ tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
* contained therein. * contained therein.
*/ */
static void static void
tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value) tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value)
{ {
HeapTupleHeader tup; HeapTupleHeader tup;
Oid typeid; Oid typeid;
@ -304,7 +304,7 @@ tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
* contained therein. * contained therein.
*/ */
static void static void
tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value) tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value)
{ {
ArrayType *arr = DatumGetArrayTypeP(value); ArrayType *arr = DatumGetArrayTypeP(value);
Oid typeid = ARR_ELEMTYPE(arr); Oid typeid = ARR_ELEMTYPE(arr);
@ -342,7 +342,7 @@ tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
* contained therein. * contained therein.
*/ */
static void static void
tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value) tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value)
{ {
RangeType *range = DatumGetRangeType(value); RangeType *range = DatumGetRangeType(value);
Oid typeid = RangeTypeGetOid(range); Oid typeid = RangeTypeGetOid(range);
@ -386,7 +386,7 @@ tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
* already done so previously. * already done so previously.
*/ */
static void static void
tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod, tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc) TupleDesc tupledesc)
{ {
StringInfoData buf; StringInfoData buf;
@ -613,7 +613,7 @@ TupleQueueHandleDataMessage(TupleQueueReader *reader,
*/ */
static HeapTuple static HeapTuple
TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc, TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc,
RemapInfo * remapinfo, HeapTuple tuple) RemapInfo *remapinfo, HeapTuple tuple)
{ {
Datum *values; Datum *values;
bool *isnull; bool *isnull;

View File

@ -2539,11 +2539,10 @@ CheckRADIUSAuth(Port *port)
radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier)); radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier));
/* /*
* RADIUS password attributes are calculated as: * RADIUS password attributes are calculated as: e[0] = p[0] XOR
* e[0] = p[0] XOR MD5(secret + Request Authenticator) * MD5(secret + Request Authenticator) for the first group of 16 octets,
* for the first group of 16 octets, and then: * and then: e[i] = p[i] XOR MD5(secret + e[i-1]) for the following ones
* e[i] = p[i] XOR MD5(secret + e[i-1]) * (if necessary)
* for the following ones (if necessary)
*/ */
encryptedpasswordlen = ((strlen(passwd) + RADIUS_VECTOR_LENGTH - 1) / RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH; encryptedpasswordlen = ((strlen(passwd) + RADIUS_VECTOR_LENGTH - 1) / RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH;
cryptvector = palloc(strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH); cryptvector = palloc(strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH);
@ -2554,7 +2553,11 @@ CheckRADIUSAuth(Port *port)
for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH) for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH)
{ {
memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer, RADIUS_VECTOR_LENGTH); memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer, RADIUS_VECTOR_LENGTH);
/* .. and for subsequent iterations the result of the previous XOR (calculated below) */
/*
* .. and for subsequent iterations the result of the previous XOR
* (calculated below)
*/
md5trailer = encryptedpassword + i; md5trailer = encryptedpassword + i;
if (!pg_md5_binary(cryptvector, strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH, encryptedpassword + i)) if (!pg_md5_binary(cryptvector, strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH, encryptedpassword + i))
@ -2565,7 +2568,7 @@ CheckRADIUSAuth(Port *port)
return STATUS_ERROR; return STATUS_ERROR;
} }
for (j = i; j < i+RADIUS_VECTOR_LENGTH; j++) for (j = i; j < i + RADIUS_VECTOR_LENGTH; j++)
{ {
if (j < strlen(passwd)) if (j < strlen(passwd))
encryptedpassword[j] = passwd[j] ^ encryptedpassword[j]; encryptedpassword[j] = passwd[j] ^ encryptedpassword[j];
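
The comment above is the RFC 2865 User-Password hiding scheme: the password is zero-padded to a multiple of 16 octets, and each 16-octet block is XORed with MD5(secret || prev), where prev is the Request Authenticator for the first block and the previous encrypted block afterwards. A minimal standalone sketch of that chaining (not the backend's CheckRADIUSAuth(); it assumes OpenSSL's MD5() is available, and the helper name and buffer sizes are made up for illustration):

#include <openssl/md5.h>
#include <string.h>

#define RADIUS_VECTOR_LENGTH 16

/*
 * Obfuscate "passwd": each 16-octet block of the zero-padded password is
 * XORed with MD5(secret || prev), where prev is the Request Authenticator
 * for the first block and the previous encrypted block for later ones.
 * Returns the encrypted length, or 0 if the caller's buffers are too small.
 */
static size_t
radius_encrypt_password(const char *passwd, const char *secret,
                        const unsigned char *request_authenticator,
                        unsigned char *encrypted, size_t encrypted_size)
{
    size_t      passwdlen = strlen(passwd);
    size_t      enclen = ((passwdlen + RADIUS_VECTOR_LENGTH - 1) /
                          RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH;
    size_t      secretlen = strlen(secret);
    unsigned char cryptvector[1024];
    unsigned char digest[MD5_DIGEST_LENGTH];
    const unsigned char *prev = request_authenticator;
    size_t      i, j;

    if (enclen == 0 || enclen > encrypted_size ||
        secretlen + RADIUS_VECTOR_LENGTH > sizeof(cryptvector))
        return 0;

    /* zero-pad the password to a multiple of 16 octets */
    memset(encrypted, 0, enclen);
    memcpy(encrypted, passwd, passwdlen);

    for (i = 0; i < enclen; i += RADIUS_VECTOR_LENGTH)
    {
        /* b = MD5(secret || prev) */
        memcpy(cryptvector, secret, secretlen);
        memcpy(cryptvector + secretlen, prev, RADIUS_VECTOR_LENGTH);
        MD5(cryptvector, secretlen + RADIUS_VECTOR_LENGTH, digest);

        /* e[i] = p[i] XOR b; this block becomes "prev" for the next one */
        for (j = 0; j < RADIUS_VECTOR_LENGTH; j++)
            encrypted[i + j] ^= digest[j];
        prev = encrypted + i;
    }
    return enclen;
}

The receiving side undoes this by walking the blocks in the same order and XORing each one with MD5(secret || previous ciphertext block), again seeding the chain with the Request Authenticator.
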

View File

@ -377,11 +377,12 @@ be_tls_open_server(Port *port)
port->ssl_in_use = true; port->ssl_in_use = true;
aloop: aloop:
/* /*
* Prepare to call SSL_get_error() by clearing thread's OpenSSL error * Prepare to call SSL_get_error() by clearing thread's OpenSSL error
* queue. In general, the current thread's error queue must be empty * queue. In general, the current thread's error queue must be empty
* before the TLS/SSL I/O operation is attempted, or SSL_get_error() * before the TLS/SSL I/O operation is attempted, or SSL_get_error() will
* will not work reliably. An extension may have failed to clear the * not work reliably. An extension may have failed to clear the
* per-thread error queue following another call to an OpenSSL I/O * per-thread error queue following another call to an OpenSSL I/O
* routine. * routine.
*/ */
@ -393,12 +394,11 @@ aloop:
/* /*
* Other clients of OpenSSL in the backend may fail to call * Other clients of OpenSSL in the backend may fail to call
* ERR_get_error(), but we always do, so as to not cause problems * ERR_get_error(), but we always do, so as to not cause problems for
* for OpenSSL clients that don't call ERR_clear_error() * OpenSSL clients that don't call ERR_clear_error() defensively. Be
* defensively. Be sure that this happens by calling now. * sure that this happens by calling now. SSL_get_error() relies on
* SSL_get_error() relies on the OpenSSL per-thread error queue * the OpenSSL per-thread error queue being intact, so this is the
* being intact, so this is the earliest possible point * earliest possible point ERR_get_error() may be called.
* ERR_get_error() may be called.
*/ */
ecode = ERR_get_error(); ecode = ERR_get_error();
switch (err) switch (err)
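
The two comments above describe a general OpenSSL calling convention rather than anything backend-specific: empty the per-thread error queue with ERR_clear_error() before the TLS I/O call, then classify the result with SSL_get_error() and drain the queue with ERR_get_error() while it is still intact. A rough standalone sketch of that discipline, using SSL_read() on an already-established connection (the helper name and error handling are illustrative only):

#include <stdio.h>
#include <openssl/ssl.h>
#include <openssl/err.h>

/*
 * Read from an established SSL connection following the error-queue
 * discipline described above.  Returns bytes read, 0 on clean EOF,
 * -1 on error or when the caller should wait and retry.
 */
static int
ssl_read_checked(SSL *ssl, void *buf, int len)
{
    int         rc;
    int         err;
    unsigned long ecode;
    const char *reason;

    ERR_clear_error();          /* queue must be empty before the I/O call */
    rc = SSL_read(ssl, buf, len);
    if (rc > 0)
        return rc;              /* got data */

    err = SSL_get_error(ssl, rc);       /* classify the failure */
    ecode = ERR_get_error();    /* drain the queue while it is still intact */

    switch (err)
    {
        case SSL_ERROR_ZERO_RETURN:
            return 0;           /* peer closed the connection cleanly */
        case SSL_ERROR_WANT_READ:
        case SSL_ERROR_WANT_WRITE:
            return -1;          /* wait for socket readiness and retry */
        default:
            reason = ecode ? ERR_reason_error_string(ecode) : NULL;
            fprintf(stderr, "SSL_read failed: %s\n",
                    reason ? reason : "unknown reason");
            return -1;
    }
}
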

View File

@ -146,20 +146,20 @@ retry:
ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL); ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1); WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
/* /*
* If the postmaster has died, it's not safe to continue running, * If the postmaster has died, it's not safe to continue running,
* because it is the postmaster's job to kill us if some other backend * because it is the postmaster's job to kill us if some other backend
* exits uncleanly. Moreover, we won't run very well in this state; * exits uncleanly. Moreover, we won't run very well in this state;
* helper processes like walwriter and the bgwriter will exit, so * helper processes like walwriter and the bgwriter will exit, so
* performance may be poor. Finally, if we don't exit, pg_ctl will * performance may be poor. Finally, if we don't exit, pg_ctl will be
* be unable to restart the postmaster without manual intervention, * unable to restart the postmaster without manual intervention, so no
* so no new connections can be accepted. Exiting clears the deck * new connections can be accepted. Exiting clears the deck for a
* for a postmaster restart. * postmaster restart.
* *
* (Note that we only make this check when we would otherwise sleep * (Note that we only make this check when we would otherwise sleep on
* on our latch. We might still continue running for a while if the * our latch. We might still continue running for a while if the
* postmaster is killed in mid-query, or even through multiple queries * postmaster is killed in mid-query, or even through multiple queries
* if we never have to wait for read. We don't want to burn too many * if we never have to wait for read. We don't want to burn too many
* cycles checking for this very rare condition, and this should cause * cycles checking for this very rare condition, and this should cause
@ -247,7 +247,7 @@ retry:
ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL); ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1); WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
/* See comments in secure_read. */ /* See comments in secure_read. */
if (event.events & WL_POSTMASTER_DEATH) if (event.events & WL_POSTMASTER_DEATH)

View File

@ -143,9 +143,9 @@ mq_putmessage(char msgtype, const char *s, size_t len)
/* /*
* If the message queue is already gone, just ignore the message. This * If the message queue is already gone, just ignore the message. This
* doesn't necessarily indicate a problem; for example, DEBUG messages * doesn't necessarily indicate a problem; for example, DEBUG messages can
* can be generated late in the shutdown sequence, after all DSMs have * be generated late in the shutdown sequence, after all DSMs have already
* already been detached. * been detached.
*/ */
if (pq_mq == NULL) if (pq_mq == NULL)
return 0; return 0;

View File

@ -270,13 +270,16 @@ startup_hacks(const char *progname)
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
#if defined(_M_AMD64) && _MSC_VER == 1800 #if defined(_M_AMD64) && _MSC_VER == 1800
/* /*
* Avoid crashing in certain floating-point operations if * Avoid crashing in certain floating-point operations if we were
* we were compiled for x64 with MS Visual Studio 2013 and * compiled for x64 with MS Visual Studio 2013 and are running on
* are running on Windows prior to 7/2008R2 SP1 on an * Windows prior to 7/2008R2 SP1 on an AVX2-capable CPU.
* AVX2-capable CPU.
* *
* Ref: https://connect.microsoft.com/VisualStudio/feedback/details/811093/visual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instructions * Ref:
* https://connect.microsoft.com/VisualStudio/feedback/details/811093/v
* isual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instruction
* s
*/ */
if (!IsWindows7SP1OrGreater()) if (!IsWindows7SP1OrGreater())
{ {

View File

@ -3499,7 +3499,7 @@ planstate_tree_walker(PlanState *planstate,
return true; return true;
break; break;
case T_CustomScan: case T_CustomScan:
foreach (lc, ((CustomScanState *) planstate)->custom_ps) foreach(lc, ((CustomScanState *) planstate)->custom_ps)
{ {
if (walker((PlanState *) lfirst(lc), context)) if (walker((PlanState *) lfirst(lc), context))
return true; return true;

View File

@ -2228,6 +2228,7 @@ _readExtensibleNode(void)
const ExtensibleNodeMethods *methods; const ExtensibleNodeMethods *methods;
ExtensibleNode *local_node; ExtensibleNode *local_node;
const char *extnodename; const char *extnodename;
READ_TEMP_LOCALS(); READ_TEMP_LOCALS();
token = pg_strtok(&length); /* skip: extnodename */ token = pg_strtok(&length); /* skip: extnodename */

View File

@ -163,8 +163,8 @@ make_one_rel(PlannerInfo *root, List *joinlist)
set_base_rel_consider_startup(root); set_base_rel_consider_startup(root);
/* /*
* Generate access paths for the base rels. set_base_rel_sizes also * Generate access paths for the base rels. set_base_rel_sizes also sets
* sets the consider_parallel flag for each baserel, if appropriate. * the consider_parallel flag for each baserel, if appropriate.
*/ */
set_base_rel_sizes(root); set_base_rel_sizes(root);
set_base_rel_pathlists(root); set_base_rel_pathlists(root);
@ -509,6 +509,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
switch (rte->rtekind) switch (rte->rtekind)
{ {
case RTE_RELATION: case RTE_RELATION:
/* /*
* Currently, parallel workers can't access the leader's temporary * Currently, parallel workers can't access the leader's temporary
* tables. We could possibly relax this if the leader wrote all of its * tables. We could possibly relax this if the leader wrote all of its
@ -557,14 +558,15 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break; break;
case RTE_SUBQUERY: case RTE_SUBQUERY:
/* /*
* Subplans currently aren't passed to workers. Even if they * Subplans currently aren't passed to workers. Even if they
* were, the subplan might be using parallelism internally, and * were, the subplan might be using parallelism internally, and we
* we can't support nested Gather nodes at present. Finally, * can't support nested Gather nodes at present. Finally, we
* we don't have a good way of knowing whether the subplan * don't have a good way of knowing whether the subplan involves
* involves any parallel-restricted operations. It would be * any parallel-restricted operations. It would be nice to relax
* nice to relax this restriction some day, but it's going to * this restriction some day, but it's going to take a fair amount
* take a fair amount of work. * of work.
*/ */
return; return;
@ -580,6 +582,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break; break;
case RTE_VALUES: case RTE_VALUES:
/* /*
* The data for a VALUES clause is stored in the plan tree itself, * The data for a VALUES clause is stored in the plan tree itself,
* so scanning it in a worker is fine. * so scanning it in a worker is fine.
@ -587,6 +590,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break; break;
case RTE_CTE: case RTE_CTE:
/* /*
* CTE tuplestores aren't shared among parallel workers, so we * CTE tuplestores aren't shared among parallel workers, so we
* force all CTE scans to happen in the leader. Also, populating * force all CTE scans to happen in the leader. Also, populating
@ -598,8 +602,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
} }
/* /*
* If there's anything in baserestrictinfo that's parallel-restricted, * If there's anything in baserestrictinfo that's parallel-restricted, we
* we give up on parallelizing access to this relation. We could consider * give up on parallelizing access to this relation. We could consider
* instead postponing application of the restricted quals until we're * instead postponing application of the restricted quals until we're
* above all the parallelism in the plan tree, but it's not clear that * above all the parallelism in the plan tree, but it's not clear that
* this would be a win in very many cases, and it might be tricky to make * this would be a win in very many cases, and it might be tricky to make
@ -609,8 +613,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
return; return;
/* /*
* If the relation's outputs are not parallel-safe, we must give up. * If the relation's outputs are not parallel-safe, we must give up. In
* In the common case where the relation only outputs Vars, this check is * the common case where the relation only outputs Vars, this check is
* very cheap; otherwise, we have to do more work. * very cheap; otherwise, we have to do more work.
*/ */
if (rel->reltarget_has_non_vars && if (rel->reltarget_has_non_vars &&
@ -1251,8 +1255,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
int parallel_workers = 0; int parallel_workers = 0;
/* /*
* Decide on the number of workers to request for this append path. For * Decide on the number of workers to request for this append path.
* now, we just use the maximum value from among the members. It * For now, we just use the maximum value from among the members. It
* might be useful to use a higher number if the Append node were * might be useful to use a higher number if the Append node were
* smart enough to spread out the workers, but it currently isn't. * smart enough to spread out the workers, but it currently isn't.
*/ */
@ -2160,8 +2164,8 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
* Run generate_gather_paths() for each just-processed joinrel. We * Run generate_gather_paths() for each just-processed joinrel. We
* could not do this earlier because both regular and partial paths * could not do this earlier because both regular and partial paths
* can get added to a particular joinrel at multiple times within * can get added to a particular joinrel at multiple times within
* join_search_one_level. After that, we're done creating paths * join_search_one_level. After that, we're done creating paths for
* for the joinrel, so run set_cheapest(). * the joinrel, so run set_cheapest().
*/ */
foreach(lc, root->join_rel_level[lev]) foreach(lc, root->join_rel_level[lev])
{ {

View File

@ -1428,15 +1428,14 @@ create_projection_plan(PlannerInfo *root, ProjectionPath *best_path)
* We might not really need a Result node here. There are several ways * We might not really need a Result node here. There are several ways
* that this can happen. For example, MergeAppend doesn't project, so we * that this can happen. For example, MergeAppend doesn't project, so we
* would have thought that we needed a projection to attach resjunk sort * would have thought that we needed a projection to attach resjunk sort
* columns to its output ... but create_merge_append_plan might have * columns to its output ... but create_merge_append_plan might have added
* added those same resjunk sort columns to both MergeAppend and its * those same resjunk sort columns to both MergeAppend and its children.
* children. Alternatively, apply_projection_to_path might have created * Alternatively, apply_projection_to_path might have created a projection
* a projection path as the subpath of a Gather node even though the * path as the subpath of a Gather node even though the subpath was
* subpath was projection-capable. So, if the subpath is capable of * projection-capable. So, if the subpath is capable of projection or the
* projection or the desired tlist is the same expression-wise as the * desired tlist is the same expression-wise as the subplan's, just jam it
* subplan's, just jam it in there. We'll have charged for a Result that * in there. We'll have charged for a Result that doesn't actually appear
* doesn't actually appear in the plan, but that's better than having a * in the plan, but that's better than having a Result we don't need.
* Result we don't need.
*/ */
if (is_projection_capable_path(best_path->subpath) || if (is_projection_capable_path(best_path->subpath) ||
tlist_same_exprs(tlist, subplan->targetlist)) tlist_same_exprs(tlist, subplan->targetlist))
@ -3248,8 +3247,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
/* /*
* If a join between foreign relations was pushed down, remember it. The * If a join between foreign relations was pushed down, remember it. The
* push-down safety of the join depends upon the server and user mapping * push-down safety of the join depends upon the server and user mapping
* being the same. That can change between planning and execution time, in which * being the same. That can change between planning and execution time, in
* case the plan should be invalidated. * which case the plan should be invalidated.
*/ */
if (scan_relid == 0) if (scan_relid == 0)
root->glob->hasForeignJoin = true; root->glob->hasForeignJoin = true;
@ -3257,8 +3256,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
/* /*
* Replace any outer-relation variables with nestloop params in the qual, * Replace any outer-relation variables with nestloop params in the qual,
* fdw_exprs and fdw_recheck_quals expressions. We do this last so that * fdw_exprs and fdw_recheck_quals expressions. We do this last so that
* the FDW doesn't have to be involved. (Note that parts of fdw_exprs * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
* or fdw_recheck_quals could have come from join clauses, so doing this * fdw_recheck_quals could have come from join clauses, so doing this
* beforehand on the scan_clauses wouldn't work.) We assume * beforehand on the scan_clauses wouldn't work.) We assume
* fdw_scan_tlist contains no such variables. * fdw_scan_tlist contains no such variables.
*/ */
@ -3279,8 +3278,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
* 0, but there can be no Var with relid 0 in the rel's targetlist or the * 0, but there can be no Var with relid 0 in the rel's targetlist or the
* restriction clauses, so we skip this in that case. Note that any such * restriction clauses, so we skip this in that case. Note that any such
* columns in base relations that were joined are assumed to be contained * columns in base relations that were joined are assumed to be contained
* in fdw_scan_tlist.) This is a bit of a kluge and might go away someday, * in fdw_scan_tlist.) This is a bit of a kluge and might go away
* so we intentionally leave it out of the API presented to FDWs. * someday, so we intentionally leave it out of the API presented to FDWs.
*/ */
scan_plan->fsSystemCol = false; scan_plan->fsSystemCol = false;
if (scan_relid > 0) if (scan_relid > 0)

View File

@ -1777,8 +1777,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
* findable from the PlannerInfo struct; anything else the FDW wants * findable from the PlannerInfo struct; anything else the FDW wants
* to know should be obtainable via "root". * to know should be obtainable via "root".
* *
* Note: CustomScan providers, as well as FDWs that don't want to * Note: CustomScan providers, as well as FDWs that don't want to use
* use this hook, can use the create_upper_paths_hook; see below. * this hook, can use the create_upper_paths_hook; see below.
*/ */
if (current_rel->fdwroutine && if (current_rel->fdwroutine &&
current_rel->fdwroutine->GetForeignUpperPaths) current_rel->fdwroutine->GetForeignUpperPaths)
@ -3196,8 +3196,8 @@ set_grouped_rel_consider_parallel(PlannerInfo *root, RelOptInfo *grouped_rel,
/* /*
* All that's left to check now is to make sure all aggregate functions * All that's left to check now is to make sure all aggregate functions
* support partial mode. If there's no aggregates then we can skip checking * support partial mode. If there's no aggregates then we can skip
* that. * checking that.
*/ */
if (!parse->hasAggs) if (!parse->hasAggs)
grouped_rel->consider_parallel = true; grouped_rel->consider_parallel = true;
@ -3370,9 +3370,10 @@ create_grouping_paths(PlannerInfo *root,
/* /*
* Determine whether it's possible to perform sort-based implementations * Determine whether it's possible to perform sort-based implementations
* of grouping. (Note that if groupClause is empty, grouping_is_sortable() * of grouping. (Note that if groupClause is empty,
* is trivially true, and all the pathkeys_contained_in() tests will * grouping_is_sortable() is trivially true, and all the
* succeed too, so that we'll consider every surviving input path.) * pathkeys_contained_in() tests will succeed too, so that we'll consider
* every surviving input path.)
*/ */
can_sort = grouping_is_sortable(parse->groupClause); can_sort = grouping_is_sortable(parse->groupClause);
@ -3616,8 +3617,8 @@ create_grouping_paths(PlannerInfo *root,
/* /*
* Now generate a complete GroupAgg Path atop of the cheapest partial * Now generate a complete GroupAgg Path atop of the cheapest partial
* path. We need only bother with the cheapest path here, as the output * path. We need only bother with the cheapest path here, as the
* of Gather is never sorted. * output of Gather is never sorted.
*/ */
if (grouped_rel->partial_pathlist) if (grouped_rel->partial_pathlist)
{ {
@ -3632,9 +3633,9 @@ create_grouping_paths(PlannerInfo *root,
&total_groups); &total_groups);
/* /*
* Gather is always unsorted, so we'll need to sort, unless there's * Gather is always unsorted, so we'll need to sort, unless
* no GROUP BY clause, in which case there will only be a single * there's no GROUP BY clause, in which case there will only be a
* group. * single group.
*/ */
if (parse->groupClause) if (parse->groupClause)
path = (Path *) create_sort_path(root, path = (Path *) create_sort_path(root,
@ -3678,15 +3679,15 @@ create_grouping_paths(PlannerInfo *root,
/* /*
* Provided that the estimated size of the hashtable does not exceed * Provided that the estimated size of the hashtable does not exceed
* work_mem, we'll generate a HashAgg Path, although if we were unable * work_mem, we'll generate a HashAgg Path, although if we were unable
* to sort above, then we'd better generate a Path, so that we at least * to sort above, then we'd better generate a Path, so that we at
* have one. * least have one.
*/ */
if (hashaggtablesize < work_mem * 1024L || if (hashaggtablesize < work_mem * 1024L ||
grouped_rel->pathlist == NIL) grouped_rel->pathlist == NIL)
{ {
/* /*
* We just need an Agg over the cheapest-total input path, since input * We just need an Agg over the cheapest-total input path, since
* order won't matter. * input order won't matter.
*/ */
add_path(grouped_rel, (Path *) add_path(grouped_rel, (Path *)
create_agg_path(root, grouped_rel, create_agg_path(root, grouped_rel,
@ -3704,8 +3705,8 @@ create_grouping_paths(PlannerInfo *root,
/* /*
* Generate a HashAgg Path atop of the cheapest partial path. Once * Generate a HashAgg Path atop of the cheapest partial path. Once
* again, we'll only do this if it looks as though the hash table won't * again, we'll only do this if it looks as though the hash table
* exceed work_mem. * won't exceed work_mem.
*/ */
if (grouped_rel->partial_pathlist) if (grouped_rel->partial_pathlist)
{ {

View File

@ -2100,6 +2100,7 @@ search_indexed_tlist_for_partial_aggref(Aggref *aggref, indexed_tlist *itlist,
continue; continue;
if (aggref->aggvariadic != tlistaggref->aggvariadic) if (aggref->aggvariadic != tlistaggref->aggvariadic)
continue; continue;
/* /*
* it would be harmless to compare aggcombine and aggpartial, but * it would be harmless to compare aggcombine and aggpartial, but
* it's also unnecessary * it's also unnecessary

View File

@ -1371,11 +1371,11 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
* recurse through Query objects so as to locate parallel-unsafe * recurse through Query objects so as to locate parallel-unsafe
* constructs anywhere in the tree. * constructs anywhere in the tree.
* *
* Later, we'll be called again for specific quals, possibly after * Later, we'll be called again for specific quals, possibly after some
* some planning has been done; we may encounter SubPlan, SubLink, * planning has been done; we may encounter SubPlan, SubLink, or
* or AlternativeSubLink nodes. Currently, there's no need to recurse * AlternativeSubLink nodes. Currently, there's no need to recurse
* through these; they can't be unsafe, since we've already cleared * through these; they can't be unsafe, since we've already cleared the
* the entire query of unsafe operations, and they're definitely * entire query of unsafe operations, and they're definitely
* parallel-restricted. * parallel-restricted.
*/ */
if (IsA(node, Query)) if (IsA(node, Query))
@ -1390,12 +1390,12 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
has_parallel_hazard_walker, has_parallel_hazard_walker,
context, 0); context, 0);
} }
else if (IsA(node, SubPlan) || IsA(node, SubLink) || else if (IsA(node, SubPlan) ||IsA(node, SubLink) ||
IsA(node, AlternativeSubPlan) || IsA(node, Param)) IsA(node, AlternativeSubPlan) ||IsA(node, Param))
{ {
/* /*
* Since we don't have the ability to push subplans down to workers * Since we don't have the ability to push subplans down to workers at
* at present, we treat subplan references as parallel-restricted. * present, we treat subplan references as parallel-restricted.
*/ */
if (!context->allow_restricted) if (!context->allow_restricted)
return true; return true;
@ -1405,12 +1405,14 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
if (IsA(node, RestrictInfo)) if (IsA(node, RestrictInfo))
{ {
RestrictInfo *rinfo = (RestrictInfo *) node; RestrictInfo *rinfo = (RestrictInfo *) node;
return has_parallel_hazard_walker((Node *) rinfo->clause, context); return has_parallel_hazard_walker((Node *) rinfo->clause, context);
} }
/* /*
* It is an error for a parallel worker to touch a temporary table in any * It is an error for a parallel worker to touch a temporary table in any
* way, so we can't handle nodes whose type is the rowtype of such a table. * way, so we can't handle nodes whose type is the rowtype of such a
* table.
*/ */
if (!context->allow_restricted) if (!context->allow_restricted)
{ {
@ -1535,6 +1537,7 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
foreach(opid, rcexpr->opnos) foreach(opid, rcexpr->opnos)
{ {
Oid opfuncid = get_opcode(lfirst_oid(opid)); Oid opfuncid = get_opcode(lfirst_oid(opid));
if (parallel_too_dangerous(func_parallel(opfuncid), context)) if (parallel_too_dangerous(func_parallel(opfuncid), context))
return true; return true;
} }
@ -1870,8 +1873,8 @@ contain_leaked_vars_walker(Node *node, void *context)
/* /*
* WHERE CURRENT OF doesn't contain function calls. Moreover, it * WHERE CURRENT OF doesn't contain function calls. Moreover, it
* is important that this can be pushed down into a * is important that this can be pushed down into a
* security_barrier view, since the planner must always generate * security_barrier view, since the planner must always generate a
* a TID scan when CURRENT OF is present -- c.f. cost_tidscan. * TID scan when CURRENT OF is present -- c.f. cost_tidscan.
*/ */
return false; return false;

View File

@ -287,15 +287,14 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
if (like_found) if (like_found)
{ {
/* /*
* To match INHERITS, the existence of any LIKE table with OIDs * To match INHERITS, the existence of any LIKE table with OIDs causes
* causes the new table to have oids. For the same reason, * the new table to have oids. For the same reason, WITH/WITHOUT OIDs
* WITH/WITHOUT OIDs is also ignored with LIKE. We prepend * is also ignored with LIKE. We prepend because the first oid option
* because the first oid option list entry is honored. Our * list entry is honored. Our prepended WITHOUT OIDS clause will be
* prepended WITHOUT OIDS clause will be overridden if an * overridden if an inherited table has oids.
* inherited table has oids.
*/ */
stmt->options = lcons(makeDefElem("oids", stmt->options = lcons(makeDefElem("oids",
(Node *)makeInteger(cxt.hasoids)), stmt->options); (Node *) makeInteger(cxt.hasoids)), stmt->options);
} }
foreach(elements, stmt->tableElts) foreach(elements, stmt->tableElts)
@ -305,6 +304,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
if (nodeTag(element) == T_Constraint) if (nodeTag(element) == T_Constraint)
transformTableConstraint(&cxt, (Constraint *) element); transformTableConstraint(&cxt, (Constraint *) element);
} }
/* /*
* transformIndexConstraints wants cxt.alist to contain only index * transformIndexConstraints wants cxt.alist to contain only index
* statements, so transfer anything we already have into save_alist. * statements, so transfer anything we already have into save_alist.
@ -1949,8 +1949,8 @@ transformCheckConstraints(CreateStmtContext *cxt, bool skipValidation)
/* /*
* If creating a new table, we can safely skip validation of check * If creating a new table, we can safely skip validation of check
* constraints, and nonetheless mark them valid. (This will override * constraints, and nonetheless mark them valid. (This will override any
* any user-supplied NOT VALID flag.) * user-supplied NOT VALID flag.)
*/ */
if (skipValidation) if (skipValidation)
{ {

View File

@ -35,8 +35,7 @@ pg_spinlock_barrier(void)
* *
* We use kill(0) for the fallback barrier as we assume that kernels on * We use kill(0) for the fallback barrier as we assume that kernels on
* systems old enough to require fallback barrier support will include an * systems old enough to require fallback barrier support will include an
* appropriate barrier while checking the existence of the postmaster * appropriate barrier while checking the existence of the postmaster pid.
* pid.
*/ */
(void) kill(PostmasterPid, 0); (void) kill(PostmasterPid, 0);
} }
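
For context, kill() with signal 0 delivers nothing; the kernel only performs the existence and permission checks for the target pid, and it is that lookup the fallback above leans on for its barrier semantics. A small standalone sketch of those semantics (getppid() stands in for PostmasterPid; this is not the spinlock code itself):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Signal 0 sends no signal; the kernel merely looks up the target pid and
 * checks permissions, reporting ESRCH if the process no longer exists.
 */
int
main(void)
{
    pid_t       target = getppid();     /* stand-in for PostmasterPid */

    if (kill(target, 0) == 0)
        printf("process %d exists (no signal was sent)\n", (int) target);
    else if (errno == ESRCH)
        printf("process %d no longer exists\n", (int) target);
    else
        perror("kill");
    return 0;
}
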

View File

@ -672,9 +672,9 @@ AutoVacLauncherMain(int argc, char *argv[])
/* /*
* There are some conditions that we need to check before trying to * There are some conditions that we need to check before trying to
* start a worker. First, we need to make sure that there is a * start a worker. First, we need to make sure that there is a worker
* worker slot available. Second, we need to make sure that no * slot available. Second, we need to make sure that no other worker
* other worker failed while starting up. * failed while starting up.
*/ */
current_time = GetCurrentTimestamp(); current_time = GetCurrentTimestamp();

View File

@ -2727,6 +2727,7 @@ pgstat_bestart(void)
beentry->st_activity[pgstat_track_activity_query_size - 1] = '\0'; beentry->st_activity[pgstat_track_activity_query_size - 1] = '\0';
beentry->st_progress_command = PROGRESS_COMMAND_INVALID; beentry->st_progress_command = PROGRESS_COMMAND_INVALID;
beentry->st_progress_command_target = InvalidOid; beentry->st_progress_command_target = InvalidOid;
/* /*
* we don't zero st_progress_param here to save cycles; nobody should * we don't zero st_progress_param here to save cycles; nobody should
* examine it until st_progress_command has been set to something other * examine it until st_progress_command has been set to something other

View File

@ -1182,23 +1182,22 @@ PostmasterMain(int argc, char *argv[])
RemovePgTempFiles(); RemovePgTempFiles();
/* /*
* Forcibly remove the files signaling a standby promotion * Forcibly remove the files signaling a standby promotion request.
* request. Otherwise, the existence of those files triggers * Otherwise, the existence of those files triggers a promotion too early,
* a promotion too early, whether a user wants that or not. * whether a user wants that or not.
* *
* This removal of files is usually unnecessary because they * This removal of files is usually unnecessary because they can exist
* can exist only during a few moments during a standby * only during a few moments during a standby promotion. However there is
* promotion. However there is a race condition: if pg_ctl promote * a race condition: if pg_ctl promote is executed and creates the files
* is executed and creates the files during a promotion, * during a promotion, the files can stay around even after the server is
* the files can stay around even after the server is brought up * brought up to new master. Then, if new standby starts by using the
* to new master. Then, if new standby starts by using the backup * backup taken from that master, the files can exist at the server
* taken from that master, the files can exist at the server
* startup and should be removed in order to avoid an unexpected * startup and should be removed in order to avoid an unexpected
* promotion. * promotion.
* *
* Note that promotion signal files need to be removed before * Note that promotion signal files need to be removed before the startup
* the startup process is invoked. Because, after that, they can * process is invoked. Because, after that, they can be used by
* be used by postmaster's SIGUSR1 signal handler. * postmaster's SIGUSR1 signal handler.
*/ */
RemovePromoteSignalFiles(); RemovePromoteSignalFiles();
@ -2607,6 +2606,7 @@ pmdie(SIGNAL_ARGS)
if (pmState == PM_RECOVERY) if (pmState == PM_RECOVERY)
{ {
SignalSomeChildren(SIGTERM, BACKEND_TYPE_BGWORKER); SignalSomeChildren(SIGTERM, BACKEND_TYPE_BGWORKER);
/* /*
* Only startup, bgwriter, walreceiver, possibly bgworkers, * Only startup, bgwriter, walreceiver, possibly bgworkers,
* and/or checkpointer should be active in this state; we just * and/or checkpointer should be active in this state; we just
@ -3074,9 +3074,9 @@ CleanupBackgroundWorker(int pid,
/* /*
* It's possible that this background worker started some OTHER * It's possible that this background worker started some OTHER
* background worker and asked to be notified when that worker * background worker and asked to be notified when that worker started
* started or stopped. If so, cancel any notifications destined * or stopped. If so, cancel any notifications destined for the
* for the now-dead backend. * now-dead backend.
*/ */
if (rw->rw_backend->bgworker_notify) if (rw->rw_backend->bgworker_notify)
BackgroundWorkerStopNotifications(rw->rw_pid); BackgroundWorkerStopNotifications(rw->rw_pid);
@ -5696,9 +5696,8 @@ maybe_start_bgworker(void)
rw->rw_crashed_at = 0; rw->rw_crashed_at = 0;
/* /*
* Allocate and assign the Backend element. Note we * Allocate and assign the Backend element. Note we must do this
* must do this before forking, so that we can handle out of * before forking, so that we can handle out of memory properly.
* memory properly.
*/ */
if (!assign_backendlist_entry(rw)) if (!assign_backendlist_entry(rw))
return; return;

View File

@ -522,7 +522,8 @@ DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
snapshot = SnapBuildGetOrBuildSnapshot(builder, xid); snapshot = SnapBuildGetOrBuildSnapshot(builder, xid);
ReorderBufferQueueMessage(ctx->reorder, xid, snapshot, buf->endptr, ReorderBufferQueueMessage(ctx->reorder, xid, snapshot, buf->endptr,
message->transactional, message->transactional,
message->message, /* first part of message is prefix */ message->message, /* first part of message is
* prefix */
message->message_size, message->message_size,
message->message + message->prefix_size); message->message + message->prefix_size);
} }

View File

@ -1836,10 +1836,10 @@ ReorderBufferImmediateInvalidation(ReorderBuffer *rb, uint32 ninvalidations,
BeginInternalSubTransaction("replay"); BeginInternalSubTransaction("replay");
/* /*
* Force invalidations to happen outside of a valid transaction - that * Force invalidations to happen outside of a valid transaction - that way
* way entries will just be marked as invalid without accessing the * entries will just be marked as invalid without accessing the catalog.
* catalog. That's advantageous because we don't need to setup the * That's advantageous because we don't need to setup the full state
* full state necessary for catalog access. * necessary for catalog access.
*/ */
if (use_subtxn) if (use_subtxn)
AbortCurrentTransaction(); AbortCurrentTransaction();
@ -2543,7 +2543,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
change->data.msg.prefix = MemoryContextAlloc(rb->context, change->data.msg.prefix = MemoryContextAlloc(rb->context,
prefix_size); prefix_size);
memcpy(change->data.msg.prefix, data, prefix_size); memcpy(change->data.msg.prefix, data, prefix_size);
Assert(change->data.msg.prefix[prefix_size-1] == '\0'); Assert(change->data.msg.prefix[prefix_size - 1] == '\0');
data += prefix_size; data += prefix_size;
/* read the message */ /* read the message */
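
The layout being (de)serialized here is simply a NUL-terminated prefix followed by the raw message bytes, with prefix_size counting the terminator so the payload starts at message + prefix_size. A standalone sketch of packing and unpacking that layout (illustrative names only, not the reorderbuffer code):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Pack a NUL-terminated prefix followed by an arbitrary payload into one
 * buffer: the prefix (including its '\0') comes first, the payload starts
 * at buf + prefix_size.  Returns NULL on allocation failure.
 */
static char *
pack_message(const char *prefix, const char *payload, size_t payload_size,
             size_t *total_size)
{
    size_t      prefix_size = strlen(prefix) + 1;      /* include the NUL */
    char       *buf = malloc(prefix_size + payload_size);

    if (buf == NULL)
        return NULL;
    memcpy(buf, prefix, prefix_size);
    memcpy(buf + prefix_size, payload, payload_size);
    *total_size = prefix_size + payload_size;
    return buf;
}

int
main(void)
{
    size_t      total;
    char       *buf = pack_message("test_prefix", "payload bytes", 13, &total);
    size_t      prefix_size;

    assert(buf != NULL);
    prefix_size = strlen(buf) + 1;      /* recover the split point */
    assert(buf[prefix_size - 1] == '\0');       /* same check as the Assert above */
    printf("prefix: %s, payload: %.*s\n",
           buf, (int) (total - prefix_size), buf + prefix_size);
    free(buf);
    return 0;
}
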

View File

@ -230,11 +230,11 @@ ReplicationSlotCreate(const char *name, bool db_specific,
ReplicationSlotValidateName(name, ERROR); ReplicationSlotValidateName(name, ERROR);
/* /*
* If some other backend ran this code concurrently with us, we'd likely both * If some other backend ran this code concurrently with us, we'd likely
* allocate the same slot, and that would be bad. We'd also be at risk of * both allocate the same slot, and that would be bad. We'd also be at
* missing a name collision. Also, we don't want to try to create a new * risk of missing a name collision. Also, we don't want to try to create
* slot while somebody's busy cleaning up an old one, because we might * a new slot while somebody's busy cleaning up an old one, because we
* both be monkeying with the same directory. * might both be monkeying with the same directory.
*/ */
LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE); LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);
@ -533,6 +533,7 @@ void
ReplicationSlotMarkDirty(void) ReplicationSlotMarkDirty(void)
{ {
ReplicationSlot *slot = MyReplicationSlot; ReplicationSlot *slot = MyReplicationSlot;
Assert(MyReplicationSlot != NULL); Assert(MyReplicationSlot != NULL);
SpinLockAcquire(&slot->mutex); SpinLockAcquire(&slot->mutex);

View File

@ -212,8 +212,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
/* /*
* If a wait for synchronous replication is pending, we can neither * If a wait for synchronous replication is pending, we can neither
* acknowledge the commit nor raise ERROR or FATAL. The latter would * acknowledge the commit nor raise ERROR or FATAL. The latter would
* lead the client to believe that the transaction aborted, which * lead the client to believe that the transaction aborted, which is
* is not true: it's already committed locally. The former is no good * not true: it's already committed locally. The former is no good
* either: the client has requested synchronous replication, and is * either: the client has requested synchronous replication, and is
* entitled to assume that an acknowledged commit is also replicated, * entitled to assume that an acknowledged commit is also replicated,
* which might not be true. So in this case we issue a WARNING (which * which might not be true. So in this case we issue a WARNING (which
@ -400,8 +400,8 @@ SyncRepReleaseWaiters(void)
/* /*
* If this WALSender is serving a standby that is not on the list of * If this WALSender is serving a standby that is not on the list of
* potential sync standbys then we have nothing to do. If we are still * potential sync standbys then we have nothing to do. If we are still
* starting up, still running base backup or the current flush position * starting up, still running base backup or the current flush position is
* is still invalid, then leave quickly also. * still invalid, then leave quickly also.
*/ */
if (MyWalSnd->sync_standby_priority == 0 || if (MyWalSnd->sync_standby_priority == 0 ||
MyWalSnd->state < WALSNDSTATE_STREAMING || MyWalSnd->state < WALSNDSTATE_STREAMING ||
@ -412,21 +412,21 @@ SyncRepReleaseWaiters(void)
} }
/* /*
* We're a potential sync standby. Release waiters if there are * We're a potential sync standby. Release waiters if there are enough
* enough sync standbys and we are considered as sync. * sync standbys and we are considered as sync.
*/ */
LWLockAcquire(SyncRepLock, LW_EXCLUSIVE); LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
/* /*
* Check whether we are a sync standby or not, and calculate * Check whether we are a sync standby or not, and calculate the oldest
* the oldest positions among all sync standbys. * positions among all sync standbys.
*/ */
got_oldest = SyncRepGetOldestSyncRecPtr(&writePtr, &flushPtr, got_oldest = SyncRepGetOldestSyncRecPtr(&writePtr, &flushPtr,
&applyPtr, &am_sync); &applyPtr, &am_sync);
/* /*
* If we are managing a sync standby, though we weren't * If we are managing a sync standby, though we weren't prior to this,
* prior to this, then announce we are now a sync standby. * then announce we are now a sync standby.
*/ */
if (announce_next_takeover && am_sync) if (announce_next_takeover && am_sync)
{ {
@ -513,10 +513,10 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
} }
/* /*
* Scan through all sync standbys and calculate the oldest * Scan through all sync standbys and calculate the oldest Write, Flush
* Write, Flush and Apply positions. * and Apply positions.
*/ */
foreach (cell, sync_standbys) foreach(cell, sync_standbys)
{ {
WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)]; WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
XLogRecPtr write; XLogRecPtr write;
@ -562,8 +562,8 @@ SyncRepGetSyncStandbys(bool *am_sync)
int priority; int priority;
int i; int i;
bool am_in_pending = false; bool am_in_pending = false;
volatile WalSnd *walsnd; /* Use volatile pointer to prevent volatile WalSnd *walsnd; /* Use volatile pointer to prevent code
* code rearrangement */ * rearrangement */
/* Set default result */ /* Set default result */
if (am_sync != NULL) if (am_sync != NULL)
@ -577,9 +577,9 @@ SyncRepGetSyncStandbys(bool *am_sync)
next_highest_priority = lowest_priority + 1; next_highest_priority = lowest_priority + 1;
/* /*
* Find the sync standbys which have the highest priority (i.e., 1). * Find the sync standbys which have the highest priority (i.e., 1). Also
* Also store all the other potential sync standbys into the pending list, * store all the other potential sync standbys into the pending list, in
* in order to scan it later and find other sync standbys from it quickly. * order to scan it later and find other sync standbys from it quickly.
*/ */
for (i = 0; i < max_wal_senders; i++) for (i = 0; i < max_wal_senders; i++)
{ {
@ -603,9 +603,9 @@ SyncRepGetSyncStandbys(bool *am_sync)
continue; continue;
/* /*
* If the priority is equal to 1, consider this standby as sync * If the priority is equal to 1, consider this standby as sync and
* and append it to the result. Otherwise append this standby * append it to the result. Otherwise append this standby to the
* to the pending list to check if it's actually sync or not later. * pending list to check if it's actually sync or not later.
*/ */
if (this_priority == 1) if (this_priority == 1)
{ {
@ -626,10 +626,10 @@ SyncRepGetSyncStandbys(bool *am_sync)
/* /*
* Track the highest priority among the standbys in the pending * Track the highest priority among the standbys in the pending
* list, in order to use it as the starting priority for later scan * list, in order to use it as the starting priority for later
* of the list. This is useful to find quickly the sync standbys * scan of the list. This is useful to find quickly the sync
* from the pending list later because we can skip unnecessary * standbys from the pending list later because we can skip
* scans for the unused priorities. * unnecessary scans for the unused priorities.
*/ */
if (this_priority < next_highest_priority) if (this_priority < next_highest_priority)
next_highest_priority = this_priority; next_highest_priority = this_priority;
@ -685,8 +685,8 @@ SyncRepGetSyncStandbys(bool *am_sync)
/* /*
* We should always exit here after the scan of pending list * We should always exit here after the scan of pending list
* starts because we know that the list has enough elements * starts because we know that the list has enough elements to
* to reach SyncRepConfig->num_sync. * reach SyncRepConfig->num_sync.
*/ */
if (list_length(result) == SyncRepConfig->num_sync) if (list_length(result) == SyncRepConfig->num_sync)
{ {
@ -695,8 +695,8 @@ SyncRepGetSyncStandbys(bool *am_sync)
} }
/* /*
* Remove the entry for this sync standby from the list * Remove the entry for this sync standby from the list to
* to prevent us from looking at the same entry again. * prevent us from looking at the same entry again.
*/ */
pending = list_delete_cell(pending, cell, prev); pending = list_delete_cell(pending, cell, prev);
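
The SyncRepGetSyncStandbys comments above describe a priority-driven selection: take the priority-1 walsenders first, keep the other candidates in a pending list, and remember the next-highest priority seen so later scans can skip unused levels. Below is a simplified standalone sketch of that selection over a plain array of priorities (0 meaning "not a candidate"); NSENDERS, NUM_SYNC, and the array contents are made up for the example.

#include <stdio.h>

#define NSENDERS 6
#define NUM_SYNC 2

int
main(void)
{
    int priorities[NSENDERS] = {0, 3, 1, 2, 0, 2};  /* 0 = not a sync candidate */
    int chosen[NUM_SYNC];
    int nchosen = 0;
    int level;
    int i;

    /*
     * Walk priority levels starting from the highest priority (1), taking
     * candidates until NUM_SYNC standbys are chosen.  The real code keeps a
     * pending list and tracks the next-highest priority seen so it can skip
     * unused levels; this sketch simply iterates the levels directly.
     */
    for (level = 1; level <= NSENDERS && nchosen < NUM_SYNC; level++)
        for (i = 0; i < NSENDERS && nchosen < NUM_SYNC; i++)
            if (priorities[i] == level)
                chosen[nchosen++] = i;

    for (i = 0; i < nchosen; i++)
        printf("sync standby: walsender %d (priority %d)\n",
               chosen[i], priorities[chosen[i]]);
    return 0;
}
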

View File

@ -475,8 +475,8 @@ WalReceiverMain(void)
/* /*
* The recovery process has asked us to send apply * The recovery process has asked us to send apply
* feedback now. Make sure the flag is really set to * feedback now. Make sure the flag is really set to
* false in shared memory before sending the reply, * false in shared memory before sending the reply, so
* so we don't miss a new request for a reply. * we don't miss a new request for a reply.
*/ */
walrcv->force_reply = false; walrcv->force_reply = false;
pg_memory_barrier(); pg_memory_barrier();
@ -1379,8 +1379,8 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS)
if (!superuser()) if (!superuser())
{ {
/* /*
* Only superusers can see details. Other users only get the pid * Only superusers can see details. Other users only get the pid value
* value to know whether it is a WAL receiver, but no details. * to know whether it is a WAL receiver, but no details.
*/ */
MemSet(&nulls[1], true, PG_STAT_GET_WAL_RECEIVER_COLS - 1); MemSet(&nulls[1], true, PG_STAT_GET_WAL_RECEIVER_COLS - 1);
} }
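
The WalReceiverMain hunk above explains the ordering requirement around force_reply: the flag is cleared in shared memory and a pg_memory_barrier() is issued before the reply is sent, so a request raised right after the reply cannot be missed. A small standalone illustration of that clear-then-fence-then-reply pattern using C11 atomics; the two threads, the flag, and the counter are stand-ins for the example, not the walreceiver's actual shared state.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool force_reply = false;
static atomic_int replies_sent = 0;

static void *
requester(void *arg)
{
    int i;

    for (i = 0; i < 3; i++)
    {
        atomic_store(&force_reply, true);        /* ask for a reply */
        while (atomic_load(&replies_sent) <= i)  /* busy-wait until it arrives */
            ;
    }
    return NULL;
}

int
main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, requester, NULL);

    while (atomic_load(&replies_sent) < 3)
    {
        if (atomic_load(&force_reply))
        {
            /*
             * Reset the flag *before* sending the reply, with a full fence
             * in between, so a request raised immediately after the reply
             * is not lost.
             */
            atomic_store(&force_reply, false);
            atomic_thread_fence(memory_order_seq_cst);
            atomic_fetch_add(&replies_sent, 1);  /* "send" the reply */
        }
    }
    pthread_join(t, NULL);
    printf("replies sent: %d\n", atomic_load(&replies_sent));
    return 0;
}
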

View File

@ -414,8 +414,8 @@ DefineQueryRewrite(char *rulename,
* any triggers, indexes, child tables, policies, or RLS enabled. * any triggers, indexes, child tables, policies, or RLS enabled.
* (Note: these tests are too strict, because they will reject * (Note: these tests are too strict, because they will reject
* relations that once had such but don't anymore. But we don't * relations that once had such but don't anymore. But we don't
* really care, because this whole business of converting relations * really care, because this whole business of converting relations to
* to views is just a kluge to allow dump/reload of views that * views is just a kluge to allow dump/reload of views that
* participate in circular dependencies.) * participate in circular dependencies.)
*/ */
if (event_relation->rd_rel->relkind != RELKIND_VIEW && if (event_relation->rd_rel->relkind != RELKIND_VIEW &&

View File

@ -170,22 +170,24 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
* visibility of records) associated with multiple command types (see * visibility of records) associated with multiple command types (see
* specific cases below). * specific cases below).
* *
* When considering the order in which to apply these USING policies, * When considering the order in which to apply these USING policies, we
* we prefer to apply higher privileged policies, those which allow the * prefer to apply higher privileged policies, those which allow the user
* user to lock records (UPDATE and DELETE), first, followed by policies * to lock records (UPDATE and DELETE), first, followed by policies which
* which don't (SELECT). * don't (SELECT).
* *
* Note that the optimizer is free to push down and reorder quals which * Note that the optimizer is free to push down and reorder quals which
* use leakproof functions. * use leakproof functions.
* *
* In all cases, if there are no policy clauses allowing access to rows in * In all cases, if there are no policy clauses allowing access to rows in
* the table for the specific type of operation, then a single always-false * the table for the specific type of operation, then a single
* clause (a default-deny policy) will be added (see add_security_quals). * always-false clause (a default-deny policy) will be added (see
* add_security_quals).
*/ */
/* /*
* For a SELECT, if UPDATE privileges are required (eg: the user has * For a SELECT, if UPDATE privileges are required (eg: the user has
* specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals first. * specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals
* first.
* *
* This way, we filter out any records from the SELECT FOR SHARE/UPDATE * This way, we filter out any records from the SELECT FOR SHARE/UPDATE
* which the user does not have access to via the UPDATE USING policies, * which the user does not have access to via the UPDATE USING policies,
@ -232,8 +234,8 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
* a WHERE clause which involves columns from the relation), we collect up * a WHERE clause which involves columns from the relation), we collect up
* CMD_SELECT policies and add them via add_security_quals first. * CMD_SELECT policies and add them via add_security_quals first.
* *
* This way, we filter out any records which are not visible through an ALL * This way, we filter out any records which are not visible through an
* or SELECT USING policy. * ALL or SELECT USING policy.
*/ */
if ((commandType == CMD_UPDATE || commandType == CMD_DELETE) && if ((commandType == CMD_UPDATE || commandType == CMD_DELETE) &&
rte->requiredPerms & ACL_SELECT) rte->requiredPerms & ACL_SELECT)
@ -272,9 +274,9 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
hasSubLinks); hasSubLinks);
/* /*
* Get and add ALL/SELECT policies, if SELECT rights are required * Get and add ALL/SELECT policies, if SELECT rights are required for
* for this relation (eg: when RETURNING is used). These are added as * this relation (eg: when RETURNING is used). These are added as WCO
* WCO policies rather than security quals to ensure that an error is * policies rather than security quals to ensure that an error is
* raised if a policy is violated; otherwise, we might end up silently * raised if a policy is violated; otherwise, we might end up silently
* dropping rows to be added. * dropping rows to be added.
*/ */
@ -324,11 +326,11 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
hasSubLinks); hasSubLinks);
/* /*
* Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK * Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK WCOs
* WCOs to ensure they are considered when taking the UPDATE * to ensure they are considered when taking the UPDATE path of an
* path of an INSERT .. ON CONFLICT DO UPDATE, if SELECT * INSERT .. ON CONFLICT DO UPDATE, if SELECT rights are required
* rights are required for this relation, also as WCO policies, * for this relation, also as WCO policies, again, to avoid
* again, to avoid silently dropping data. See above. * silently dropping data. See above.
*/ */
if (rte->requiredPerms & ACL_SELECT) if (rte->requiredPerms & ACL_SELECT)
{ {
@ -427,8 +429,8 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id,
} }
/* /*
* Add this policy to the list of permissive policies if it * Add this policy to the list of permissive policies if it applies to
* applies to the specified role. * the specified role.
*/ */
if (cmd_matches && check_role_for_policy(policy->roles, user_id)) if (cmd_matches && check_role_for_policy(policy->roles, user_id))
*permissive_policies = lappend(*permissive_policies, policy); *permissive_policies = lappend(*permissive_policies, policy);
@ -498,6 +500,7 @@ sort_policies_by_name(List *policies)
foreach(item, policies) foreach(item, policies)
{ {
RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item); RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item);
pols[ii++] = *policy; pols[ii++] = *policy;
} }
@ -551,8 +554,8 @@ add_security_quals(int rt_index,
Expr *rowsec_expr; Expr *rowsec_expr;
/* /*
* First collect up the permissive quals. If we do not find any permissive * First collect up the permissive quals. If we do not find any
* policies then no rows are visible (this is handled below). * permissive policies then no rows are visible (this is handled below).
*/ */
foreach(item, permissive_policies) foreach(item, permissive_policies)
{ {
@ -577,8 +580,8 @@ add_security_quals(int rt_index,
/* /*
* We now know that permissive policies exist, so we can now add * We now know that permissive policies exist, so we can now add
* security quals based on the USING clauses from the restrictive * security quals based on the USING clauses from the restrictive
* policies. Since these need to be "AND"d together, we can * policies. Since these need to be "AND"d together, we can just add
* just add them one at a time. * them one at a time.
*/ */
foreach(item, restrictive_policies) foreach(item, restrictive_policies)
{ {
@ -608,6 +611,7 @@ add_security_quals(int rt_index,
*securityQuals = list_append_unique(*securityQuals, rowsec_expr); *securityQuals = list_append_unique(*securityQuals, rowsec_expr);
} }
else else
/* /*
* A permissive policy must exist for rows to be visible at all. * A permissive policy must exist for rows to be visible at all.
* Therefore, if there were no permissive policies found, return a * Therefore, if there were no permissive policies found, return a
@ -668,11 +672,11 @@ add_with_check_options(Relation rel,
} }
/* /*
* There must be at least one permissive qual found or no rows are * There must be at least one permissive qual found or no rows are allowed
* allowed to be added. This is the same as in add_security_quals. * to be added. This is the same as in add_security_quals.
* *
* If there are no permissive_quals then we fall through and return a single * If there are no permissive_quals then we fall through and return a
* 'false' WCO, preventing all new rows. * single 'false' WCO, preventing all new rows.
*/ */
if (permissive_quals != NIL) if (permissive_quals != NIL)
{ {

View File

@ -187,11 +187,12 @@ BufferShmemSize(void)
/* /*
* It would be nice to include the I/O locks in the BufferDesc, but that * It would be nice to include the I/O locks in the BufferDesc, but that
* would increase the size of a BufferDesc to more than one cache line, and * would increase the size of a BufferDesc to more than one cache line,
* benchmarking has shown that keeping every BufferDesc aligned on a cache * and benchmarking has shown that keeping every BufferDesc aligned on a
* line boundary is important for performance. So, instead, the array of * cache line boundary is important for performance. So, instead, the
* I/O locks is allocated in a separate tranche. Because those locks are * array of I/O locks is allocated in a separate tranche. Because those
* not highly contended, we lay out the array with minimal padding. * locks are not highly contended, we lay out the array with minimal
* padding.
*/ */
size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded))); size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
/* to allow aligning the above */ /* to allow aligning the above */
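
The BufferShmemSize comment above weighs keeping each BufferDesc inside one cache line against laying the I/O lock array out with minimal padding. A tiny standalone illustration of the space cost of cache-line padding; the struct fields and the 64-byte line size are assumptions for the example, not PostgreSQL's definitions.

#include <stdalign.h>
#include <stdio.h>

/* A small lock-like struct, laid out with minimal padding. */
typedef struct { unsigned int state; unsigned short tranche; } MinimalLock;

/* The same fields, padded so each element occupies a full 64-byte cache line. */
typedef struct { alignas(64) unsigned int state; unsigned short tranche; } PaddedLock;

int
main(void)
{
    printf("minimal: %zu bytes each, padded: %zu bytes each\n",
           sizeof(MinimalLock), sizeof(PaddedLock));
    /* An array of 100000 padded locks costs 6.4 MB; the minimal layout far less. */
    return 0;
}
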

View File

@ -219,9 +219,9 @@ UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum,
fsm_update_recursive(rel, addr, new_cat); fsm_update_recursive(rel, addr, new_cat);
/* /*
* Get the last block number on this FSM page. If that's greater * Get the last block number on this FSM page. If that's greater than
* than or equal to our endBlkNum, we're done. Otherwise, advance * or equal to our endBlkNum, we're done. Otherwise, advance to the
* to the first block on the next page. * first block on the next page.
*/ */
lastBlkOnPage = fsm_get_lastblckno(rel, addr); lastBlkOnPage = fsm_get_lastblckno(rel, addr);
if (lastBlkOnPage >= endBlkNum) if (lastBlkOnPage >= endBlkNum)
@ -841,8 +841,8 @@ fsm_get_lastblckno(Relation rel, FSMAddress addr)
int slot; int slot;
/* /*
* Get the last slot number on the given address and convert that to * Get the last slot number on the given address and convert that to block
* block number * number
*/ */
slot = SlotsPerFSMPage - 1; slot = SlotsPerFSMPage - 1;
return fsm_get_heap_blk(addr, slot); return fsm_get_heap_blk(addr, slot);
@ -862,8 +862,8 @@ fsm_update_recursive(Relation rel, FSMAddress addr, uint8 new_cat)
return; return;
/* /*
* Get the parent page and our slot in the parent page, and * Get the parent page and our slot in the parent page, and update the
* update the information in that. * information in that.
*/ */
parent = fsm_get_parent(addr, &parentslot); parent = fsm_get_parent(addr, &parentslot);
fsm_set_and_search(rel, parent, parentslot, new_cat, 0); fsm_set_and_search(rel, parent, parentslot, new_cat, 0);

View File

@ -245,8 +245,8 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
} }
/* /*
* OK, the control segment looks basically valid, so we can use it to * OK, the control segment looks basically valid, so we can use it to get
* get a list of segments that need to be removed. * a list of segments that need to be removed.
*/ */
nitems = old_control->nitems; nitems = old_control->nitems;
for (i = 0; i < nitems; ++i) for (i = 0; i < nitems; ++i)

View File

@ -642,8 +642,8 @@ ProcArrayInitRecovery(TransactionId initializedUptoXID)
Assert(TransactionIdIsNormal(initializedUptoXID)); Assert(TransactionIdIsNormal(initializedUptoXID));
/* /*
* we set latestObservedXid to the xid SUBTRANS has been initialized up to, * we set latestObservedXid to the xid SUBTRANS has been initialized up
* so we can extend it from that point onwards in * to, so we can extend it from that point onwards in
* RecordKnownAssignedTransactionIds, and when we get consistent in * RecordKnownAssignedTransactionIds, and when we get consistent in
* ProcArrayApplyRecoveryInfo(). * ProcArrayApplyRecoveryInfo().
*/ */
@ -2591,8 +2591,8 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
/* /*
* We ignore an invalid pxmin because this means that backend has * We ignore an invalid pxmin because this means that backend has
* no snapshot currently. We hold a Share lock to avoid contention * no snapshot currently. We hold a Share lock to avoid contention
* with users taking snapshots. That is not a problem because * with users taking snapshots. That is not a problem because the
* the current xmin is always at least one higher than the latest * current xmin is always at least one higher than the latest
* removed xid, so any new snapshot would never conflict with the * removed xid, so any new snapshot would never conflict with the
* test here. * test here.
*/ */

View File

@ -370,6 +370,7 @@ ResolveRecoveryConflictWithLock(LOCKTAG locktag)
* We're already behind, so clear a path as quickly as possible. * We're already behind, so clear a path as quickly as possible.
*/ */
VirtualTransactionId *backends; VirtualTransactionId *backends;
backends = GetLockConflicts(&locktag, AccessExclusiveLock); backends = GetLockConflicts(&locktag, AccessExclusiveLock);
ResolveRecoveryConflictWithVirtualXIDs(backends, ResolveRecoveryConflictWithVirtualXIDs(backends,
PROCSIG_RECOVERY_CONFLICT_LOCK); PROCSIG_RECOVERY_CONFLICT_LOCK);

View File

@ -1153,13 +1153,13 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
uint32 partition = LockHashPartition(hashcode); uint32 partition = LockHashPartition(hashcode);
/* /*
* It might seem unsafe to access proclock->groupLeader without a lock, * It might seem unsafe to access proclock->groupLeader without a
* but it's not really. Either we are initializing a proclock on our * lock, but it's not really. Either we are initializing a proclock
* own behalf, in which case our group leader isn't changing because * on our own behalf, in which case our group leader isn't changing
* the group leader for a process can only ever be changed by the * because the group leader for a process can only ever be changed by
* process itself; or else we are transferring a fast-path lock to the * the process itself; or else we are transferring a fast-path lock to
* main lock table, in which case that process can't change it's lock * the main lock table, in which case that process can't change it's
* group leader without first releasing all of its locks (and in * lock group leader without first releasing all of its locks (and in
* particular the one we are currently transferring). * particular the one we are currently transferring).
*/ */
proclock->groupLeader = proc->lockGroupLeader != NULL ? proclock->groupLeader = proc->lockGroupLeader != NULL ?
@ -1319,10 +1319,9 @@ LockCheckConflicts(LockMethod lockMethodTable,
} }
/* /*
* Rats. Something conflicts. But it could still be my own lock, or * Rats. Something conflicts. But it could still be my own lock, or a
* a lock held by another member of my locking group. First, figure out * lock held by another member of my locking group. First, figure out how
* how many conflicts remain after subtracting out any locks I hold * many conflicts remain after subtracting out any locks I hold myself.
* myself.
*/ */
myLocks = proclock->holdMask; myLocks = proclock->holdMask;
for (i = 1; i <= numLockModes; i++) for (i = 1; i <= numLockModes; i++)
@ -1357,9 +1356,10 @@ LockCheckConflicts(LockMethod lockMethodTable,
/* /*
* Locks held in conflicting modes by members of our own lock group are * Locks held in conflicting modes by members of our own lock group are
* not real conflicts; we can subtract those out and see if we still have * not real conflicts; we can subtract those out and see if we still have
* a conflict. This is O(N) in the number of processes holding or awaiting * a conflict. This is O(N) in the number of processes holding or
* locks on this object. We could improve that by making the shared memory * awaiting locks on this object. We could improve that by making the
* state more complex (and larger) but it doesn't seem worth it. * shared memory state more complex (and larger) but it doesn't seem worth
* it.
*/ */
procLocks = &(lock->procLocks); procLocks = &(lock->procLocks);
otherproclock = (PROCLOCK *) otherproclock = (PROCLOCK *)
@ -2583,8 +2583,8 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
* *
* proc->databaseId is set at backend startup time and never changes * proc->databaseId is set at backend startup time and never changes
* thereafter, so it might be safe to perform this test before * thereafter, so it might be safe to perform this test before
* acquiring &proc->backendLock. In particular, it's certainly safe to * acquiring &proc->backendLock. In particular, it's certainly safe
* assume that if the target backend holds any fast-path locks, it * to assume that if the target backend holds any fast-path locks, it
* must have performed a memory-fencing operation (in particular, an * must have performed a memory-fencing operation (in particular, an
* LWLock acquisition) since setting proc->databaseId. However, it's * LWLock acquisition) since setting proc->databaseId. However, it's
* less clear that our backend is certain to have performed a memory * less clear that our backend is certain to have performed a memory
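
The LockCheckConflicts comments above describe subtracting locks held by members of our own lock group from the apparent conflicts before deciding whether to wait. Below is a toy bitmask version of that idea; the mode numbers and conflict table are invented, and the real code counts holders per mode rather than using a single mask, so this is only an approximation of the technique.

#include <stdio.h>

#define NMODES 4                /* toy lock modes 1..3, index 0 unused */

/* conflictTab[m] is a bitmask of modes that conflict with mode m. */
static const int conflictTab[NMODES] = {0, 0x8, 0x8, 0xE};

int
main(void)
{
    int reqmode = 3;                        /* mode we want to acquire */
    int grantMask = (1 << 1) | (1 << 3);    /* modes currently granted on the lock */
    int myMask = (1 << 1);                  /* modes held by our own lock group */

    /* Conflicts that are not explained by our own group's holdings. */
    int otherConflicts = conflictTab[reqmode] & grantMask & ~myMask;

    printf(otherConflicts ? "real conflict, must wait\n"
                          : "no real conflict\n");
    return 0;
}
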

View File

@ -760,8 +760,8 @@ GetLWLockIdentifier(uint8 classId, uint16 eventId)
/* /*
* It is quite possible that user has registered tranche in one of the * It is quite possible that user has registered tranche in one of the
* backends (e.g. by allocating lwlocks in dynamic shared memory) but * backends (e.g. by allocating lwlocks in dynamic shared memory) but not
* not all of them, so we can't assume the tranche is registered here. * all of them, so we can't assume the tranche is registered here.
*/ */
if (eventId >= LWLockTranchesAllocated || if (eventId >= LWLockTranchesAllocated ||
LWLockTrancheArray[eventId]->name == NULL) LWLockTrancheArray[eventId]->name == NULL)

View File

@ -288,7 +288,7 @@ InitProcGlobal(void)
void void
InitProcess(void) InitProcess(void)
{ {
PGPROC * volatile * procgloballist; PGPROC *volatile * procgloballist;
/* /*
* ProcGlobal should be set up already (if we are a backend, we inherit * ProcGlobal should be set up already (if we are a backend, we inherit
@ -342,8 +342,8 @@ InitProcess(void)
MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno]; MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];
/* /*
* Cross-check that the PGPROC is of the type we expect; if this were * Cross-check that the PGPROC is of the type we expect; if this were not
* not the case, it would get returned to the wrong list. * the case, it would get returned to the wrong list.
*/ */
Assert(MyProc->procgloballist == procgloballist); Assert(MyProc->procgloballist == procgloballist);
@ -781,7 +781,7 @@ static void
ProcKill(int code, Datum arg) ProcKill(int code, Datum arg)
{ {
PGPROC *proc; PGPROC *proc;
PGPROC * volatile * procgloballist; PGPROC *volatile * procgloballist;
Assert(MyProc != NULL); Assert(MyProc != NULL);

View File

@ -358,7 +358,7 @@ getNextFlagFromString(IspellDict *Conf, char **sflagset, char *sflag)
maxstep = (Conf->flagMode == FM_LONG) ? 2 : 1; maxstep = (Conf->flagMode == FM_LONG) ? 2 : 1;
while(**sflagset) while (**sflagset)
{ {
switch (Conf->flagMode) switch (Conf->flagMode)
{ {
@ -527,6 +527,7 @@ NIImportDictionary(IspellDict *Conf, const char *filename)
{ {
char *s, char *s,
*pstr; *pstr;
/* Set of affix flags */ /* Set of affix flags */
const char *flag; const char *flag;
@ -620,9 +621,9 @@ FindWord(IspellDict *Conf, const char *word, char *affixflag, int flag)
if (flag == 0) if (flag == 0)
{ {
/* /*
* The word can be formed only with another word. * The word can be formed only with another word. And
* And in the flag parameter there is not a sign * in the flag parameter there is not a sign that we
* that we search compound words. * search compound words.
*/ */
if (StopMiddle->compoundflag & FF_COMPOUNDONLY) if (StopMiddle->compoundflag & FF_COMPOUNDONLY)
return 0; return 0;
@ -671,7 +672,7 @@ FindWord(IspellDict *Conf, const char *word, char *affixflag, int flag)
* type: FF_SUFFIX or FF_PREFIX. * type: FF_SUFFIX or FF_PREFIX.
*/ */
static void static void
NIAddAffix(IspellDict *Conf, const char* flag, char flagflags, const char *mask, NIAddAffix(IspellDict *Conf, const char *flag, char flagflags, const char *mask,
const char *find, const char *repl, int type) const char *find, const char *repl, int type)
{ {
AFFIX *Affix; AFFIX *Affix;
@ -1161,9 +1162,10 @@ getAffixFlagSet(IspellDict *Conf, char *s)
errmsg("invalid affix alias \"%s\"", s))); errmsg("invalid affix alias \"%s\"", s)));
if (curaffix > 0 && curaffix <= Conf->nAffixData) if (curaffix > 0 && curaffix <= Conf->nAffixData)
/* /*
* Do not subtract 1 from curaffix * Do not subtract 1 from curaffix because empty string was added
* because empty string was added in NIImportOOAffixes * in NIImportOOAffixes
*/ */
return Conf->AffixData[curaffix]; return Conf->AffixData[curaffix];
else else
@ -1597,6 +1599,7 @@ static uint32
makeCompoundFlags(IspellDict *Conf, int affix) makeCompoundFlags(IspellDict *Conf, int affix)
{ {
char *str = Conf->AffixData[affix]; char *str = Conf->AffixData[affix];
return (getCompoundAffixFlagValue(Conf, str) & FF_COMPOUNDFLAGMASK); return (getCompoundAffixFlagValue(Conf, str) & FF_COMPOUNDFLAGMASK);
} }
@ -1700,8 +1703,8 @@ NISortDictionary(IspellDict *Conf)
/* compress affixes */ /* compress affixes */
/* /*
* If we use flag aliases then we need to use Conf->AffixData filled * If we use flag aliases then we need to use Conf->AffixData filled in
* in the NIImportOOAffixes(). * the NIImportOOAffixes().
*/ */
if (Conf->useFlagAliases) if (Conf->useFlagAliases)
{ {

View File

@ -295,8 +295,8 @@ pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval,
while (count < prs.curwords) while (count < prs.curwords)
{ {
/* /*
* Were any stop words removed? If so, fill empty positions * Were any stop words removed? If so, fill empty positions with
* with placeholders linked by an appropriate operator. * placeholders linked by an appropriate operator.
*/ */
if (pos > 0 && pos + 1 < prs.words[count].pos.pos) if (pos > 0 && pos + 1 < prs.words[count].pos.pos)
{ {

View File

@ -267,6 +267,7 @@ datumEstimateSpace(Datum value, bool isnull, bool typByVal, int typLen)
else if (VARATT_IS_EXTERNAL_EXPANDED(value)) else if (VARATT_IS_EXTERNAL_EXPANDED(value))
{ {
ExpandedObjectHeader *eoh = DatumGetEOHP(value); ExpandedObjectHeader *eoh = DatumGetEOHP(value);
sz += EOH_get_flat_size(eoh); sz += EOH_get_flat_size(eoh);
} }
else else

View File

@ -254,7 +254,7 @@ contain2D(RangeBox *range_box, Range *query)
/* Can any rectangle from rect_box contain this argument? */ /* Can any rectangle from rect_box contain this argument? */
static bool static bool
contain4D(RectBox *rect_box, RangeBox * query) contain4D(RectBox *rect_box, RangeBox *query)
{ {
return contain2D(&rect_box->range_box_x, &query->left) && return contain2D(&rect_box->range_box_x, &query->left) &&
contain2D(&rect_box->range_box_y, &query->right); contain2D(&rect_box->range_box_y, &query->right);
@ -442,8 +442,8 @@ spg_box_quad_picksplit(PG_FUNCTION_ARGS)
out->leafTupleDatums = palloc(sizeof(Datum) * in->nTuples); out->leafTupleDatums = palloc(sizeof(Datum) * in->nTuples);
/* /*
* Assign ranges to corresponding nodes according to quadrants * Assign ranges to corresponding nodes according to quadrants relative to
* relative to the "centroid" range * the "centroid" range
*/ */
for (i = 0; i < in->nTuples; i++) for (i = 0; i < in->nTuples; i++)
{ {
@ -484,8 +484,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
} }
/* /*
* We are saving the traversal value or initialize it to an unbounded * We are saving the traversal value or initialize it to an unbounded one, if
* one, if we have just begun to walk the tree. * we have just begun to walk the tree.
*/ */
if (in->traversalValue) if (in->traversalValue)
rect_box = in->traversalValue; rect_box = in->traversalValue;
@ -493,8 +493,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
rect_box = initRectBox(); rect_box = initRectBox();
/* /*
* We are casting the prefix and queries to RangeBoxes for ease of * We are casting the prefix and queries to RangeBoxes for ease of the
* the following operations. * following operations.
*/ */
centroid = getRangeBox(DatumGetBoxP(in->prefixDatum)); centroid = getRangeBox(DatumGetBoxP(in->prefixDatum));
queries = (RangeBox **) palloc(in->nkeys * sizeof(RangeBox *)); queries = (RangeBox **) palloc(in->nkeys * sizeof(RangeBox *));
@ -507,9 +507,9 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes); out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes);
/* /*
* We switch memory context, because we want to allocate memory for * We switch memory context, because we want to allocate memory for new
* new traversal values (next_rect_box) and pass these pieces of * traversal values (next_rect_box) and pass these pieces of memory to
* memory to further call of this function. * further call of this function.
*/ */
old_ctx = MemoryContextSwitchTo(in->traversalMemoryContext); old_ctx = MemoryContextSwitchTo(in->traversalMemoryContext);
@ -587,8 +587,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
else else
{ {
/* /*
* If this node is not selected, we don't need to keep * If this node is not selected, we don't need to keep the next
* the next traversal value in the memory context. * traversal value in the memory context.
*/ */
pfree(next_rect_box); pfree(next_rect_box);
} }
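
spg_box_quad_picksplit above assigns each range to a node "according to quadrants relative to the centroid range". For ordinary 2-D points the same idea reduces to comparing both coordinates against the centroid; here is a small standalone sketch (the SP-GiST code works on 4-D range boxes, plain points are used only for brevity).

#include <stdio.h>

typedef struct { double x, y; } Point;

/* Quadrant 1..4 of p relative to the centroid c. */
static int
quadrant(Point c, Point p)
{
    if (p.x >= c.x)
        return (p.y >= c.y) ? 1 : 4;
    return (p.y >= c.y) ? 2 : 3;
}

int
main(void)
{
    Point centroid = {0.0, 0.0};
    Point pts[] = {{1, 2}, {-1, 2}, {-3, -4}, {5, -6}};
    int i;

    for (i = 0; i < 4; i++)
        printf("point %d -> quadrant %d\n", i, quadrant(centroid, pts[i]));
    return 0;
}
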

View File

@ -1305,7 +1305,7 @@ compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar)
case jbvBool: case jbvBool:
if (aScalar->val.boolean == bScalar->val.boolean) if (aScalar->val.boolean == bScalar->val.boolean)
return 0; return 0;
else if (aScalar->val.boolean >bScalar->val.boolean) else if (aScalar->val.boolean > bScalar->val.boolean)
return 1; return 1;
else else
return -1; return -1;

View File

@ -1002,8 +1002,8 @@ get_array_start(void *state)
{ {
/* /*
* Special case: we should match the entire array. We only need this * Special case: we should match the entire array. We only need this
* at the outermost level because at nested levels the match will * at the outermost level because at nested levels the match will have
* have been started by the outer field or array element callback. * been started by the outer field or array element callback.
*/ */
_state->result_start = _state->lex->token_start; _state->result_start = _state->lex->token_start;
} }
@ -3368,9 +3368,9 @@ jsonb_concat(PG_FUNCTION_ARGS)
*it2; *it2;
/* /*
* If one of the jsonb is empty, just return the other if it's not * If one of the jsonb is empty, just return the other if it's not scalar
* scalar and both are of the same kind. If it's a scalar or they are * and both are of the same kind. If it's a scalar or they are of
* of different kinds we need to perform the concatenation even if one is * different kinds we need to perform the concatenation even if one is
* empty. * empty.
*/ */
if (JB_ROOT_IS_OBJECT(jb1) == JB_ROOT_IS_OBJECT(jb2)) if (JB_ROOT_IS_OBJECT(jb1) == JB_ROOT_IS_OBJECT(jb2))
@ -3481,7 +3481,7 @@ jsonb_delete_idx(PG_FUNCTION_ARGS)
it = JsonbIteratorInit(&in->root); it = JsonbIteratorInit(&in->root);
r = JsonbIteratorNext(&it, &v, false); r = JsonbIteratorNext(&it, &v, false);
Assert (r == WJB_BEGIN_ARRAY); Assert(r == WJB_BEGIN_ARRAY);
n = v.val.array.nElems; n = v.val.array.nElems;
if (idx < 0) if (idx < 0)
@ -3868,8 +3868,8 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
if (level == path_len - 1) if (level == path_len - 1)
{ {
/* /*
* called from jsonb_insert(), it forbids redefining * called from jsonb_insert(), it forbids redefining an
* an existing value * existing value
*/ */
if (op_type & (JB_PATH_INSERT_BEFORE | JB_PATH_INSERT_AFTER)) if (op_type & (JB_PATH_INSERT_BEFORE | JB_PATH_INSERT_AFTER))
ereport(ERROR, ereport(ERROR,
@ -4005,8 +4005,8 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
/* /*
* We should keep current value only in case of * We should keep current value only in case of
* JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER * JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER because
* because otherwise it should be deleted or replaced * otherwise it should be deleted or replaced
*/ */
if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_INSERT_BEFORE)) if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_INSERT_BEFORE))
(void) pushJsonbValue(st, r, &v); (void) pushJsonbValue(st, r, &v);

View File

@ -3397,8 +3397,8 @@ numeric_combine(PG_FUNCTION_ARGS)
state1->NaNcount += state2->NaNcount; state1->NaNcount += state2->NaNcount;
/* /*
* These are currently only needed for moving aggregates, but let's * These are currently only needed for moving aggregates, but let's do
* do the right thing anyway... * the right thing anyway...
*/ */
if (state2->maxScale > state1->maxScale) if (state2->maxScale > state1->maxScale)
{ {
@ -3485,8 +3485,8 @@ numeric_avg_combine(PG_FUNCTION_ARGS)
state1->NaNcount += state2->NaNcount; state1->NaNcount += state2->NaNcount;
/* /*
* These are currently only needed for moving aggregates, but let's * These are currently only needed for moving aggregates, but let's do
* do the right thing anyway... * the right thing anyway...
*/ */
if (state2->maxScale > state1->maxScale) if (state2->maxScale > state1->maxScale)
{ {

View File

@ -613,14 +613,14 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS)
if (has_privs_of_role(GetUserId(), beentry->st_userid)) if (has_privs_of_role(GetUserId(), beentry->st_userid))
{ {
values[2] = ObjectIdGetDatum(beentry->st_progress_command_target); values[2] = ObjectIdGetDatum(beentry->st_progress_command_target);
for(i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++) for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++)
values[i+3] = Int64GetDatum(beentry->st_progress_param[i]); values[i + 3] = Int64GetDatum(beentry->st_progress_param[i]);
} }
else else
{ {
nulls[2] = true; nulls[2] = true;
for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++) for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++)
nulls[i+3] = true; nulls[i + 3] = true;
} }
tuplestore_putvalues(tupstore, tupdesc, values, nulls); tuplestore_putvalues(tupstore, tupdesc, values, nulls);

View File

@ -762,11 +762,12 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
{ {
Datum previousCentroid; Datum previousCentroid;
/* We know that in->prefixDatum in this place is varlena, /*
* We know that in->prefixDatum in this place is varlena,
* because it's a range * because it's a range
*/ */
previousCentroid = datumCopy(in->prefixDatum, false, -1); previousCentroid = datumCopy(in->prefixDatum, false, -1);
out->traversalValues[out->nNodes] = (void *)previousCentroid; out->traversalValues[out->nNodes] = (void *) previousCentroid;
} }
out->nodeNumbers[out->nNodes] = i - 1; out->nodeNumbers[out->nNodes] = i - 1;
out->nNodes++; out->nNodes++;

View File

@ -184,8 +184,8 @@ checkcondition_gin_internal(GinChkVal *gcv, QueryOperand *val, ExecPhraseData *d
int j; int j;
/* /*
* if any val requiring a weight is used or caller * if any val requiring a weight is used or caller needs position
* needs position information then set recheck flag * information then set recheck flag
*/ */
if (val->weight != 0 || data != NULL) if (val->weight != 0 || data != NULL)
*gcv->need_recheck = true; *gcv->need_recheck = true;
@ -236,9 +236,10 @@ TS_execute_ternary(GinChkVal *gcv, QueryItem *curitem)
return !result; return !result;
case OP_PHRASE: case OP_PHRASE:
/* /*
* GIN doesn't contain any information about positions, * GIN doesn't contain any information about positions, treat
* treat OP_PHRASE as OP_AND with recheck requirement * OP_PHRASE as OP_AND with recheck requirement
*/ */
*gcv->need_recheck = true; *gcv->need_recheck = true;
/* FALL THRU */ /* FALL THRU */

View File

@ -136,7 +136,7 @@ parse_phrase_operator(char *buf, int16 *distance)
while (*ptr) while (*ptr)
{ {
switch(state) switch (state)
{ {
case PHRASE_OPEN: case PHRASE_OPEN:
Assert(t_iseq(ptr, '<')); Assert(t_iseq(ptr, '<'));
@ -192,7 +192,7 @@ parse_phrase_operator(char *buf, int16 *distance)
} }
} }
err: err:
*distance = -1; *distance = -1;
return buf; return buf;
} }
@ -696,8 +696,8 @@ parse_tsquery(char *buf,
findoprnd(ptr, query->size, &needcleanup); findoprnd(ptr, query->size, &needcleanup);
/* /*
* QI_VALSTOP nodes should be cleaned * QI_VALSTOP nodes should be cleaned and OP_PHRASE should be pushed
* and OP_PHRASE should be pushed down * down
*/ */
if (needcleanup) if (needcleanup)
return cleanup_fakeval_and_phrase(query); return cleanup_fakeval_and_phrase(query);
@ -852,7 +852,8 @@ infix(INFIX *in, int parentPriority)
in->curpol++; in->curpol++;
if (priority < parentPriority || if (priority < parentPriority ||
(op == OP_PHRASE && (op == OP_PHRASE &&
(priority == parentPriority || /* phrases are not commutative! */ (priority == parentPriority || /* phrases are not
* commutative! */
parentPriority == OP_PRIORITY(OP_AND)))) parentPriority == OP_PRIORITY(OP_AND))))
{ {
needParenthesis = true; needParenthesis = true;
@ -874,7 +875,7 @@ infix(INFIX *in, int parentPriority)
infix(in, priority); infix(in, priority);
/* print operator & right operand */ /* print operator & right operand */
RESIZEBUF(in, 3 + (2 + 10 /* distance */) + (nrm.cur - nrm.buf)); RESIZEBUF(in, 3 + (2 + 10 /* distance */ ) + (nrm.cur - nrm.buf));
switch (op) switch (op)
{ {
case OP_OR: case OP_OR:
@ -923,7 +924,7 @@ tsqueryout(PG_FUNCTION_ARGS)
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen); nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
*(nrm.cur) = '\0'; *(nrm.cur) = '\0';
nrm.op = GETOPERAND(query); nrm.op = GETOPERAND(query);
infix(&nrm, -1 /* lowest priority */); infix(&nrm, -1 /* lowest priority */ );
PG_FREE_IF_COPY(query, 0); PG_FREE_IF_COPY(query, 0);
PG_RETURN_CSTRING(nrm.buf); PG_RETURN_CSTRING(nrm.buf);
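
parse_phrase_operator above steps through a small state machine to read the distance out of a phrase operator such as <3> (with <-> meaning distance 1). A standalone sketch of the same open/digits/close scan follows; parse_phrase_op() is a simplified stand-in written for this example, not the tsquery parser itself.

#include <ctype.h>
#include <stdio.h>

/*
 * Parse a phrase operator of the form "<N>" (or "<->", meaning distance 1)
 * at the start of buf.  Returns a pointer past the operator and sets
 * *distance, or returns buf unchanged with *distance = -1 on failure.
 */
static const char *
parse_phrase_op(const char *buf, int *distance)
{
    const char *ptr = buf;
    int dist = 0;

    if (*ptr++ != '<')
        goto err;
    if (*ptr == '-')
    {
        dist = 1;
        ptr++;
    }
    else
    {
        if (!isdigit((unsigned char) *ptr))
            goto err;
        while (isdigit((unsigned char) *ptr))
            dist = dist * 10 + (*ptr++ - '0');
    }
    if (*ptr++ != '>')
        goto err;
    *distance = dist;
    return ptr;

err:
    *distance = -1;
    return buf;
}

int
main(void)
{
    int d;

    parse_phrase_op("<12> rest", &d);
    printf("distance = %d\n", d);       /* prints 12 */
    parse_phrase_op("<-> rest", &d);
    printf("distance = %d\n", d);       /* prints 1 */
    return 0;
}
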

View File

@ -257,7 +257,9 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
else else
{ {
NODE *res = node; NODE *res = node;
int ndistance, ldistance = 0, rdistance = 0; int ndistance,
ldistance = 0,
rdistance = 0;
ndistance = (node->valnode->qoperator.oper == OP_PHRASE) ? ndistance = (node->valnode->qoperator.oper == OP_PHRASE) ?
node->valnode->qoperator.distance : node->valnode->qoperator.distance :
@ -272,8 +274,8 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
ndistance ? &rdistance : NULL); ndistance ? &rdistance : NULL);
/* /*
* ndistance, ldistance and rdistance are greater than zero * ndistance, ldistance and rdistance are greater than zero if their
* if their corresponding nodes are OP_PHRASE * corresponding nodes are OP_PHRASE
*/ */
if (lresult == V_STOP && rresult == V_STOP) if (lresult == V_STOP && rresult == V_STOP)
@ -287,9 +289,10 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
else if (lresult == V_STOP) else if (lresult == V_STOP)
{ {
res = node->right; res = node->right;
/* /*
* propagate distance from current node to the * propagate distance from current node to the right upper
* right upper subtree. * subtree.
*/ */
if (adddistance && ndistance) if (adddistance && ndistance)
*adddistance = rdistance; *adddistance = rdistance;
@ -298,6 +301,7 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
else if (rresult == V_STOP) else if (rresult == V_STOP)
{ {
res = node->left; res = node->left;
/* /*
* propagate distance from current node to the upper tree. * propagate distance from current node to the upper tree.
*/ */
@ -417,8 +421,8 @@ normalize_phrase_tree(NODE *node)
return node; return node;
/* /*
* We can't swap left-right and work only with the left child * We can't swap left-right and work only with the left child because of
* because of a <-> b != b <-> a * a <-> b != b <-> a
*/ */
distance = node->valnode->qoperator.distance; distance = node->valnode->qoperator.distance;
@ -464,7 +468,7 @@ normalize_phrase_tree(NODE *node)
/* no-op */ /* no-op */
break; break;
default: default:
elog(ERROR,"Wrong type of tsquery node: %d", elog(ERROR, "Wrong type of tsquery node: %d",
node->right->valnode->qoperator.oper); node->right->valnode->qoperator.oper);
} }
} }
@ -476,7 +480,7 @@ normalize_phrase_tree(NODE *node)
* if the node is still OP_PHRASE, check the left subtree, * if the node is still OP_PHRASE, check the left subtree,
* otherwise the whole node will be transformed later. * otherwise the whole node will be transformed later.
*/ */
switch(node->left->valnode->qoperator.oper) switch (node->left->valnode->qoperator.oper)
{ {
case OP_AND: case OP_AND:
/* (a & b) <-> c => (a <-> c) & (b <-> c) */ /* (a & b) <-> c => (a <-> c) & (b <-> c) */
@ -515,7 +519,7 @@ normalize_phrase_tree(NODE *node)
/* no-op */ /* no-op */
break; break;
default: default:
elog(ERROR,"Wrong type of tsquery node: %d", elog(ERROR, "Wrong type of tsquery node: %d",
node->left->valnode->qoperator.oper); node->left->valnode->qoperator.oper);
} }
} }

View File

@ -498,12 +498,16 @@ ts_rank_tt(PG_FUNCTION_ARGS)
typedef struct typedef struct
{ {
union { union
struct { /* compiled doc representation */ {
struct
{ /* compiled doc representation */
QueryItem **items; QueryItem **items;
int16 nitem; int16 nitem;
} query; } query;
struct { /* struct is used for preparing doc representation */ struct
{ /* struct is used for preparing doc
* representation */
QueryItem *item; QueryItem *item;
WordEntry *entry; WordEntry *entry;
} map; } map;
@ -537,8 +541,8 @@ compareDocR(const void *va, const void *vb)
typedef struct typedef struct
{ {
bool operandexists; bool operandexists;
bool reverseinsert; /* indicates insert order, bool reverseinsert; /* indicates insert order, true means
true means descending order */ * descending order */
uint32 npos; uint32 npos;
WordEntryPos pos[MAXQROPOS]; WordEntryPos pos[MAXQROPOS];
} QueryRepresentationOperand; } QueryRepresentationOperand;
@ -586,7 +590,7 @@ resetQueryRepresentation(QueryRepresentation *qr, bool reverseinsert)
{ {
int i; int i;
for(i = 0; i < qr->query->size; i++) for (i = 0; i < qr->query->size; i++)
{ {
qr->operandData[i].operandexists = false; qr->operandData[i].operandexists = false;
qr->operandData[i].reverseinsert = reverseinsert; qr->operandData[i].reverseinsert = reverseinsert;
@ -731,8 +735,8 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen)
doc = (DocRepresentation *) palloc(sizeof(DocRepresentation) * len); doc = (DocRepresentation *) palloc(sizeof(DocRepresentation) * len);
/* /*
* Iterate through query to make DocRepresentation for words and its entries * Iterate through query to make DocRepresentation for words and its
* satisfied by query * entries satisfied by query
*/ */
for (i = 0; i < qr->query->size; i++) for (i = 0; i < qr->query->size; i++)
{ {
@ -806,8 +810,8 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen)
while (rptr - doc < cur) while (rptr - doc < cur)
{ {
if (rptr->pos == (rptr-1)->pos && if (rptr->pos == (rptr - 1)->pos &&
rptr->data.map.entry == (rptr-1)->data.map.entry) rptr->data.map.entry == (rptr - 1)->data.map.entry)
{ {
storage.data.query.items[storage.data.query.nitem] = rptr->data.map.item; storage.data.query.items[storage.data.query.nitem] = rptr->data.map.item;
storage.data.query.nitem++; storage.data.query.nitem++;

View File

@ -276,16 +276,20 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
switch (char_weight) switch (char_weight)
{ {
case 'A': case 'a': case 'A':
case 'a':
weight = 3; weight = 3;
break; break;
case 'B': case 'b': case 'B':
case 'b':
weight = 2; weight = 2;
break; break;
case 'C': case 'c': case 'C':
case 'c':
weight = 1; weight = 1;
break; break;
case 'D': case 'd': case 'D':
case 'd':
weight = 0; weight = 0;
break; break;
default: default:
@ -301,9 +305,9 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
&dlexemes, &nulls, &nlexemes); &dlexemes, &nulls, &nlexemes);
/* /*
* Assuming that lexemes array is significantly shorter than tsvector * Assuming that lexemes array is significantly shorter than tsvector we
* we can iterate through lexemes performing binary search * can iterate through lexemes performing binary search of each lexeme
* of each lexeme from lexemes in tsvector. * from lexemes in tsvector.
*/ */
for (i = 0; i < nlexemes; i++) for (i = 0; i < nlexemes; i++)
{ {
@ -323,6 +327,7 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
if (lex_pos >= 0 && (j = POSDATALEN(tsout, entry + lex_pos)) != 0) if (lex_pos >= 0 && (j = POSDATALEN(tsout, entry + lex_pos)) != 0)
{ {
WordEntryPos *p = POSDATAPTR(tsout, entry + lex_pos); WordEntryPos *p = POSDATAPTR(tsout, entry + lex_pos);
while (j--) while (j--)
{ {
WEP_SETWEIGHT(*p, weight); WEP_SETWEIGHT(*p, weight);
@ -393,7 +398,7 @@ tsvector_bsearch(const TSVector tsv, char *lexeme, int lexeme_len)
while (StopLow < StopHigh) while (StopLow < StopHigh)
{ {
StopMiddle = (StopLow + StopHigh)/2; StopMiddle = (StopLow + StopHigh) / 2;
cmp = tsCompareString(lexeme, lexeme_len, cmp = tsCompareString(lexeme, lexeme_len,
STRPTR(tsv) + arrin[StopMiddle].pos, STRPTR(tsv) + arrin[StopMiddle].pos,
@ -440,13 +445,15 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
*arrout; *arrout;
char *data = STRPTR(tsv), char *data = STRPTR(tsv),
*dataout; *dataout;
int i, j, k, int i,
j,
k,
curoff; curoff;
/* /*
* Here we overestimate tsout size, since we don't know exact size * Here we overestimate tsout size, since we don't know exact size
* occupied by positions and weights. We will set exact size later * occupied by positions and weights. We will set exact size later after a
* after a pass through TSVector. * pass through TSVector.
*/ */
tsout = (TSVector) palloc0(VARSIZE(tsv)); tsout = (TSVector) palloc0(VARSIZE(tsv));
arrout = ARRPTR(tsout); arrout = ARRPTR(tsout);
@ -465,10 +472,11 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
{ {
/* /*
* Here we should check whether current i is present in * Here we should check whether current i is present in
* indices_to_delete or not. Since indices_to_delete is already * indices_to_delete or not. Since indices_to_delete is already sorted
* sorted we can advance its index only when we have a match. * we can advance its index only when we have a match.
*/ */
if (k < indices_count && i == indices_to_delete[k]){ if (k < indices_count && i == indices_to_delete[k])
{
k++; k++;
continue; continue;
} }
@ -481,8 +489,9 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
curoff += arrin[i].len; curoff += arrin[i].len;
if (arrin[i].haspos) if (arrin[i].haspos)
{ {
int len = POSDATALEN(tsv, arrin+i) * sizeof(WordEntryPos) + int len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos) +
sizeof(uint16); sizeof(uint16);
curoff = SHORTALIGN(curoff); curoff = SHORTALIGN(curoff);
memcpy(dataout + curoff, memcpy(dataout + curoff,
STRPTR(tsv) + SHORTALIGN(arrin[i].pos + arrin[i].len), STRPTR(tsv) + SHORTALIGN(arrin[i].pos + arrin[i].len),
@ -494,9 +503,10 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
} }
/* /*
* After the pass through TSVector k should equal exactly indices_count. * After the pass through TSVector k should equal exactly
* If it isn't then the caller provided us with indices outside of * indices_count. If it isn't then the caller provided us with indices
* [0, tsv->size) range and estimation of tsout's size is wrong. * outside of [0, tsv->size) range and estimation of tsout's size is
* wrong.
*/ */
Assert(k == indices_count); Assert(k == indices_count);
@ -538,7 +548,8 @@ tsvector_delete_arr(PG_FUNCTION_ARGS)
TSVector tsin = PG_GETARG_TSVECTOR(0), TSVector tsin = PG_GETARG_TSVECTOR(0),
tsout; tsout;
ArrayType *lexemes = PG_GETARG_ARRAYTYPE_P(1); ArrayType *lexemes = PG_GETARG_ARRAYTYPE_P(1);
int i, nlex, int i,
nlex,
skip_count, skip_count,
*skip_indices; *skip_indices;
Datum *dlexemes; Datum *dlexemes;
@ -548,8 +559,8 @@ tsvector_delete_arr(PG_FUNCTION_ARGS)
&dlexemes, &nulls, &nlex); &dlexemes, &nulls, &nlex);
/* /*
* In typical use case array of lexemes to delete is relatively small. * In typical use case array of lexemes to delete is relatively small. So
* So here we are optimizing things for that scenario: iterate through lexarr * here we are optimizing things for that scenario: iterate through lexarr
* performing binary search of each lexeme from lexarr in tsvector. * performing binary search of each lexeme from lexarr in tsvector.
*/ */
skip_indices = palloc0(nlex * sizeof(int)); skip_indices = palloc0(nlex * sizeof(int));
@ -641,8 +652,8 @@ tsvector_unnest(PG_FUNCTION_ARGS)
/* /*
* Internally tsvector stores position and weight in the same * Internally tsvector stores position and weight in the same
* uint16 (2 bits for weight, 14 for position). Here we extract that * uint16 (2 bits for weight, 14 for position). Here we extract
* in two separate arrays. * that in two separate arrays.
*/ */
posv = _POSVECPTR(tsin, arrin + i); posv = _POSVECPTR(tsin, arrin + i);
positions = palloc(posv->npos * sizeof(Datum)); positions = palloc(posv->npos * sizeof(Datum));
@ -772,7 +783,8 @@ tsvector_filter(PG_FUNCTION_ARGS)
Datum *dweights; Datum *dweights;
bool *nulls; bool *nulls;
int nweights; int nweights;
int i, j; int i,
j;
int cur_pos = 0; int cur_pos = 0;
char mask = 0; char mask = 0;
@ -791,16 +803,20 @@ tsvector_filter(PG_FUNCTION_ARGS)
char_weight = DatumGetChar(dweights[i]); char_weight = DatumGetChar(dweights[i]);
switch (char_weight) switch (char_weight)
{ {
case 'A': case 'a': case 'A':
case 'a':
mask = mask | 8; mask = mask | 8;
break; break;
case 'B': case 'b': case 'B':
case 'b':
mask = mask | 4; mask = mask | 4;
break; break;
case 'C': case 'c': case 'C':
case 'c':
mask = mask | 2; mask = mask | 2;
break; break;
case 'D': case 'd': case 'D':
case 'd':
mask = mask | 1; mask = mask | 1;
break; break;
default: default:
@ -846,7 +862,7 @@ tsvector_filter(PG_FUNCTION_ARGS)
memcpy(dataout + cur_pos, datain + arrin[i].pos, arrin[i].len); memcpy(dataout + cur_pos, datain + arrin[i].pos, arrin[i].len);
posvout->npos = npos; posvout->npos = npos;
cur_pos += SHORTALIGN(arrin[i].len); cur_pos += SHORTALIGN(arrin[i].len);
cur_pos += POSDATALEN(tsout, arrout+j) * sizeof(WordEntryPos) + cur_pos += POSDATALEN(tsout, arrout + j) * sizeof(WordEntryPos) +
sizeof(uint16); sizeof(uint16);
j++; j++;
} }
@ -1276,6 +1292,7 @@ checkcondition_str(void *checkval, QueryOperand *val, ExecPhraseData *data)
WordEntryPos *allpos = NULL; WordEntryPos *allpos = NULL;
int npos = 0, int npos = 0,
totalpos = 0; totalpos = 0;
/* /*
* there was a failed exact search, so we should scan further to find * there was a failed exact search, so we should scan further to find
* a prefix match. We also need to do so if caller needs position info * a prefix match. We also need to do so if caller needs position info
@ -1371,22 +1388,24 @@ TS_phrase_execute(QueryItem *curitem,
return false; return false;
/* /*
* if at least one of the operands has no position * if at least one of the operands has no position information,
* information, fall back to AND operation. * fall back to AND operation.
*/ */
if (Ldata.npos == 0 || Rdata.npos == 0) if (Ldata.npos == 0 || Rdata.npos == 0)
return true; return true;
/* /*
* Result of the operation is a list of the * Result of the operation is a list of the corresponding positions of
* corresponding positions of RIGHT operand. * RIGHT operand.
*/ */
if (data) if (data)
{ {
if (!Rdata.allocated) if (!Rdata.allocated)
/* /*
* OP_PHRASE is based on the OP_AND, so the number of resulting * OP_PHRASE is based on the OP_AND, so the number of
* positions could not be greater than the total amount of operands. * resulting positions could not be greater than the total
* amount of operands.
*/ */
data->pos = palloc(sizeof(WordEntryPos) * Min(Ldata.npos, Rdata.npos)); data->pos = palloc(sizeof(WordEntryPos) * Min(Ldata.npos, Rdata.npos));
else else
@ -1439,8 +1458,8 @@ TS_phrase_execute(QueryItem *curitem,
else else
{ {
/* /*
* Go to the next Rpos, because Lpos * Go to the next Rpos, because Lpos is ahead of the
* is ahead of the current Rpos * current Rpos
*/ */
break; break;
} }
@ -1484,7 +1503,7 @@ TS_execute(QueryItem *curitem, void *checkval, bool calcnot,
if (curitem->type == QI_VAL) if (curitem->type == QI_VAL)
return chkcond(checkval, (QueryOperand *) curitem, return chkcond(checkval, (QueryOperand *) curitem,
NULL /* we don't need position info */); NULL /* we don't need position info */ );
switch (curitem->qoperator.oper) switch (curitem->qoperator.oper)
{ {
@ -1546,6 +1565,7 @@ tsquery_requires_match(QueryItem *curitem)
return false; return false;
case OP_PHRASE: case OP_PHRASE:
/* /*
* Treat OP_PHRASE as OP_AND here * Treat OP_PHRASE as OP_AND here
*/ */
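
Several of the tsvector hunks above (tsvector_setweight_by_filter, tsvector_delete_arr) rely on the tsvector keeping its lexemes sorted, so each lexeme from a short input array can be located by binary search, as tsvector_bsearch does. A standalone sketch over a plain sorted array of C strings; strcmp stands in for tsCompareString, and the array is invented for the example.

#include <stdio.h>
#include <string.h>

/* Return the index of key in the sorted array lex[0..n-1], or -1. */
static int
lexeme_bsearch(const char *const lex[], int n, const char *key)
{
    int lo = 0, hi = n;         /* search the half-open range [lo, hi) */

    while (lo < hi)
    {
        int mid = (lo + hi) / 2;
        int cmp = strcmp(key, lex[mid]);

        if (cmp == 0)
            return mid;
        if (cmp < 0)
            hi = mid;
        else
            lo = mid + 1;
    }
    return -1;
}

int
main(void)
{
    const char *const lexemes[] = {"cat", "dog", "fish", "zebra"};

    printf("%d %d\n",
           lexeme_bsearch(lexemes, 4, "fish"),
           lexeme_bsearch(lexemes, 4, "frog"));  /* prints "2 -1" */
    return 0;
}
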

Some files were not shown because too many files have changed in this diff