pgindent run for 9.6
parent 9164deea2f
commit 4bc424b968
@@ -209,8 +209,8 @@ static void
 explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
 	/*
-	 * For rate sampling, randomly choose top-level statement. Either
-	 * all nested statements will be explained or none will.
+	 * For rate sampling, randomly choose top-level statement. Either all
+	 * nested statements will be explained or none will.
 	 */
 	if (auto_explain_log_min_duration >= 0 && nesting_level == 0)
 		current_query_sampled = (random() < auto_explain_sample_rate *
@@ -33,8 +33,8 @@ PG_MODULE_MAGIC;
 typedef struct
 {
 	BloomState	blstate;		/* bloom index state */
-	MemoryContext tmpCtx;		/* temporary memory context reset after
-								 * each tuple */
+	MemoryContext tmpCtx;		/* temporary memory context reset after each
+								 * tuple */
 	char		data[BLCKSZ];	/* cached page */
 	int64		count;			/* number of tuples in cached page */
 } BloomBuildState;
@@ -140,8 +140,8 @@ blbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 									   bloomBuildCallback, (void *) &buildstate);
 
 	/*
-	 * There are could be some items in cached page. Flush this page
-	 * if needed.
+	 * There are could be some items in cached page. Flush this page if
+	 * needed.
 	 */
 	if (buildstate.count > 0)
 		flushCachedPage(index, &buildstate);
@@ -33,10 +33,9 @@ typedef struct BloomPageOpaqueData
 {
 	OffsetNumber maxoff;		/* number of index tuples on page */
 	uint16		flags;			/* see bit definitions below */
-	uint16		unused;			/* placeholder to force maxaligning of size
-								 * of BloomPageOpaqueData and to place
-								 * bloom_page_id exactly at the end of page
-								 */
+	uint16		unused;			/* placeholder to force maxaligning of size of
+								 * BloomPageOpaqueData and to place
+								 * bloom_page_id exactly at the end of page */
 	uint16		bloom_page_id;	/* for identification of BLOOM indexes */
 } BloomPageOpaqueData;
 
@@ -102,8 +101,8 @@ typedef struct BloomOptions
 {
 	int32		vl_len_;		/* varlena header (do not touch directly!) */
 	int			bloomLength;	/* length of signature in words (not bits!) */
-	int			bitSize[INDEX_MAX_KEYS];	/* # of bits generated for each
-											 * index key */
+	int			bitSize[INDEX_MAX_KEYS];	/* # of bits generated for
+											 * each index key */
 } BloomOptions;
 
 /*
@@ -176,14 +175,14 @@ typedef BloomScanOpaqueData *BloomScanOpaque;
 /* blutils.c */
 extern void _PG_init(void);
 extern Datum blhandler(PG_FUNCTION_ARGS);
-extern void initBloomState(BloomState * state, Relation index);
+extern void initBloomState(BloomState *state, Relation index);
 extern void BloomFillMetapage(Relation index, Page metaPage);
 extern void BloomInitMetapage(Relation index);
 extern void BloomInitPage(Page page, uint16 flags);
 extern Buffer BloomNewBuffer(Relation index);
-extern void signValue(BloomState * state, BloomSignatureWord * sign, Datum value, int attno);
-extern BloomTuple *BloomFormTuple(BloomState * state, ItemPointer iptr, Datum *values, bool *isnull);
-extern bool BloomPageAddItem(BloomState * state, Page page, BloomTuple * tuple);
+extern void signValue(BloomState *state, BloomSignatureWord *sign, Datum value, int attno);
+extern BloomTuple *BloomFormTuple(BloomState *state, ItemPointer iptr, Datum *values, bool *isnull);
+extern bool BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple);
 
 /* blvalidate.c */
 extern bool blvalidate(Oid opclassoid);
@@ -37,6 +37,7 @@ PG_FUNCTION_INFO_V1(blhandler);
 
 /* Kind of relation options for bloom index */
 static relopt_kind bl_relopt_kind;
+
 /* parse table for fillRelOptions */
 static relopt_parse_elt bl_relopt_tab[INDEX_MAX_KEYS + 1];
 
@@ -215,7 +216,9 @@ myRand(void)
 	 * October 1988, p. 1195.
 	 *----------
 	 */
-	int32 hi, lo, x;
+	int32		hi,
+				lo,
+				x;
 
 	/* Must be in [1, 0x7ffffffe] range at this point. */
 	hi = next / 127773;
@@ -109,8 +109,8 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 							OffsetNumberNext(BloomPageGetMaxOffset(page))));
 
 		/*
-		 * Add page to notFullPage list if we will not mark page as deleted and
-		 * there is a free space on it
+		 * Add page to notFullPage list if we will not mark page as deleted
+		 * and there is a free space on it
 		 */
 		if (BloomPageGetMaxOffset(page) != 0 &&
 			BloomPageGetFreeSpace(&state, page) > state.sizeOfBloomTuple &&
@@ -444,9 +444,9 @@ ean2ISBN(char *isn)
 	unsigned	check;
 
 	/*
-	 * The number should come in this format: 978-0-000-00000-0
-	 * or may be an ISBN-13 number, 979-..., which does not have a short
-	 * representation. Do the short output version if possible.
+	 * The number should come in this format: 978-0-000-00000-0 or may be an
+	 * ISBN-13 number, 979-..., which does not have a short representation. Do
+	 * the short output version if possible.
 	 */
 	if (strncmp("978-", isn, 4) == 0)
 	{
@@ -318,10 +318,10 @@ tuple_data_split_internal(Oid relid, char *tupdata,
 		is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits);
 
 		/*
-		 * Tuple header can specify less attributes than tuple descriptor
-		 * as ALTER TABLE ADD COLUMN without DEFAULT keyword does not
-		 * actually change tuples in pages, so attributes with numbers greater
-		 * than (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
+		 * Tuple header can specify less attributes than tuple descriptor as
+		 * ALTER TABLE ADD COLUMN without DEFAULT keyword does not actually
+		 * change tuples in pages, so attributes with numbers greater than
+		 * (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
 		 */
 		if (i >= (t_infomask2 & HEAP_NATTS_MASK))
 			is_null = true;
@@ -334,6 +334,7 @@ tuple_data_split_internal(Oid relid, char *tupdata,
 			{
 				off = att_align_pointer(off, tupdesc->attrs[i]->attalign, -1,
 										tupdata + off);
+
 				/*
 				 * As VARSIZE_ANY throws an exception if it can't properly
 				 * detect the type of external storage in macros VARTAG_SIZE,
@@ -293,7 +293,8 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
 			}
 
 			/*
-			 * See comment in gin_trgm_consistent() about * upper bound formula
+			 * See comment in gin_trgm_consistent() about * upper bound
+			 * formula
 			 */
 			res = (nkeys == 0)
 				? GIN_FALSE : (((((float4) ntrue) / ((float4) nkeys)) >= nlimit)
@@ -330,9 +331,9 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
 	else
 	{
 		/*
-		 * As trigramsMatchGraph implements a monotonic boolean function,
-		 * promoting all GIN_MAYBE keys to GIN_TRUE will give a
-		 * conservative result.
+		 * As trigramsMatchGraph implements a monotonic boolean
+		 * function, promoting all GIN_MAYBE keys to GIN_TRUE will
+		 * give a conservative result.
 		 */
 		boolcheck = (bool *) palloc(sizeof(bool) * nkeys);
 		for (i = 0; i < nkeys; i++)
@@ -296,6 +296,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
 
 	if (GIST_LEAF(entry))
 	{							/* all leafs contains orig trgm */
+
 		/*
 		 * Prevent gcc optimizing the tmpsml variable using volatile
 		 * keyword. Otherwise comparison of nlimit and tmpsml may give
@@ -476,12 +477,14 @@ gtrgm_distance(PG_FUNCTION_ARGS)
 	*recheck = strategy == WordDistanceStrategyNumber;
 	if (GIST_LEAF(entry))
 	{							/* all leafs contains orig trgm */
+
 		/*
 		 * Prevent gcc optimizing the sml variable using volatile
 		 * keyword. Otherwise res can differ from the
 		 * word_similarity_dist_op() function.
 		 */
 		float4 volatile sml = cnt_sml(qtrg, key, *recheck);
 
 		res = 1.0 - sml;
 	}
 	else if (ISALLTRUE(key))
@@ -362,7 +362,8 @@ static pos_trgm *
 make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
 {
 	pos_trgm   *result;
-	int			i, len = len1 + len2;
+	int			i,
+				len = len1 + len2;
 
 	result = (pos_trgm *) palloc(sizeof(pos_trgm) * len);
 
@@ -387,8 +388,8 @@ make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
 static int
 comp_ptrgm(const void *v1, const void *v2)
 {
-	const pos_trgm *p1 = (const pos_trgm *)v1;
-	const pos_trgm *p2 = (const pos_trgm *)v2;
+	const pos_trgm *p1 = (const pos_trgm *) v1;
+	const pos_trgm *p2 = (const pos_trgm *) v2;
 	int			cmp;
 
 	cmp = CMPTRGM(p1->trg, p2->trg);
@@ -488,10 +489,11 @@ iterate_word_similarity(int *trg2indexes,
 				lower = tmp_lower;
 				count = tmp_count;
 			}
 
 			/*
 			 * if we only check that word similarity is greater than
-			 * pg_trgm.word_similarity_threshold we do not need to calculate
-			 * a maximum similarity.
+			 * pg_trgm.word_similarity_threshold we do not need to
+			 * calculate a maximum similarity.
 			 */
 			if (check_only && smlr_cur >= word_similarity_threshold)
 				break;
@@ -506,6 +508,7 @@ iterate_word_similarity(int *trg2indexes,
 			}
 
 			smlr_max = Max(smlr_max, smlr_cur);
+
 			/*
 			 * if we only check that word similarity is greater than
 			 * pg_trgm.word_similarity_threshold we do not need to calculate a
@@ -517,6 +520,7 @@ iterate_word_similarity(int *trg2indexes,
 			for (tmp_lower = prev_lower; tmp_lower < lower; tmp_lower++)
 			{
 				int			tmp_trgindex;
+
 				tmp_trgindex = trg2indexes[tmp_lower];
 				if (lastpos[tmp_trgindex] == tmp_lower)
 					lastpos[tmp_trgindex] = -1;
@@ -568,8 +572,8 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
 	protect_out_of_mem(slen1 + slen2);
 
 	/* Make positional trigrams */
-	trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) * 3);
-	trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) * 3);
+	trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) *3);
+	trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) *3);
 
 	len1 = generate_trgm_only(trg1, str1, slen1);
 	len2 = generate_trgm_only(trg2, str2, slen2);
@@ -595,6 +599,7 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
 		if (i > 0)
 		{
 			int			cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg);
+
 			if (cmp != 0)
 			{
 				if (found[j])
@@ -301,7 +301,7 @@ collect_visibility_data(Oid relid, bool include_pd)
 	rel = relation_open(relid, AccessShareLock);
 
 	nblocks = RelationGetNumberOfBlocks(rel);
-	info = palloc0(offsetof(vbits, bits) + nblocks);
+	info = palloc0(offsetof(vbits, bits) +nblocks);
 	info->next = 0;
 	info->count = nblocks;
 
@@ -320,8 +320,8 @@ collect_visibility_data(Oid relid, bool include_pd)
 			info->bits[blkno] |= (1 << 1);
 
 		/*
-		 * Page-level data requires reading every block, so only get it if
-		 * the caller needs it. Use a buffer access strategy, too, to prevent
+		 * Page-level data requires reading every block, so only get it if the
+		 * caller needs it. Use a buffer access strategy, too, to prevent
 		 * cache-trashing.
 		 */
 		if (include_pd)
@@ -675,9 +675,9 @@ pgfdw_xact_callback(XactEvent event, void *arg)
 				/*
 				 * If a command has been submitted to the remote server by
 				 * using an asynchronous execution function, the command
-				 * might not have yet completed. Check to see if a command
-				 * is still being processed by the remote server, and if so,
-				 * request cancellation of the command.
+				 * might not have yet completed. Check to see if a
+				 * command is still being processed by the remote server,
+				 * and if so, request cancellation of the command.
 				 */
 				if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
 				{
@@ -798,11 +798,11 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
 			entry->have_error = true;
 
 			/*
-			 * If a command has been submitted to the remote server by using an
-			 * asynchronous execution function, the command might not have yet
-			 * completed. Check to see if a command is still being processed by
-			 * the remote server, and if so, request cancellation of the
-			 * command.
+			 * If a command has been submitted to the remote server by using
+			 * an asynchronous execution function, the command might not have
+			 * yet completed. Check to see if a command is still being
+			 * processed by the remote server, and if so, request cancellation
+			 * of the command.
 			 */
 			if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
 			{
@@ -1583,8 +1583,8 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
 		/*
 		 * All other system attributes are fetched as 0, except for table OID,
 		 * which is fetched as the local table OID. However, we must be
-		 * careful; the table could be beneath an outer join, in which case
-		 * it must go to NULL whenever the rest of the row does.
+		 * careful; the table could be beneath an outer join, in which case it
+		 * must go to NULL whenever the rest of the row does.
 		 */
 		Oid			fetchval = 0;
 
@@ -1633,10 +1633,10 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
 					   0 - FirstLowInvalidHeapAttributeNumber);
 
 	/*
-	 * In case the whole-row reference is under an outer join then it has to
-	 * go NULL whenver the rest of the row goes NULL. Deparsing a join query
-	 * would always involve multiple relations, thus qualify_col would be
-	 * true.
+	 * In case the whole-row reference is under an outer join then it has
+	 * to go NULL whenver the rest of the row goes NULL. Deparsing a join
+	 * query would always involve multiple relations, thus qualify_col
+	 * would be true.
 	 */
 	if (qualify_col)
 	{
@@ -1652,7 +1652,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
 
 	/* Complete the CASE WHEN statement started above. */
 	if (qualify_col)
-		appendStringInfo(buf," END");
+		appendStringInfo(buf, " END");
 
 	heap_close(rel, NoLock);
 	bms_free(attrs_used);
@@ -135,7 +135,7 @@ postgres_fdw_validator(PG_FUNCTION_ARGS)
 		{
 			int			fetch_size;
 
-			fetch_size = strtol(defGetString(def), NULL,10);
+			fetch_size = strtol(defGetString(def), NULL, 10);
 			if (fetch_size <= 0)
 				ereport(ERROR,
 						(errcode(ERRCODE_SYNTAX_ERROR),
@@ -4063,19 +4063,20 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
 
 	/*
 	 * Pull the other remote conditions from the joining relations into join
-	 * clauses or other remote clauses (remote_conds) of this relation wherever
-	 * possible. This avoids building subqueries at every join step, which is
-	 * not currently supported by the deparser logic.
+	 * clauses or other remote clauses (remote_conds) of this relation
+	 * wherever possible. This avoids building subqueries at every join step,
+	 * which is not currently supported by the deparser logic.
 	 *
 	 * For an inner join, clauses from both the relations are added to the
-	 * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from the
-	 * outer side are added to remote_conds since those can be evaluated after
-	 * the join is evaluated. The clauses from inner side are added to the
-	 * joinclauses, since they need to evaluated while constructing the join.
+	 * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from
+	 * the outer side are added to remote_conds since those can be evaluated
+	 * after the join is evaluated. The clauses from inner side are added to
+	 * the joinclauses, since they need to evaluated while constructing the
+	 * join.
 	 *
-	 * For a FULL OUTER JOIN, the other clauses from either relation can not be
-	 * added to the joinclauses or remote_conds, since each relation acts as an
-	 * outer relation for the other. Consider such full outer join as
+	 * For a FULL OUTER JOIN, the other clauses from either relation can not
+	 * be added to the joinclauses or remote_conds, since each relation acts
+	 * as an outer relation for the other. Consider such full outer join as
 	 * unshippable because of the reasons mentioned above in this comment.
 	 *
 	 * The joining sides can not have local conditions, thus no need to test
@@ -101,7 +101,8 @@ static relopt_int intRelOpts[] =
 			"fillfactor",
 			"Packs table pages only to this percentage",
 			RELOPT_KIND_HEAP,
-			ShareUpdateExclusiveLock	/* since it applies only to later inserts */
+			ShareUpdateExclusiveLock	/* since it applies only to later
+										 * inserts */
 		},
 		HEAP_DEFAULT_FILLFACTOR, HEAP_MIN_FILLFACTOR, 100
 	},
@@ -110,7 +111,8 @@ static relopt_int intRelOpts[] =
 			"fillfactor",
 			"Packs btree index pages only to this percentage",
 			RELOPT_KIND_BTREE,
-			ShareUpdateExclusiveLock	/* since it applies only to later inserts */
+			ShareUpdateExclusiveLock	/* since it applies only to later
+										 * inserts */
 		},
 		BTREE_DEFAULT_FILLFACTOR, BTREE_MIN_FILLFACTOR, 100
 	},
@@ -119,7 +121,8 @@ static relopt_int intRelOpts[] =
 			"fillfactor",
 			"Packs hash index pages only to this percentage",
 			RELOPT_KIND_HASH,
-			ShareUpdateExclusiveLock	/* since it applies only to later inserts */
+			ShareUpdateExclusiveLock	/* since it applies only to later
+										 * inserts */
 		},
 		HASH_DEFAULT_FILLFACTOR, HASH_MIN_FILLFACTOR, 100
 	},
@@ -128,7 +131,8 @@ static relopt_int intRelOpts[] =
 			"fillfactor",
 			"Packs gist index pages only to this percentage",
 			RELOPT_KIND_GIST,
-			ShareUpdateExclusiveLock	/* since it applies only to later inserts */
+			ShareUpdateExclusiveLock	/* since it applies only to later
+										 * inserts */
 		},
 		GIST_DEFAULT_FILLFACTOR, GIST_MIN_FILLFACTOR, 100
 	},
@@ -137,7 +141,8 @@ static relopt_int intRelOpts[] =
 			"fillfactor",
 			"Packs spgist index pages only to this percentage",
 			RELOPT_KIND_SPGIST,
-			ShareUpdateExclusiveLock	/* since it applies only to later inserts */
+			ShareUpdateExclusiveLock	/* since it applies only to later
+										 * inserts */
 		},
 		SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100
 	},
@@ -745,18 +745,17 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 	bool		inVacuum = (stats == NULL);
 
 	/*
-	 * We would like to prevent concurrent cleanup process. For
-	 * that we will lock metapage in exclusive mode using LockPage()
-	 * call. Nobody other will use that lock for metapage, so
-	 * we keep possibility of concurrent insertion into pending list
+	 * We would like to prevent concurrent cleanup process. For that we will
+	 * lock metapage in exclusive mode using LockPage() call. Nobody other
+	 * will use that lock for metapage, so we keep possibility of concurrent
+	 * insertion into pending list
 	 */
 
 	if (inVacuum)
 	{
 		/*
-		 * We are called from [auto]vacuum/analyze or
-		 * gin_clean_pending_list() and we would like to wait
-		 * concurrent cleanup to finish.
+		 * We are called from [auto]vacuum/analyze or gin_clean_pending_list()
+		 * and we would like to wait concurrent cleanup to finish.
 		 */
 		LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
 		workMemory =
@@ -766,9 +765,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 	else
 	{
 		/*
-		 * We are called from regular insert and if we see
-		 * concurrent cleanup just exit in hope that concurrent
-		 * process will clean up pending list.
+		 * We are called from regular insert and if we see concurrent cleanup
+		 * just exit in hope that concurrent process will clean up pending
+		 * list.
 		 */
 		if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock))
 			return;
@@ -829,9 +828,10 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 		Assert(!GinPageIsDeleted(page));
 
 		/*
-		 * Are we walk through the page which as we remember was a tail when we
-		 * start our cleanup? But if caller asks us to clean up whole pending
-		 * list then ignore old tail, we will work until list becomes empty.
+		 * Are we walk through the page which as we remember was a tail when
+		 * we start our cleanup? But if caller asks us to clean up whole
+		 * pending list then ignore old tail, we will work until list becomes
+		 * empty.
 		 */
 		if (blkno == blknoFinish && full_clean == false)
 			cleanupFinish = true;
@@ -917,8 +917,8 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 											 * locking */
 
 			/*
-			 * remove read pages from pending list, at this point all
-			 * content of read pages is in regular structure
+			 * remove read pages from pending list, at this point all content
+			 * of read pages is in regular structure
 			 */
 			shiftList(index, metabuffer, blkno, fill_fsm, stats);
 
@@ -961,9 +961,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
 	ReleaseBuffer(metabuffer);
 
 	/*
-	 * As pending list pages can have a high churn rate, it is
-	 * desirable to recycle them immediately to the FreeSpace Map when
-	 * ordinary backends clean the list.
+	 * As pending list pages can have a high churn rate, it is desirable to
+	 * recycle them immediately to the FreeSpace Map when ordinary backends
+	 * clean the list.
 	 */
 	if (fsm_vac && fill_fsm)
 		IndexFreeSpaceMapVacuum(index);
@@ -281,7 +281,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
 								 &htup->t_self);
 
 	/* If we've maxed out our available memory, dump everything to the index */
-	if (buildstate->accum.allocatedMemory >= (Size)maintenance_work_mem * 1024L)
+	if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L)
 	{
 		ItemPointerData *list;
 		Datum		key;
@@ -540,8 +540,10 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	{
 		/* Yes, so initialize stats to zeroes */
 		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+
 		/*
-		 * and cleanup any pending inserts */
+		 * and cleanup any pending inserts
+		 */
 		ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
 						 false, stats);
 	}
@@ -1499,7 +1499,8 @@ gistvacuumpage(Relation rel, Page page, Buffer buffer)
 {
 	OffsetNumber deletable[MaxIndexTuplesPerPage];
 	int			ndeletable = 0;
-	OffsetNumber offnum, maxoff;
+	OffsetNumber offnum,
+				maxoff;
 
 	Assert(GistPageIsLeaf(page));
 
@@ -57,10 +57,11 @@ gistkillitems(IndexScanDesc scan)
 	page = BufferGetPage(buffer);
 
 	/*
-	 * If page LSN differs it means that the page was modified since the last read.
-	 * killedItems could be not valid so LP_DEAD hints applying is not safe.
+	 * If page LSN differs it means that the page was modified since the last
+	 * read. killedItems could be not valid so LP_DEAD hints applying is not
+	 * safe.
 	 */
-	if(PageGetLSN(page) != so->curPageLSN)
+	if (PageGetLSN(page) != so->curPageLSN)
 	{
 		UnlockReleaseBuffer(buffer);
 		so->numKilled = 0;		/* reset counter */
@@ -70,8 +71,8 @@ gistkillitems(IndexScanDesc scan)
 	Assert(GistPageIsLeaf(page));
 
 	/*
-	 * Mark all killedItems as dead. We need no additional recheck,
-	 * because, if page was modified, pageLSN must have changed.
+	 * Mark all killedItems as dead. We need no additional recheck, because,
+	 * if page was modified, pageLSN must have changed.
 	 */
 	for (i = 0; i < so->numKilled; i++)
 	{
@@ -400,10 +401,11 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
 		 * If the scan specifies not to return killed tuples, then we treat a
 		 * killed tuple as not passing the qual.
 		 */
-		if(scan->ignore_killed_tuples && ItemIdIsDead(iid))
+		if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
 			continue;
 
 		it = (IndexTuple) PageGetItem(page, iid);
 
 		/*
 		 * Must call gistindex_keytest in tempCxt, and clean up any leftover
 		 * junk afterward.
@@ -230,8 +230,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
 			ScanKey		skey = scan->keyData + i;
 
 			/*
-			 * Copy consistent support function to ScanKey structure
-			 * instead of function implementing filtering operator.
+			 * Copy consistent support function to ScanKey structure instead
+			 * of function implementing filtering operator.
 			 */
 			fmgr_info_copy(&(skey->sk_func),
 						   &(so->giststate->consistentFn[skey->sk_attno - 1]),
@@ -303,8 +303,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
 			so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid);
 
 			/*
-			 * Copy distance support function to ScanKey structure
-			 * instead of function implementing ordering operator.
+			 * Copy distance support function to ScanKey structure instead of
+			 * function implementing ordering operator.
 			 */
 			fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
 
@@ -191,10 +191,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
 		return;
 
 	/*
-	 * It might seem like multiplying the number of lock waiters by as much
-	 * as 20 is too aggressive, but benchmarking revealed that smaller numbers
-	 * were insufficient. 512 is just an arbitrary cap to prevent pathological
-	 * results.
+	 * It might seem like multiplying the number of lock waiters by as much as
+	 * 20 is too aggressive, but benchmarking revealed that smaller numbers
+	 * were insufficient. 512 is just an arbitrary cap to prevent
+	 * pathological results.
 	 */
 	extraBlocks = Min(512, lockWaiters * 20);
 
@@ -225,10 +225,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
 	}
 
 	/*
-	 * Updating the upper levels of the free space map is too expensive
-	 * to do for every block, but it's worth doing once at the end to make
-	 * sure that subsequent insertion activity sees all of those nifty free
-	 * pages we just inserted.
+	 * Updating the upper levels of the free space map is too expensive to do
+	 * for every block, but it's worth doing once at the end to make sure that
+	 * subsequent insertion activity sees all of those nifty free pages we
+	 * just inserted.
 	 *
 	 * Note that we're using the freespace value that was reported for the
 	 * last block we added as if it were the freespace value for every block
@@ -547,8 +547,8 @@ loop:
 	}
 
 	/*
-	 * In addition to whatever extension we performed above, we always add
-	 * at least one block to satisfy our own request.
+	 * In addition to whatever extension we performed above, we always add at
+	 * least one block to satisfy our own request.
 	 *
 	 * XXX This does an lseek - rather expensive - but at the moment it is the
 	 * only way to accurately determine how many blocks are in a relation. Is
@@ -291,7 +291,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
 		elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
 
 	page = BufferGetPage(vmBuf);
-	map = (uint8 *)PageGetContents(page);
+	map = (uint8 *) PageGetContents(page);
 	LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
 
 	if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))
@@ -395,7 +395,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 				 * Check for a conflict-in as we would if we were going to
 				 * write to this page. We aren't actually going to write,
 				 * but we want a chance to report SSI conflicts that would
-				 * otherwise be masked by this unique constraint violation.
+				 * otherwise be masked by this unique constraint
+				 * violation.
 				 */
 				CheckForSerializableConflictIn(rel, NULL, buf);
 
@@ -813,8 +813,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 
 	/*
 	 * Check to see if we need to issue one final WAL record for this index,
-	 * which may be needed for correctness on a hot standby node when
-	 * non-MVCC index scans could take place.
+	 * which may be needed for correctness on a hot standby node when non-MVCC
+	 * index scans could take place.
 	 *
 	 * If the WAL is replayed in hot standby, the replay process needs to get
 	 * cleanup locks on all index leaf pages, just as we've been doing here.
@@ -1025,13 +1025,13 @@ restart:
 	if (ndeletable > 0)
 	{
 		/*
-		 * Notice that the issued XLOG_BTREE_VACUUM WAL record includes all
-		 * information to the replay code to allow it to get a cleanup lock
-		 * on all pages between the previous lastBlockVacuumed and this page.
-		 * This ensures that WAL replay locks all leaf pages at some point,
-		 * which is important should non-MVCC scans be requested.
-		 * This is currently unused on standby, but we record it anyway, so
-		 * that the WAL contains the required information.
+		 * Notice that the issued XLOG_BTREE_VACUUM WAL record includes
+		 * all information to the replay code to allow it to get a cleanup
+		 * lock on all pages between the previous lastBlockVacuumed and
+		 * this page. This ensures that WAL replay locks all leaf pages at
+		 * some point, which is important should non-MVCC scans be
+		 * requested. This is currently unused on standby, but we record
+		 * it anyway, so that the WAL contains the required information.
 		 *
 		 * Since we can visit leaf pages out-of-order when recursing,
 		 * replay might end up locking such pages an extra time, but it
@@ -392,15 +392,15 @@ btree_xlog_vacuum(XLogReaderState *record)
 	xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
 
 	/*
-	 * This section of code is thought to be no longer needed, after
-	 * analysis of the calling paths. It is retained to allow the code
-	 * to be reinstated if a flaw is revealed in that thinking.
+	 * This section of code is thought to be no longer needed, after analysis
+	 * of the calling paths. It is retained to allow the code to be reinstated
+	 * if a flaw is revealed in that thinking.
 	 *
 	 * If we are running non-MVCC scans using this index we need to do some
 	 * additional work to ensure correctness, which is known as a "pin scan"
 	 * described in more detail in next paragraphs. We used to do the extra
-	 * work in all cases, whereas we now avoid that work in most cases.
-	 * If lastBlockVacuumed is set to InvalidBlockNumber then we skip the
+	 * work in all cases, whereas we now avoid that work in most cases. If
+	 * lastBlockVacuumed is set to InvalidBlockNumber then we skip the
 	 * additional work required for the pin scan.
 	 *
 	 * Avoiding this extra work is important since it requires us to touch
@@ -153,9 +153,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
 	 * No-op if the module is not active.
 	 *
 	 * An unlocked read here is fine, because in a standby (the only place
-	 * where the flag can change in flight) this routine is only called by
-	 * the recovery process, which is also the only process which can change
-	 * the flag.
+	 * where the flag can change in flight) this routine is only called by the
+	 * recovery process, which is also the only process which can change the
+	 * flag.
 	 */
 	if (!commitTsShared->commitTsActive)
 		return;
@@ -767,8 +767,8 @@ ExtendCommitTs(TransactionId newestXact)
 	int			pageno;
 
 	/*
-	 * Nothing to do if module not enabled. Note we do an unlocked read of the
-	 * flag here, which is okay because this routine is only called from
+	 * Nothing to do if module not enabled. Note we do an unlocked read of
+	 * the flag here, which is okay because this routine is only called from
 	 * GetNewTransactionId, which is never called in a standby.
 	 */
 	Assert(!InRecovery);
@@ -52,9 +52,8 @@ typedef struct
 	Buffer		buffer;			/* registered buffer */
 	int			flags;			/* flags for this buffer */
 	int			deltaLen;		/* space consumed in delta field */
-	char	   *image;			/* copy of page image for modification,
-								 * do not do it in-place to have aligned
-								 * memory chunk */
+	char	   *image;			/* copy of page image for modification, do not
+								 * do it in-place to have aligned memory chunk */
 	char		delta[MAX_DELTA_SIZE];	/* delta between page images */
 } PageData;
 
@@ -988,8 +988,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
 			char	   *oldest_datname = get_database_name(oldest_datoid);
 
 			/*
-			 * Immediately kick autovacuum into action as we're already
-			 * in ERROR territory.
+			 * Immediately kick autovacuum into action as we're already in
+			 * ERROR territory.
 			 */
 			SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);
 
@@ -134,9 +134,9 @@ CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
 		nworkers = 0;
 
 	/*
-	 * If we are running under serializable isolation, we can't use
-	 * parallel workers, at least not until somebody enhances that mechanism
-	 * to be parallel-aware.
+	 * If we are running under serializable isolation, we can't use parallel
+	 * workers, at least not until somebody enhances that mechanism to be
+	 * parallel-aware.
 	 */
 	if (IsolationIsSerializable())
 		nworkers = 0;
@@ -646,9 +646,9 @@ DestroyParallelContext(ParallelContext *pcxt)
 	}
 
 	/*
-	 * We can't finish transaction commit or abort until all of the
-	 * workers have exited. This means, in particular, that we can't respond
-	 * to interrupts at this stage.
+	 * We can't finish transaction commit or abort until all of the workers
+	 * have exited. This means, in particular, that we can't respond to
+	 * interrupts at this stage.
 	 */
 	HOLD_INTERRUPTS();
 	WaitForParallelWorkersToExit(pcxt);
@@ -958,9 +958,9 @@ ParallelWorkerMain(Datum main_arg)
 	 */
 
 	/*
-	 * Join locking group. We must do this before anything that could try
-	 * to acquire a heavyweight lock, because any heavyweight locks acquired
-	 * to this point could block either directly against the parallel group
+	 * Join locking group. We must do this before anything that could try to
+	 * acquire a heavyweight lock, because any heavyweight locks acquired to
+	 * this point could block either directly against the parallel group
 	 * leader or against some process which in turn waits for a lock that
 	 * conflicts with the parallel group leader, causing an undetected
 	 * deadlock. (If we can't join the lock group, the leader has gone away,
@@ -257,7 +257,7 @@ StartupSUBTRANS(TransactionId oldestActiveXID)
 			startPage++;
 			/* must account for wraparound */
 			if (startPage > TransactionIdToPage(MaxTransactionId))
-				startPage=0;
+				startPage = 0;
 		}
 		(void) ZeroSUBTRANSPage(startPage);
 
@@ -140,11 +140,11 @@ typedef struct GlobalTransactionData
 	TimestampTz prepared_at;	/* time of preparation */
 
 	/*
-	 * Note that we need to keep track of two LSNs for each GXACT.
-	 * We keep track of the start LSN because this is the address we must
-	 * use to read state data back from WAL when committing a prepared GXACT.
-	 * We keep track of the end LSN because that is the LSN we need to wait
-	 * for prior to commit.
+	 * Note that we need to keep track of two LSNs for each GXACT. We keep
+	 * track of the start LSN because this is the address we must use to read
+	 * state data back from WAL when committing a prepared GXACT. We keep
+	 * track of the end LSN because that is the LSN we need to wait for prior
+	 * to commit.
 	 */
 	XLogRecPtr	prepare_start_lsn;	/* XLOG offset of prepare record start */
 	XLogRecPtr	prepare_end_lsn;	/* XLOG offset of prepare record end */
@@ -1280,7 +1280,7 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
 	if (len != NULL)
 		*len = XLogRecGetDataLen(xlogreader);
 
-	*buf = palloc(sizeof(char)*XLogRecGetDataLen(xlogreader));
+	*buf = palloc(sizeof(char) * XLogRecGetDataLen(xlogreader));
 	memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader));
 
 	XLogReaderFree(xlogreader);
@@ -1347,10 +1347,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
 	xid = pgxact->xid;
 
 	/*
-	 * Read and validate 2PC state data.
-	 * State data will typically be stored in WAL files if the LSN is after the
-	 * last checkpoint record, or moved to disk if for some reason they have
-	 * lived for a long time.
+	 * Read and validate 2PC state data. State data will typically be stored
+	 * in WAL files if the LSN is after the last checkpoint record, or moved
+	 * to disk if for some reason they have lived for a long time.
 	 */
 	if (gxact->ondisk)
 		buf = ReadTwoPhaseFile(xid, true);
@@ -1605,22 +1604,20 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
 	TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START();
 
 	/*
-	 * We are expecting there to be zero GXACTs that need to be
-	 * copied to disk, so we perform all I/O while holding
-	 * TwoPhaseStateLock for simplicity. This prevents any new xacts
-	 * from preparing while this occurs, which shouldn't be a problem
-	 * since the presence of long-lived prepared xacts indicates the
-	 * transaction manager isn't active.
+	 * We are expecting there to be zero GXACTs that need to be copied to
+	 * disk, so we perform all I/O while holding TwoPhaseStateLock for
+	 * simplicity. This prevents any new xacts from preparing while this
+	 * occurs, which shouldn't be a problem since the presence of long-lived
+	 * prepared xacts indicates the transaction manager isn't active.
 	 *
-	 * It's also possible to move I/O out of the lock, but on
-	 * every error we should check whether somebody committed our
-	 * transaction in different backend. Let's leave this optimisation
-	 * for future, if somebody will spot that this place cause
-	 * bottleneck.
+	 * It's also possible to move I/O out of the lock, but on every error we
+	 * should check whether somebody committed our transaction in different
+	 * backend. Let's leave this optimisation for future, if somebody will
+	 * spot that this place cause bottleneck.
 	 *
-	 * Note that it isn't possible for there to be a GXACT with
-	 * a prepare_end_lsn set prior to the last checkpoint yet
-	 * is marked invalid, because of the efforts with delayChkpt.
+	 * Note that it isn't possible for there to be a GXACT with a
+	 * prepare_end_lsn set prior to the last checkpoint yet is marked invalid,
+	 * because of the efforts with delayChkpt.
 	 */
 	LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
 	for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
@@ -1166,13 +1166,13 @@ RecordTransactionCommit(void)
 		/*
 		 * Transactions without an assigned xid can contain invalidation
 		 * messages (e.g. explicit relcache invalidations or catcache
-		 * invalidations for inplace updates); standbys need to process
-		 * those. We can't emit a commit record without an xid, and we don't
-		 * want to force assigning an xid, because that'd be problematic for
-		 * e.g. vacuum. Hence we emit a bespoke record for the
-		 * invalidations. We don't want to use that in case a commit record is
-		 * emitted, so they happen synchronously with commits (besides not
-		 * wanting to emit more WAL recoreds).
+		 * invalidations for inplace updates); standbys need to process those.
+		 * We can't emit a commit record without an xid, and we don't want to
+		 * force assigning an xid, because that'd be problematic for e.g.
+		 * vacuum. Hence we emit a bespoke record for the invalidations. We
+		 * don't want to use that in case a commit record is emitted, so they
+		 * happen synchronously with commits (besides not wanting to emit more
+		 * WAL recoreds).
 		 */
 		if (nmsgs != 0)
 		{
@@ -1272,8 +1272,8 @@ RecordTransactionCommit(void)
 	 * this case, but we don't currently try to do that. It would certainly
 	 * cause problems at least in Hot Standby mode, where the
	 * KnownAssignedXids machinery requires tracking every XID assignment. It
-	 * might be OK to skip it only when wal_level < replica, but for now
-	 * we don't.)
+	 * might be OK to skip it only when wal_level < replica, but for now we
+	 * don't.)
 	 *
 	 * However, if we're doing cleanup of any non-temp rels or committing any
 	 * command that wanted to force sync commit, then we must flush XLOG
@@ -5486,8 +5486,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
 
 	/*
 	 * If asked by the primary (because someone is waiting for a synchronous
-	 * commit = remote_apply), we will need to ask walreceiver to send a
-	 * reply immediately.
+	 * commit = remote_apply), we will need to ask walreceiver to send a reply
+	 * immediately.
 	 */
 	if (XactCompletionApplyFeedback(parsed->xinfo))
 		XLogRequestWalReceiverReply();
@@ -5880,8 +5880,8 @@ CheckRequiredParameterValues(void)
 	}
 
 	/*
-	 * For Hot Standby, the WAL must be generated with 'replica' mode, and
-	 * we must have at least as many backend slots as the primary.
+	 * For Hot Standby, the WAL must be generated with 'replica' mode, and we
+	 * must have at least as many backend slots as the primary.
 	 */
 	if (ArchiveRecoveryRequested && EnableHotStandby)
 	{
@@ -6163,10 +6163,10 @@ StartupXLOG(void)
 			 * is no use of such file. There is no harm in retaining it, but it
 			 * is better to get rid of the map file so that we don't have any
 			 * redundant file in data directory and it will avoid any sort of
-			 * confusion. It seems prudent though to just rename the file out
-			 * of the way rather than delete it completely, also we ignore any
-			 * error that occurs in rename operation as even if map file is
-			 * present without backup_label file, it is harmless.
+			 * confusion. It seems prudent though to just rename the file out of
+			 * the way rather than delete it completely, also we ignore any error
+			 * that occurs in rename operation as even if map file is present
+			 * without backup_label file, it is harmless.
 			 */
 			if (stat(TABLESPACE_MAP, &st) == 0)
 			{
@@ -6883,8 +6883,8 @@ StartupXLOG(void)
 					SpinLockRelease(&XLogCtl->info_lck);
 
 					/*
-					 * If rm_redo called XLogRequestWalReceiverReply, then we
-					 * wake up the receiver so that it notices the updated
+					 * If rm_redo called XLogRequestWalReceiverReply, then we wake
+					 * up the receiver so that it notices the updated
 					 * lastReplayedEndRecPtr and sends a reply to the master.
 					 */
 					if (doRequestWalReceiverReply)
@@ -104,8 +104,8 @@ pg_start_backup(PG_FUNCTION_ARGS)
 	MemoryContext oldcontext;
 
 	/*
-	 * Label file and tablespace map file need to be long-lived, since they
-	 * are read in pg_stop_backup.
+	 * Label file and tablespace map file need to be long-lived, since
+	 * they are read in pg_stop_backup.
 	 */
 	oldcontext = MemoryContextSwitchTo(TopMemoryContext);
 	label_file = makeStringInfo();
@@ -156,10 +156,10 @@ pg_stop_backup(PG_FUNCTION_ARGS)
 				 errhint("Did you mean to use pg_stop_backup('f')?")));
 
 	/*
-	 * Exclusive backups were typically started in a different connection,
-	 * so don't try to verify that exclusive_backup_running is set in this one.
-	 * Actual verification that an exclusive backup is in fact running is handled
-	 * inside do_pg_stop_backup.
+	 * Exclusive backups were typically started in a different connection, so
+	 * don't try to verify that exclusive_backup_running is set in this one.
+	 * Actual verification that an exclusive backup is in fact running is
+	 * handled inside do_pg_stop_backup.
 	 */
 	stoppoint = do_pg_stop_backup(NULL, true, NULL);
 
@@ -248,9 +248,8 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS)
 				 errhint("Did you mean to use pg_stop_backup('t')?")));
 
 		/*
-		 * Stop the non-exclusive backup. Return a copy of the backup
-		 * label and tablespace map so they can be written to disk by
-		 * the caller.
+		 * Stop the non-exclusive backup. Return a copy of the backup label
+		 * and tablespace map so they can be written to disk by the caller.
 		 */
 		stoppoint = do_pg_stop_backup(label_file->data, true, NULL);
 		nonexclusive_backup_running = false;
@@ -410,8 +410,8 @@ AggregateCreate(const char *aggName,
 		Oid			combineType;
 
 		/*
-		 * Combine function must have 2 argument, each of which is the
-		 * trans type
+		 * Combine function must have 2 argument, each of which is the trans
+		 * type
 		 */
 		fnArgs[0] = aggTransType;
 		fnArgs[1] = aggTransType;
@@ -440,8 +440,9 @@ AggregateCreate(const char *aggName,
 	}
 
 	/*
-	 * Validate the serialization function, if present. We must ensure that the
-	 * return type of this function is the same as the specified serialType.
+	 * Validate the serialization function, if present. We must ensure that
+	 * the return type of this function is the same as the specified
+	 * serialType.
 	 */
 	if (aggserialfnName)
 	{
@@ -338,8 +338,8 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
 		/*
 		 * There's little point in having a serialization/deserialization
 		 * function on aggregates that don't have an internal state, so let's
-		 * just disallow this as it may help clear up any confusion or needless
-		 * authoring of these functions.
+		 * just disallow this as it may help clear up any confusion or
+		 * needless authoring of these functions.
 		 */
 		if (transTypeId != INTERNALOID)
 			ereport(ERROR,
@@ -358,9 +358,9 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
 
 		/*
 		 * We disallow INTERNAL serialType as the whole point of the
-		 * serialized types is to allow the aggregate state to be output,
-		 * and we cannot output INTERNAL. This check, combined with the one
-		 * above ensures that the trans type and serialization type are not the
+		 * serialized types is to allow the aggregate state to be output, and
+		 * we cannot output INTERNAL. This check, combined with the one above
+		 * ensures that the trans type and serialization type are not the
 		 * same.
 		 */
 		if (serialTypeId == INTERNALOID)
@@ -409,9 +409,8 @@ ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddre
 								   stmt->objargs, &rel, AccessExclusiveLock, false);
 
 	/*
-	 * If a relation was involved, it would have been opened and locked.
-	 * We don't need the relation here, but we'll retain the lock until
-	 * commit.
+	 * If a relation was involved, it would have been opened and locked. We
+	 * don't need the relation here, but we'll retain the lock until commit.
 	 */
 	if (rel)
 		heap_close(rel, NoLock);
@@ -630,8 +629,8 @@ AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid)
 	oldNspOid = DatumGetObjectId(namespace);
 
 	/*
-	 * If the object is already in the correct namespace, we don't need
-	 * to do anything except fire the object access hook.
+	 * If the object is already in the correct namespace, we don't need to do
+	 * anything except fire the object access hook.
 	 */
 	if (oldNspOid == nspOid)
 	{
@@ -217,9 +217,8 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
 						RelationGetRelationName(matviewRel));
 
 	/*
-	 * Check that there is a unique index with no WHERE clause on
-	 * one or more columns of the materialized view if CONCURRENTLY
-	 * is specified.
+	 * Check that there is a unique index with no WHERE clause on one or more
+	 * columns of the materialized view if CONCURRENTLY is specified.
 	 */
 	if (concurrent)
 	{
@@ -745,8 +744,8 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
 	/*
 	 * There must be at least one unique index on the matview.
 	 *
-	 * ExecRefreshMatView() checks that after taking the exclusive lock on
-	 * the matview. So at least one unique index is guaranteed to exist here
+	 * ExecRefreshMatView() checks that after taking the exclusive lock on the
+	 * matview. So at least one unique index is guaranteed to exist here
 	 * because the lock is still being held.
 	 */
 	Assert(foundUniqueIndex);
@@ -511,7 +511,8 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
 	 */
 	if (!noperm && num_roles > 0)
 	{
-		int			i, j;
+		int			i,
+					j;
 		Oid		   *roles = (Oid *) ARR_DATA_PTR(policy_roles);
 		Datum	   *role_oids;
 		char	   *qual_value;
@@ -536,10 +537,9 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
 
 		/*
 		 * All of the dependencies will be removed from the policy and then
-		 * re-added. In order to get them correct, we need to extract out
-		 * the expressions in the policy and construct a parsestate just
-		 * enough to build the range table(s) to then pass to
-		 * recordDependencyOnExpr().
+		 * re-added. In order to get them correct, we need to extract out the
+		 * expressions in the policy and construct a parsestate just enough to
+		 * build the range table(s) to then pass to recordDependencyOnExpr().
 		 */
 
 		/* Get policy qual, to update dependencies */
@@ -665,7 +665,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
 
 	heap_close(pg_policy_rel, RowExclusiveLock);
 
-	return(noperm || num_roles > 0);
+	return (noperm || num_roles > 0);
 }
 
 /*
@@ -1035,9 +1035,9 @@ AlterPolicy(AlterPolicyStmt *stmt)
 		ArrayType  *policy_roles;
 
 		/*
-		 * We need to pull the set of roles this policy applies to from
-		 * what's in the catalog, so that we can recreate the dependencies
-		 * correctly for the policy.
+		 * We need to pull the set of roles this policy applies to from what's
+		 * in the catalog, so that we can recreate the dependencies correctly
+		 * for the policy.
 		 */
 
 		roles_datum = heap_getattr(policy_tuple, Anum_pg_policy_polroles,
@@ -1070,8 +1070,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
 
 		/*
 		 * We need to pull the USING expression and build the range table for
-		 * the policy from what's in the catalog, so that we can recreate
-		 * the dependencies correctly for the policy.
+		 * the policy from what's in the catalog, so that we can recreate the
+		 * dependencies correctly for the policy.
 		 */
 
 		/* Check if the policy has a USING expr */
@@ -532,8 +532,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 	 * can skip this for internally generated triggers, since the name
 	 * modification above should be sufficient.
 	 *
-	 * NOTE that this is cool only because we have ShareRowExclusiveLock on the
-	 * relation, so the trigger set won't be changing underneath us.
+	 * NOTE that this is cool only because we have ShareRowExclusiveLock on
+	 * the relation, so the trigger set won't be changing underneath us.
 	 */
 	if (!isInternal)
 	{
@@ -3313,9 +3313,9 @@ AlterTypeOwner_oid(Oid typeOid, Oid newOwnerId, bool hasDependEntry)
 	typTup = (Form_pg_type) GETSTRUCT(tup);
 
 	/*
-	 * If it's a composite type, invoke ATExecChangeOwner so that we fix up the
-	 * pg_class entry properly. That will call back to AlterTypeOwnerInternal
-	 * to take care of the pg_type entry(s).
+	 * If it's a composite type, invoke ATExecChangeOwner so that we fix up
+	 * the pg_class entry properly. That will call back to
+	 * AlterTypeOwnerInternal to take care of the pg_type entry(s).
 	 */
 	if (typTup->typtype == TYPTYPE_COMPOSITE)
 		ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock);
@@ -1167,8 +1167,8 @@ RenameRole(const char *oldname, const char *newname)
 				 errmsg("current user cannot be renamed")));
 
 	/*
-	 * Check that the user is not trying to rename a system role and
-	 * not trying to rename a role into the reserved "pg_" namespace.
+	 * Check that the user is not trying to rename a system role and not
+	 * trying to rename a role into the reserved "pg_" namespace.
 	 */
 	if (IsReservedName(NameStr(authform->rolname)))
 		ereport(ERROR,
@@ -1192,9 +1192,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 		}
 
 		/*
-		 * If the all-visible page is turned out to be all-frozen but not marked,
-		 * we should so mark it. Note that all_frozen is only valid if all_visible
-		 * is true, so we must check both.
+		 * If the all-visible page is turned out to be all-frozen but not
+		 * marked, we should so mark it. Note that all_frozen is only valid
+		 * if all_visible is true, so we must check both.
 		 */
 		else if (all_visible_according_to_vm && all_visible && all_frozen &&
 				 !VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
@@ -880,9 +880,9 @@ check_role(char **newval, void **extra, GucSource source)
 		ReleaseSysCache(roleTup);
 
 		/*
-		 * Verify that session user is allowed to become this role, but
-		 * skip this in parallel mode, where we must blindly recreate the
-		 * parallel leader's state.
+		 * Verify that session user is allowed to become this role, but skip
+		 * this in parallel mode, where we must blindly recreate the parallel
+		 * leader's state.
 		 */
 		if (!InitializingParallelWorker &&
 			!is_member_of_role(GetSessionUserId(), roleid))
@@ -444,10 +444,9 @@ ExecSupportsBackwardScan(Plan *node)
 		return false;
 
 	/*
-	 * Parallel-aware nodes return a subset of the tuples in each worker,
-	 * and in general we can't expect to have enough bookkeeping state to
-	 * know which ones we returned in this worker as opposed to some other
-	 * worker.
+	 * Parallel-aware nodes return a subset of the tuples in each worker, and
+	 * in general we can't expect to have enough bookkeeping state to know
+	 * which ones we returned in this worker as opposed to some other worker.
	 */
 	if (node->parallel_aware)
 		return false;
@@ -391,8 +391,8 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 
 	/*
-	 * Give parallel-aware nodes a chance to add to the estimates, and get
-	 * a count of how many PlanState nodes there are.
+	 * Give parallel-aware nodes a chance to add to the estimates, and get a
+	 * count of how many PlanState nodes there are.
 	 */
 	e.pcxt = pcxt;
 	e.nnodes = 0;
@@ -444,9 +444,9 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 	pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
 
 	/*
-	 * If instrumentation options were supplied, allocate space for the
-	 * data. It only gets partially initialized here; the rest happens
-	 * during ExecParallelInitializeDSM.
+	 * If instrumentation options were supplied, allocate space for the data.
+	 * It only gets partially initialized here; the rest happens during
+	 * ExecParallelInitializeDSM.
 	 */
 	if (estate->es_instrument)
 	{
@@ -636,9 +636,9 @@ ExecParallelReportInstrumentation(PlanState *planstate,
 
 	/*
 	 * If we shuffled the plan_node_id values in ps_instrument into sorted
-	 * order, we could use binary search here. This might matter someday
-	 * if we're pushing down sufficiently large plan trees. For now, do it
-	 * the slow, dumb way.
+	 * order, we could use binary search here. This might matter someday if
+	 * we're pushing down sufficiently large plan trees. For now, do it the
+	 * slow, dumb way.
 	 */
 	for (i = 0; i < instrumentation->num_plan_nodes; ++i)
 		if (instrumentation->plan_node_id[i] == plan_node_id)
@ -981,10 +981,11 @@ combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
|
||||
if (OidIsValid(pertrans->deserialfn_oid))
|
||||
{
|
||||
/*
|
||||
* Don't call a strict deserialization function with NULL input.
|
||||
* A strict deserialization function and a null value means we skip
|
||||
* calling the combine function for this state. We assume that this
|
||||
* would be a waste of time and effort anyway so just skip it.
|
||||
* Don't call a strict deserialization function with NULL input. A
|
||||
* strict deserialization function and a null value means we skip
|
||||
* calling the combine function for this state. We assume that
|
||||
* this would be a waste of time and effort anyway so just skip
|
||||
* it.
|
||||
*/
|
||||
if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0])
|
||||
continue;
|
||||
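The comment rewrapped above encodes a simple rule: a strict deserialization function is never called with a NULL serialized state, and with no deserialized state there is nothing for the combine function to consume, so the combine step is skipped outright. A minimal standalone sketch of that skip rule follows; the struct and function names are illustrative stand-ins, not the executor's actual types.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for an aggregate's deserialization entry. */
	typedef struct DeserialFn
	{
		bool		fn_strict;		/* strict: refuses NULL input */
	} DeserialFn;

	/* Mirror of the check: skip combining when a strict fn sees NULL. */
	static bool
	should_combine(const DeserialFn *fn, bool input_is_null)
	{
		if (fn->fn_strict && input_is_null)
			return false;			/* no state can be produced; skip */
		return true;
	}

	int
	main(void)
	{
		DeserialFn	fn = {true};

		printf("%d\n", should_combine(&fn, true));	/* 0: skipped */
		printf("%d\n", should_combine(&fn, false)); /* 1: combined */
		return 0;
	}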
@ -1429,8 +1430,8 @@ finalize_partialaggregate(AggState *aggstate,
	oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);

	/*
	 * serialfn_oid will be set if we must serialize the input state
	 * before calling the combine function on the state.
	 * serialfn_oid will be set if we must serialize the input state before
	 * calling the combine function on the state.
	 */
	if (OidIsValid(pertrans->serialfn_oid))
	{

@ -1443,6 +1444,7 @@ finalize_partialaggregate(AggState *aggstate,
	else
	{
		FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo;

		fcinfo->arg[0] = pergroupstate->transValue;
		fcinfo->argnull[0] = pergroupstate->transValueIsNull;

@ -3366,9 +3368,9 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg,
	/*
	 * The serialization and deserialization functions must match, if
	 * present, as we're unable to share the trans state for aggregates
	 * which will serialize or deserialize into different formats. Remember
	 * that these will be InvalidOid if they're not required for this agg
	 * node.
	 * which will serialize or deserialize into different formats.
	 * Remember that these will be InvalidOid if they're not required for
	 * this agg node.
	 */
	if (aggserialfn != pertrans->serialfn_oid ||
		aggdeserialfn != pertrans->deserialfn_oid)

@ -285,8 +285,8 @@ ExecReScanForeignScan(ForeignScanState *node)

	/*
	 * If chgParam of subnode is not null then plan will be re-scanned by
	 * first ExecProcNode. outerPlan may also be NULL, in which case there
	 * is nothing to rescan at all.
	 * first ExecProcNode. outerPlan may also be NULL, in which case there is
	 * nothing to rescan at all.
	 */
	if (outerPlan != NULL && outerPlan->chgParam == NULL)
		ExecReScan(outerPlan);

@ -138,8 +138,8 @@ ExecGather(GatherState *node)
	/*
	 * Initialize the parallel context and workers on first execution. We do
	 * this on first execution rather than during node initialization, as it
	 * needs to allocate large dynamic segment, so it is better to do if it
	 * is really needed.
	 * needs to allocate large dynamic segment, so it is better to do if it is
	 * really needed.
	 */
	if (!node->initialized)
	{

@ -147,8 +147,8 @@ ExecGather(GatherState *node)
		Gather *gather = (Gather *) node->ps.plan;

		/*
		 * Sometimes we might have to run without parallelism; but if
		 * parallel mode is active then we can try to fire up some workers.
		 * Sometimes we might have to run without parallelism; but if parallel
		 * mode is active then we can try to fire up some workers.
		 */
		if (gather->num_workers > 0 && IsInParallelMode())
		{

@ -330,8 +330,8 @@ gather_readnext(GatherState *gatherstate)
		tup = TupleQueueReaderNext(reader, true, &readerdone);

		/*
		 * If this reader is done, remove it. If all readers are done,
		 * clean up remaining worker state.
		 * If this reader is done, remove it. If all readers are done, clean
		 * up remaining worker state.
		 */
		if (readerdone)
		{

@ -452,10 +452,10 @@ void
ExecReScanGather(GatherState *node)
{
	/*
	 * Re-initialize the parallel workers to perform rescan of relation.
	 * We want to gracefully shutdown all the workers so that they
	 * should be able to propagate any error or other information to master
	 * backend before dying. Parallel context will be reused for rescan.
	 * Re-initialize the parallel workers to perform rescan of relation. We
	 * want to gracefully shutdown all the workers so that they should be able
	 * to propagate any error or other information to master backend before
	 * dying. Parallel context will be reused for rescan.
	 */
	ExecShutdownGatherWorkers(node);

@ -1221,10 +1221,10 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
	/*
	 * Note that it is possible that the target tuple has been modified in
	 * this session, after the above heap_lock_tuple. We choose to not error
	 * out in that case, in line with ExecUpdate's treatment of similar
	 * cases. This can happen if an UPDATE is triggered from within
	 * ExecQual(), ExecWithCheckOptions() or ExecProject() above, e.g. by
	 * selecting from a wCTE in the ON CONFLICT's SET.
	 * out in that case, in line with ExecUpdate's treatment of similar cases.
	 * This can happen if an UPDATE is triggered from within ExecQual(),
	 * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
	 * wCTE in the ON CONFLICT's SET.
	 */

	/* Execute UPDATE with projection */

@ -65,8 +65,8 @@ SeqNext(SeqScanState *node)
	if (scandesc == NULL)
	{
		/*
		 * We reach here if the scan is not parallel, or if we're executing
		 * a scan that was intended to be parallel serially.
		 * We reach here if the scan is not parallel, or if we're executing a
		 * scan that was intended to be parallel serially.
		 */
		scandesc = heap_beginscan(node->ss.ss_currentRelation,
								  estate->es_snapshot,

@ -81,19 +81,19 @@ struct TupleQueueReader
#define TUPLE_QUEUE_MODE_CONTROL 'c'
#define TUPLE_QUEUE_MODE_DATA 'd'

static void tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype,
static void tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype,
		   Datum value);
static void tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value);
static void tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value);
static void tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value);
static void tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod,
static void tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value);
static void tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value);
static void tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value);
static void tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
					 TupleDesc tupledesc);
static void TupleQueueHandleControlMessage(TupleQueueReader *reader,
							   Size nbytes, char *data);
static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader,
							Size nbytes, HeapTupleHeader data);
static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader,
					 TupleDesc tupledesc, RemapInfo * remapinfo,
					 TupleDesc tupledesc, RemapInfo *remapinfo,
					 HeapTuple tuple);
static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass,
				Datum value);

@ -212,7 +212,7 @@ tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self)
 * Invoke the appropriate walker function based on the given RemapClass.
 */
static void
tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value)
{
	check_stack_depth();

@ -237,7 +237,7 @@ tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
 * contained therein.
 */
static void
tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value)
{
	HeapTupleHeader tup;
	Oid			typeid;

@ -304,7 +304,7 @@ tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
 * contained therein.
 */
static void
tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value)
{
	ArrayType  *arr = DatumGetArrayTypeP(value);
	Oid			typeid = ARR_ELEMTYPE(arr);

@ -342,7 +342,7 @@ tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
 * contained therein.
 */
static void
tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value)
{
	RangeType  *range = DatumGetRangeType(value);
	Oid			typeid = RangeTypeGetOid(range);

@ -386,7 +386,7 @@ tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
 * already done so previously.
 */
static void
tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod,
tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
					 TupleDesc tupledesc)
{
	StringInfoData buf;

@ -613,7 +613,7 @@ TupleQueueHandleDataMessage(TupleQueueReader *reader,
 */
static HeapTuple
TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc,
					 RemapInfo * remapinfo, HeapTuple tuple)
					 RemapInfo *remapinfo, HeapTuple tuple)
{
	Datum	   *values;
	bool	   *isnull;

@ -2539,11 +2539,10 @@ CheckRADIUSAuth(Port *port)
	radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier));

	/*
	 * RADIUS password attributes are calculated as:
	 * e[0] = p[0] XOR MD5(secret + Request Authenticator)
	 * for the first group of 16 octets, and then:
	 * e[i] = p[i] XOR MD5(secret + e[i-1])
	 * for the following ones (if necessary)
	 * RADIUS password attributes are calculated as: e[0] = p[0] XOR
	 * MD5(secret + Request Authenticator) for the first group of 16 octets,
	 * and then: e[i] = p[i] XOR MD5(secret + e[i-1]) for the following ones
	 * (if necessary)
	 */
	encryptedpasswordlen = ((strlen(passwd) + RADIUS_VECTOR_LENGTH - 1) / RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH;
	cryptvector = palloc(strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH);

@ -2554,7 +2553,11 @@ CheckRADIUSAuth(Port *port)
	for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH)
	{
		memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer, RADIUS_VECTOR_LENGTH);
		/* .. and for subsequent iterations the result of the previous XOR (calculated below) */

		/*
		 * .. and for subsequent iterations the result of the previous XOR
		 * (calculated below)
		 */
		md5trailer = encryptedpassword + i;

		if (!pg_md5_binary(cryptvector, strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH, encryptedpassword + i))

@ -2565,7 +2568,7 @@ CheckRADIUSAuth(Port *port)
			return STATUS_ERROR;
		}

		for (j = i; j < i+RADIUS_VECTOR_LENGTH; j++)
		for (j = i; j < i + RADIUS_VECTOR_LENGTH; j++)
		{
			if (j < strlen(passwd))
				encryptedpassword[j] = passwd[j] ^ encryptedpassword[j];
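The three hunks above reflow the comment and loop implementing the RFC 2865 User-Password scheme: the password is zero-padded to 16-octet blocks, and each block is XORed with MD5(secret || previous ciphertext block), seeded with the Request Authenticator. A self-contained sketch of the same chaining follows, using OpenSSL's MD5() in place of the backend's pg_md5_binary(); the buffer bound and names are illustrative assumptions, not the server's actual code.

	/* build with: cc radius_pw.c -lcrypto */
	#include <string.h>
	#include <openssl/md5.h>

	#define VECTOR_LEN 16			/* RADIUS works in 16-octet blocks */

	/*
	 * e[0] = p[0] XOR MD5(secret || Request Authenticator)
	 * e[i] = p[i] XOR MD5(secret || e[i-1])
	 *
	 * "passwd" must already be zero-padded to a multiple of VECTOR_LEN
	 * ("padded_len" bytes); the secret is assumed shorter than 256 bytes.
	 */
	static void
	radius_encrypt_password(const unsigned char *passwd, size_t padded_len,
							const char *secret,
							const unsigned char authenticator[VECTOR_LEN],
							unsigned char *out)
	{
		unsigned char cryptvector[256 + VECTOR_LEN];
		unsigned char digest[MD5_DIGEST_LENGTH];
		const unsigned char *trailer = authenticator;
		size_t		secretlen = strlen(secret);
		size_t		i,
					j;

		memcpy(cryptvector, secret, secretlen);
		for (i = 0; i < padded_len; i += VECTOR_LEN)
		{
			/* first round hashes the authenticator, later rounds e[i-1] */
			memcpy(cryptvector + secretlen, trailer, VECTOR_LEN);
			MD5(cryptvector, secretlen + VECTOR_LEN, digest);

			for (j = 0; j < VECTOR_LEN; j++)
				out[i + j] = passwd[i + j] ^ digest[j];

			trailer = out + i;	/* chain on the block just produced */
		}
	}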
@ -377,11 +377,12 @@ be_tls_open_server(Port *port)
	port->ssl_in_use = true;

aloop:

	/*
	 * Prepare to call SSL_get_error() by clearing thread's OpenSSL error
	 * queue. In general, the current thread's error queue must be empty
	 * before the TLS/SSL I/O operation is attempted, or SSL_get_error()
	 * will not work reliably. An extension may have failed to clear the
	 * before the TLS/SSL I/O operation is attempted, or SSL_get_error() will
	 * not work reliably. An extension may have failed to clear the
	 * per-thread error queue following another call to an OpenSSL I/O
	 * routine.
	 */

@ -393,12 +394,11 @@ aloop:

	/*
	 * Other clients of OpenSSL in the backend may fail to call
	 * ERR_get_error(), but we always do, so as to not cause problems
	 * for OpenSSL clients that don't call ERR_clear_error()
	 * defensively. Be sure that this happens by calling now.
	 * SSL_get_error() relies on the OpenSSL per-thread error queue
	 * being intact, so this is the earliest possible point
	 * ERR_get_error() may be called.
	 * ERR_get_error(), but we always do, so as to not cause problems for
	 * OpenSSL clients that don't call ERR_clear_error() defensively. Be
	 * sure that this happens by calling now. SSL_get_error() relies on
	 * the OpenSSL per-thread error queue being intact, so this is the
	 * earliest possible point ERR_get_error() may be called.
	 */
	ecode = ERR_get_error();
	switch (err)

@ -146,20 +146,20 @@ retry:

	ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);

	WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1);
	WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);

	/*
	 * If the postmaster has died, it's not safe to continue running,
	 * because it is the postmaster's job to kill us if some other backend
	 * exists uncleanly. Moreover, we won't run very well in this state;
	 * helper processes like walwriter and the bgwriter will exit, so
	 * performance may be poor. Finally, if we don't exit, pg_ctl will
	 * be unable to restart the postmaster without manual intervention,
	 * so no new connections can be accepted. Exiting clears the deck
	 * for a postmaster restart.
	 * performance may be poor. Finally, if we don't exit, pg_ctl will be
	 * unable to restart the postmaster without manual intervention, so no
	 * new connections can be accepted. Exiting clears the deck for a
	 * postmaster restart.
	 *
	 * (Note that we only make this check when we would otherwise sleep
	 * on our latch. We might still continue running for a while if the
	 * (Note that we only make this check when we would otherwise sleep on
	 * our latch. We might still continue running for a while if the
	 * postmaster is killed in mid-query, or even through multiple queries
	 * if we never have to wait for read. We don't want to burn too many
	 * cycles checking for this very rare condition, and this should cause

@ -247,7 +247,7 @@ retry:

	ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);

	WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1);
	WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);

	/* See comments in secure_read. */
	if (event.events & WL_POSTMASTER_DEATH)

@ -143,9 +143,9 @@ mq_putmessage(char msgtype, const char *s, size_t len)

	/*
	 * If the message queue is already gone, just ignore the message. This
	 * doesn't necessarily indicate a problem; for example, DEBUG messages
	 * can be generated late in the shutdown sequence, after all DSMs have
	 * already been detached.
	 * doesn't necessarily indicate a problem; for example, DEBUG messages can
	 * be generated late in the shutdown sequence, after all DSMs have already
	 * been detached.
	 */
	if (pq_mq == NULL)
		return 0;

@ -270,13 +270,16 @@ startup_hacks(const char *progname)
		SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);

#if defined(_M_AMD64) && _MSC_VER == 1800

		/*
		 * Avoid crashing in certain floating-point operations if
		 * we were compiled for x64 with MS Visual Studio 2013 and
		 * are running on Windows prior to 7/2008R2 SP1 on an
		 * AVX2-capable CPU.
		 * Avoid crashing in certain floating-point operations if we were
		 * compiled for x64 with MS Visual Studio 2013 and are running on
		 * Windows prior to 7/2008R2 SP1 on an AVX2-capable CPU.
		 *
		 * Ref: https://connect.microsoft.com/VisualStudio/feedback/details/811093/visual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instructions
		 * Ref:
		 * https://connect.microsoft.com/VisualStudio/feedback/details/811093/v
		 * isual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instruction
		 * s
		 */
		if (!IsWindows7SP1OrGreater())
		{

@ -3499,7 +3499,7 @@ planstate_tree_walker(PlanState *planstate,
				return true;
			break;
		case T_CustomScan:
			foreach (lc, ((CustomScanState *) planstate)->custom_ps)
			foreach(lc, ((CustomScanState *) planstate)->custom_ps)
			{
				if (walker((PlanState *) lfirst(lc), context))
					return true;

@ -2228,6 +2228,7 @@ _readExtensibleNode(void)
	const ExtensibleNodeMethods *methods;
	ExtensibleNode *local_node;
	const char *extnodename;

	READ_TEMP_LOCALS();

	token = pg_strtok(&length);	/* skip: extnodename */

@ -163,8 +163,8 @@ make_one_rel(PlannerInfo *root, List *joinlist)
	set_base_rel_consider_startup(root);

	/*
	 * Generate access paths for the base rels. set_base_rel_sizes also
	 * sets the consider_parallel flag for each baserel, if appropriate.
	 * Generate access paths for the base rels. set_base_rel_sizes also sets
	 * the consider_parallel flag for each baserel, if appropriate.
	 */
	set_base_rel_sizes(root);
	set_base_rel_pathlists(root);

@ -509,6 +509,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
	switch (rte->rtekind)
	{
		case RTE_RELATION:

			/*
			 * Currently, parallel workers can't access the leader's temporary
			 * tables. We could possibly relax this if the wrote all of its

@ -557,14 +558,15 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
			break;

		case RTE_SUBQUERY:

			/*
			 * Subplans currently aren't passed to workers. Even if they
			 * were, the subplan might be using parallelism internally, and
			 * we can't support nested Gather nodes at present. Finally,
			 * we don't have a good way of knowing whether the subplan
			 * involves any parallel-restricted operations. It would be
			 * nice to relax this restriction some day, but it's going to
			 * take a fair amount of work.
			 * were, the subplan might be using parallelism internally, and we
			 * can't support nested Gather nodes at present. Finally, we
			 * don't have a good way of knowing whether the subplan involves
			 * any parallel-restricted operations. It would be nice to relax
			 * this restriction some day, but it's going to take a fair amount
			 * of work.
			 */
			return;

@ -580,6 +582,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
			break;

		case RTE_VALUES:

			/*
			 * The data for a VALUES clause is stored in the plan tree itself,
			 * so scanning it in a worker is fine.

@ -587,6 +590,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
			break;

		case RTE_CTE:

			/*
			 * CTE tuplestores aren't shared among parallel workers, so we
			 * force all CTE scans to happen in the leader. Also, populating

@ -598,8 +602,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
	}

	/*
	 * If there's anything in baserestrictinfo that's parallel-restricted,
	 * we give up on parallelizing access to this relation. We could consider
	 * If there's anything in baserestrictinfo that's parallel-restricted, we
	 * give up on parallelizing access to this relation. We could consider
	 * instead postponing application of the restricted quals until we're
	 * above all the parallelism in the plan tree, but it's not clear that
	 * this would be a win in very many cases, and it might be tricky to make

@ -609,8 +613,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
		return;

	/*
	 * If the relation's outputs are not parallel-safe, we must give up.
	 * In the common case where the relation only outputs Vars, this check is
	 * If the relation's outputs are not parallel-safe, we must give up. In
	 * the common case where the relation only outputs Vars, this check is
	 * very cheap; otherwise, we have to do more work.
	 */
	if (rel->reltarget_has_non_vars &&

@ -1251,8 +1255,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
		int parallel_workers = 0;

		/*
		 * Decide on the numebr of workers to request for this append path. For
		 * now, we just use the maximum value from among the members. It
		 * Decide on the numebr of workers to request for this append path.
		 * For now, we just use the maximum value from among the members. It
		 * might be useful to use a higher number if the Append node were
		 * smart enough to spread out the workers, but it currently isn't.
		 */

@ -2160,8 +2164,8 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
		 * Run generate_gather_paths() for each just-processed joinrel. We
		 * could not do this earlier because both regular and partial paths
		 * can get added to a particular joinrel at multiple times within
		 * join_search_one_level. After that, we're done creating paths
		 * for the joinrel, so run set_cheapest().
		 * join_search_one_level. After that, we're done creating paths for
		 * the joinrel, so run set_cheapest().
		 */
		foreach(lc, root->join_rel_level[lev])
		{

@ -1428,15 +1428,14 @@ create_projection_plan(PlannerInfo *root, ProjectionPath *best_path)
	 * We might not really need a Result node here. There are several ways
	 * that this can happen. For example, MergeAppend doesn't project, so we
	 * would have thought that we needed a projection to attach resjunk sort
	 * columns to its output ... but create_merge_append_plan might have
	 * added those same resjunk sort columns to both MergeAppend and its
	 * children. Alternatively, apply_projection_to_path might have created
	 * a projection path as the subpath of a Gather node even though the
	 * subpath was projection-capable. So, if the subpath is capable of
	 * projection or the desired tlist is the same expression-wise as the
	 * subplan's, just jam it in there. We'll have charged for a Result that
	 * doesn't actually appear in the plan, but that's better than having a
	 * Result we don't need.
	 * columns to its output ... but create_merge_append_plan might have added
	 * those same resjunk sort columns to both MergeAppend and its children.
	 * Alternatively, apply_projection_to_path might have created a projection
	 * path as the subpath of a Gather node even though the subpath was
	 * projection-capable. So, if the subpath is capable of projection or the
	 * desired tlist is the same expression-wise as the subplan's, just jam it
	 * in there. We'll have charged for a Result that doesn't actually appear
	 * in the plan, but that's better than having a Result we don't need.
	 */
	if (is_projection_capable_path(best_path->subpath) ||
		tlist_same_exprs(tlist, subplan->targetlist))

@ -3248,8 +3247,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
	/*
	 * If a join between foreign relations was pushed down, remember it. The
	 * push-down safety of the join depends upon the server and user mapping
	 * being same. That can change between planning and execution time, in which
	 * case the plan should be invalidated.
	 * being same. That can change between planning and execution time, in
	 * which case the plan should be invalidated.
	 */
	if (scan_relid == 0)
		root->glob->hasForeignJoin = true;

@ -3257,8 +3256,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
	/*
	 * Replace any outer-relation variables with nestloop params in the qual,
	 * fdw_exprs and fdw_recheck_quals expressions. We do this last so that
	 * the FDW doesn't have to be involved. (Note that parts of fdw_exprs
	 * or fdw_recheck_quals could have come from join clauses, so doing this
	 * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
	 * fdw_recheck_quals could have come from join clauses, so doing this
	 * beforehand on the scan_clauses wouldn't work.) We assume
	 * fdw_scan_tlist contains no such variables.
	 */

@ -3279,8 +3278,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
	 * 0, but there can be no Var with relid 0 in the rel's targetlist or the
	 * restriction clauses, so we skip this in that case. Note that any such
	 * columns in base relations that were joined are assumed to be contained
	 * in fdw_scan_tlist.) This is a bit of a kluge and might go away someday,
	 * so we intentionally leave it out of the API presented to FDWs.
	 * in fdw_scan_tlist.) This is a bit of a kluge and might go away
	 * someday, so we intentionally leave it out of the API presented to FDWs.
	 */
	scan_plan->fsSystemCol = false;
	if (scan_relid > 0)

@ -1777,8 +1777,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
		 * findable from the PlannerInfo struct; anything else the FDW wants
		 * to know should be obtainable via "root".
		 *
		 * Note: CustomScan providers, as well as FDWs that don't want to
		 * use this hook, can use the create_upper_paths_hook; see below.
		 * Note: CustomScan providers, as well as FDWs that don't want to use
		 * this hook, can use the create_upper_paths_hook; see below.
		 */
		if (current_rel->fdwroutine &&
			current_rel->fdwroutine->GetForeignUpperPaths)

@ -3196,8 +3196,8 @@ set_grouped_rel_consider_parallel(PlannerInfo *root, RelOptInfo *grouped_rel,

	/*
	 * All that's left to check now is to make sure all aggregate functions
	 * support partial mode. If there's no aggregates then we can skip checking
	 * that.
	 * support partial mode. If there's no aggregates then we can skip
	 * checking that.
	 */
	if (!parse->hasAggs)
		grouped_rel->consider_parallel = true;

@ -3370,9 +3370,10 @@ create_grouping_paths(PlannerInfo *root,

	/*
	 * Determine whether it's possible to perform sort-based implementations
	 * of grouping. (Note that if groupClause is empty, grouping_is_sortable()
	 * is trivially true, and all the pathkeys_contained_in() tests will
	 * succeed too, so that we'll consider every surviving input path.)
	 * of grouping. (Note that if groupClause is empty,
	 * grouping_is_sortable() is trivially true, and all the
	 * pathkeys_contained_in() tests will succeed too, so that we'll consider
	 * every surviving input path.)
	 */
	can_sort = grouping_is_sortable(parse->groupClause);

@ -3616,8 +3617,8 @@ create_grouping_paths(PlannerInfo *root,

		/*
		 * Now generate a complete GroupAgg Path atop of the cheapest partial
		 * path. We need only bother with the cheapest path here, as the output
		 * of Gather is never sorted.
		 * path. We need only bother with the cheapest path here, as the
		 * output of Gather is never sorted.
		 */
		if (grouped_rel->partial_pathlist)
		{

@ -3632,9 +3633,9 @@ create_grouping_paths(PlannerInfo *root,
											 &total_groups);

			/*
			 * Gather is always unsorted, so we'll need to sort, unless there's
			 * no GROUP BY clause, in which case there will only be a single
			 * group.
			 * Gather is always unsorted, so we'll need to sort, unless
			 * there's no GROUP BY clause, in which case there will only be a
			 * single group.
			 */
			if (parse->groupClause)
				path = (Path *) create_sort_path(root,

@ -3678,15 +3679,15 @@ create_grouping_paths(PlannerInfo *root,
		/*
		 * Provided that the estimated size of the hashtable does not exceed
		 * work_mem, we'll generate a HashAgg Path, although if we were unable
		 * to sort above, then we'd better generate a Path, so that we at least
		 * have one.
		 * to sort above, then we'd better generate a Path, so that we at
		 * least have one.
		 */
		if (hashaggtablesize < work_mem * 1024L ||
			grouped_rel->pathlist == NIL)
		{
			/*
			 * We just need an Agg over the cheapest-total input path, since input
			 * order won't matter.
			 * We just need an Agg over the cheapest-total input path, since
			 * input order won't matter.
			 */
			add_path(grouped_rel, (Path *)
					 create_agg_path(root, grouped_rel,

@ -3704,8 +3705,8 @@ create_grouping_paths(PlannerInfo *root,

		/*
		 * Generate a HashAgg Path atop of the cheapest partial path. Once
		 * again, we'll only do this if it looks as though the hash table won't
		 * exceed work_mem.
		 * again, we'll only do this if it looks as though the hash table
		 * won't exceed work_mem.
		 */
		if (grouped_rel->partial_pathlist)
		{

@ -2100,6 +2100,7 @@ search_indexed_tlist_for_partial_aggref(Aggref *aggref, indexed_tlist *itlist,
			continue;
		if (aggref->aggvariadic != tlistaggref->aggvariadic)
			continue;

		/*
		 * it would be harmless to compare aggcombine and aggpartial, but
		 * it's also unnecessary

@ -1371,11 +1371,11 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
	 * recurse through Query objects to as to locate parallel-unsafe
	 * constructs anywhere in the tree.
	 *
	 * Later, we'll be called again for specific quals, possibly after
	 * some planning has been done, we may encounter SubPlan, SubLink,
	 * or AlternativeSubLink nodes. Currently, there's no need to recurse
	 * through these; they can't be unsafe, since we've already cleared
	 * the entire query of unsafe operations, and they're definitely
	 * Later, we'll be called again for specific quals, possibly after some
	 * planning has been done, we may encounter SubPlan, SubLink, or
	 * AlternativeSubLink nodes. Currently, there's no need to recurse
	 * through these; they can't be unsafe, since we've already cleared the
	 * entire query of unsafe operations, and they're definitely
	 * parallel-restricted.
	 */
	if (IsA(node, Query))

@ -1390,12 +1390,12 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
								  has_parallel_hazard_walker,
								  context, 0);
	}
	else if (IsA(node, SubPlan) || IsA(node, SubLink) ||
			 IsA(node, AlternativeSubPlan) || IsA(node, Param))
	else if (IsA(node, SubPlan) ||IsA(node, SubLink) ||
			 IsA(node, AlternativeSubPlan) ||IsA(node, Param))
	{
		/*
		 * Since we don't have the ability to push subplans down to workers
		 * at present, we treat subplan references as parallel-restricted.
		 * Since we don't have the ability to push subplans down to workers at
		 * present, we treat subplan references as parallel-restricted.
		 */
		if (!context->allow_restricted)
			return true;

@ -1405,12 +1405,14 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
	if (IsA(node, RestrictInfo))
	{
		RestrictInfo *rinfo = (RestrictInfo *) node;

		return has_parallel_hazard_walker((Node *) rinfo->clause, context);
	}

	/*
	 * It is an error for a parallel worker to touch a temporary table in any
	 * way, so we can't handle nodes whose type is the rowtype of such a table.
	 * way, so we can't handle nodes whose type is the rowtype of such a
	 * table.
	 */
	if (!context->allow_restricted)
	{

@ -1535,6 +1537,7 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
		foreach(opid, rcexpr->opnos)
		{
			Oid opfuncid = get_opcode(lfirst_oid(opid));

			if (parallel_too_dangerous(func_parallel(opfuncid), context))
				return true;
		}

@ -1870,8 +1873,8 @@ contain_leaked_vars_walker(Node *node, void *context)

			/*
			 * WHERE CURRENT OF doesn't contain function calls. Moreover, it
			 * is important that this can be pushed down into a
			 * security_barrier view, since the planner must always generate
			 * a TID scan when CURRENT OF is present -- c.f. cost_tidscan.
			 * security_barrier view, since the planner must always generate a
			 * TID scan when CURRENT OF is present -- c.f. cost_tidscan.
			 */
			return false;

@ -287,15 +287,14 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
	if (like_found)
	{
		/*
		 * To match INHERITS, the existence of any LIKE table with OIDs
		 * causes the new table to have oids. For the same reason,
		 * WITH/WITHOUT OIDs is also ignored with LIKE. We prepend
		 * because the first oid option list entry is honored. Our
		 * prepended WITHOUT OIDS clause will be overridden if an
		 * inherited table has oids.
		 * To match INHERITS, the existence of any LIKE table with OIDs causes
		 * the new table to have oids. For the same reason, WITH/WITHOUT OIDs
		 * is also ignored with LIKE. We prepend because the first oid option
		 * list entry is honored. Our prepended WITHOUT OIDS clause will be
		 * overridden if an inherited table has oids.
		 */
		stmt->options = lcons(makeDefElem("oids",
						  (Node *)makeInteger(cxt.hasoids)), stmt->options);
						  (Node *) makeInteger(cxt.hasoids)), stmt->options);
	}

	foreach(elements, stmt->tableElts)

@ -305,6 +304,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
		if (nodeTag(element) == T_Constraint)
			transformTableConstraint(&cxt, (Constraint *) element);
	}

	/*
	 * transformIndexConstraints wants cxt.alist to contain only index
	 * statements, so transfer anything we already have into save_alist.

@ -1949,8 +1949,8 @@ transformCheckConstraints(CreateStmtContext *cxt, bool skipValidation)

	/*
	 * If creating a new table, we can safely skip validation of check
	 * constraints, and nonetheless mark them valid. (This will override
	 * any user-supplied NOT VALID flag.)
	 * constraints, and nonetheless mark them valid. (This will override any
	 * user-supplied NOT VALID flag.)
	 */
	if (skipValidation)
	{

@ -35,8 +35,7 @@ pg_spinlock_barrier(void)
	 *
	 * We use kill(0) for the fallback barrier as we assume that kernels on
	 * systems old enough to require fallback barrier support will include an
	 * appropriate barrier while checking the existence of the postmaster
	 * pid.
	 * appropriate barrier while checking the existence of the postmaster pid.
	 */
	(void) kill(PostmasterPid, 0);
}

@ -672,9 +672,9 @@ AutoVacLauncherMain(int argc, char *argv[])

		/*
		 * There are some conditions that we need to check before trying to
		 * start a worker. First, we need to make sure that there is a
		 * worker slot available. Second, we need to make sure that no
		 * other worker failed while starting up.
		 * start a worker. First, we need to make sure that there is a worker
		 * slot available. Second, we need to make sure that no other worker
		 * failed while starting up.
		 */

		current_time = GetCurrentTimestamp();

@ -2727,6 +2727,7 @@ pgstat_bestart(void)
	beentry->st_activity[pgstat_track_activity_query_size - 1] = '\0';
	beentry->st_progress_command = PROGRESS_COMMAND_INVALID;
	beentry->st_progress_command_target = InvalidOid;

	/*
	 * we don't zero st_progress_param here to save cycles; nobody should
	 * examine it until st_progress_command has been set to something other

@ -1182,23 +1182,22 @@ PostmasterMain(int argc, char *argv[])
	RemovePgTempFiles();

	/*
	 * Forcibly remove the files signaling a standby promotion
	 * request. Otherwise, the existence of those files triggers
	 * a promotion too early, whether a user wants that or not.
	 * Forcibly remove the files signaling a standby promotion request.
	 * Otherwise, the existence of those files triggers a promotion too early,
	 * whether a user wants that or not.
	 *
	 * This removal of files is usually unnecessary because they
	 * can exist only during a few moments during a standby
	 * promotion. However there is a race condition: if pg_ctl promote
	 * is executed and creates the files during a promotion,
	 * the files can stay around even after the server is brought up
	 * to new master. Then, if new standby starts by using the backup
	 * taken from that master, the files can exist at the server
	 * This removal of files is usually unnecessary because they can exist
	 * only during a few moments during a standby promotion. However there is
	 * a race condition: if pg_ctl promote is executed and creates the files
	 * during a promotion, the files can stay around even after the server is
	 * brought up to new master. Then, if new standby starts by using the
	 * backup taken from that master, the files can exist at the server
	 * startup and should be removed in order to avoid an unexpected
	 * promotion.
	 *
	 * Note that promotion signal files need to be removed before
	 * the startup process is invoked. Because, after that, they can
	 * be used by postmaster's SIGUSR1 signal handler.
	 * Note that promotion signal files need to be removed before the startup
	 * process is invoked. Because, after that, they can be used by
	 * postmaster's SIGUSR1 signal handler.
	 */
	RemovePromoteSignalFiles();

@ -2607,6 +2606,7 @@ pmdie(SIGNAL_ARGS)
			if (pmState == PM_RECOVERY)
			{
				SignalSomeChildren(SIGTERM, BACKEND_TYPE_BGWORKER);

				/*
				 * Only startup, bgwriter, walreceiver, possibly bgworkers,
				 * and/or checkpointer should be active in this state; we just

@ -3074,9 +3074,9 @@ CleanupBackgroundWorker(int pid,

		/*
		 * It's possible that this background worker started some OTHER
		 * background worker and asked to be notified when that worker
		 * started or stopped. If so, cancel any notifications destined
		 * for the now-dead backend.
		 * background worker and asked to be notified when that worker started
		 * or stopped. If so, cancel any notifications destined for the
		 * now-dead backend.
		 */
		if (rw->rw_backend->bgworker_notify)
			BackgroundWorkerStopNotifications(rw->rw_pid);

@ -5696,9 +5696,8 @@ maybe_start_bgworker(void)
			rw->rw_crashed_at = 0;

			/*
			 * Allocate and assign the Backend element. Note we
			 * must do this before forking, so that we can handle out of
			 * memory properly.
			 * Allocate and assign the Backend element. Note we must do this
			 * before forking, so that we can handle out of memory properly.
			 */
			if (!assign_backendlist_entry(rw))
				return;

@ -522,7 +522,8 @@ DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
	snapshot = SnapBuildGetOrBuildSnapshot(builder, xid);
	ReorderBufferQueueMessage(ctx->reorder, xid, snapshot, buf->endptr,
							  message->transactional,
							  message->message, /* first part of message is prefix */
							  message->message,		/* first part of message is
													 * prefix */
							  message->message_size,
							  message->message + message->prefix_size);
}

@ -1836,10 +1836,10 @@ ReorderBufferImmediateInvalidation(ReorderBuffer *rb, uint32 ninvalidations,
		BeginInternalSubTransaction("replay");

	/*
	 * Force invalidations to happen outside of a valid transaction - that
	 * way entries will just be marked as invalid without accessing the
	 * catalog. That's advantageous because we don't need to setup the
	 * full state necessary for catalog access.
	 * Force invalidations to happen outside of a valid transaction - that way
	 * entries will just be marked as invalid without accessing the catalog.
	 * That's advantageous because we don't need to setup the full state
	 * necessary for catalog access.
	 */
	if (use_subtxn)
		AbortCurrentTransaction();

@ -2543,7 +2543,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
				change->data.msg.prefix = MemoryContextAlloc(rb->context,
															 prefix_size);
				memcpy(change->data.msg.prefix, data, prefix_size);
				Assert(change->data.msg.prefix[prefix_size-1] == '\0');
				Assert(change->data.msg.prefix[prefix_size - 1] == '\0');
				data += prefix_size;

				/* read the messsage */

@ -230,11 +230,11 @@ ReplicationSlotCreate(const char *name, bool db_specific,
	ReplicationSlotValidateName(name, ERROR);

	/*
	 * If some other backend ran this code concurrently with us, we'd likely both
	 * allocate the same slot, and that would be bad. We'd also be at risk of
	 * missing a name collision. Also, we don't want to try to create a new
	 * slot while somebody's busy cleaning up an old one, because we might
	 * both be monkeying with the same directory.
	 * If some other backend ran this code concurrently with us, we'd likely
	 * both allocate the same slot, and that would be bad. We'd also be at
	 * risk of missing a name collision. Also, we don't want to try to create
	 * a new slot while somebody's busy cleaning up an old one, because we
	 * might both be monkeying with the same directory.
	 */
	LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);

@ -533,6 +533,7 @@ void
ReplicationSlotMarkDirty(void)
{
	ReplicationSlot *slot = MyReplicationSlot;

	Assert(MyReplicationSlot != NULL);

	SpinLockAcquire(&slot->mutex);

@ -212,8 +212,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
		/*
		 * If a wait for synchronous replication is pending, we can neither
		 * acknowledge the commit nor raise ERROR or FATAL. The latter would
		 * lead the client to believe that the transaction aborted, which
		 * is not true: it's already committed locally. The former is no good
		 * lead the client to believe that the transaction aborted, which is
		 * not true: it's already committed locally. The former is no good
		 * either: the client has requested synchronous replication, and is
		 * entitled to assume that an acknowledged commit is also replicated,
		 * which might not be true. So in this case we issue a WARNING (which

@ -400,8 +400,8 @@ SyncRepReleaseWaiters(void)
	/*
	 * If this WALSender is serving a standby that is not on the list of
	 * potential sync standbys then we have nothing to do. If we are still
	 * starting up, still running base backup or the current flush position
	 * is still invalid, then leave quickly also.
	 * starting up, still running base backup or the current flush position is
	 * still invalid, then leave quickly also.
	 */
	if (MyWalSnd->sync_standby_priority == 0 ||
		MyWalSnd->state < WALSNDSTATE_STREAMING ||

@ -412,21 +412,21 @@ SyncRepReleaseWaiters(void)
	}

	/*
	 * We're a potential sync standby. Release waiters if there are
	 * enough sync standbys and we are considered as sync.
	 * We're a potential sync standby. Release waiters if there are enough
	 * sync standbys and we are considered as sync.
	 */
	LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);

	/*
	 * Check whether we are a sync standby or not, and calculate
	 * the oldest positions among all sync standbys.
	 * Check whether we are a sync standby or not, and calculate the oldest
	 * positions among all sync standbys.
	 */
	got_oldest = SyncRepGetOldestSyncRecPtr(&writePtr, &flushPtr,
											&applyPtr, &am_sync);

	/*
	 * If we are managing a sync standby, though we weren't
	 * prior to this, then announce we are now a sync standby.
	 * If we are managing a sync standby, though we weren't prior to this,
	 * then announce we are now a sync standby.
	 */
	if (announce_next_takeover && am_sync)
	{

@ -513,10 +513,10 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
	}

	/*
	 * Scan through all sync standbys and calculate the oldest
	 * Write, Flush and Apply positions.
	 * Scan through all sync standbys and calculate the oldest Write, Flush
	 * and Apply positions.
	 */
	foreach (cell, sync_standbys)
	foreach(cell, sync_standbys)
	{
		WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
		XLogRecPtr write;

@ -562,8 +562,8 @@ SyncRepGetSyncStandbys(bool *am_sync)
	int priority;
	int i;
	bool am_in_pending = false;
	volatile WalSnd *walsnd;	/* Use volatile pointer to prevent
								 * code rearrangement */
	volatile WalSnd *walsnd;	/* Use volatile pointer to prevent code
								 * rearrangement */

	/* Set default result */
	if (am_sync != NULL)

@ -577,9 +577,9 @@ SyncRepGetSyncStandbys(bool *am_sync)
	next_highest_priority = lowest_priority + 1;

	/*
	 * Find the sync standbys which have the highest priority (i.e, 1).
	 * Also store all the other potential sync standbys into the pending list,
	 * in order to scan it later and find other sync standbys from it quickly.
	 * Find the sync standbys which have the highest priority (i.e, 1). Also
	 * store all the other potential sync standbys into the pending list, in
	 * order to scan it later and find other sync standbys from it quickly.
	 */
	for (i = 0; i < max_wal_senders; i++)
	{

@ -603,9 +603,9 @@ SyncRepGetSyncStandbys(bool *am_sync)
			continue;

		/*
		 * If the priority is equal to 1, consider this standby as sync
		 * and append it to the result. Otherwise append this standby
		 * to the pending list to check if it's actually sync or not later.
		 * If the priority is equal to 1, consider this standby as sync and
		 * append it to the result. Otherwise append this standby to the
		 * pending list to check if it's actually sync or not later.
		 */
		if (this_priority == 1)
		{

@ -626,10 +626,10 @@ SyncRepGetSyncStandbys(bool *am_sync)

			/*
			 * Track the highest priority among the standbys in the pending
			 * list, in order to use it as the starting priority for later scan
			 * of the list. This is useful to find quickly the sync standbys
			 * from the pending list later because we can skip unnecessary
			 * scans for the unused priorities.
			 * list, in order to use it as the starting priority for later
			 * scan of the list. This is useful to find quickly the sync
			 * standbys from the pending list later because we can skip
			 * unnecessary scans for the unused priorities.
			 */
			if (this_priority < next_highest_priority)
				next_highest_priority = this_priority;

@ -685,8 +685,8 @@ SyncRepGetSyncStandbys(bool *am_sync)

			/*
			 * We should always exit here after the scan of pending list
			 * starts because we know that the list has enough elements
			 * to reach SyncRepConfig->num_sync.
			 * starts because we know that the list has enough elements to
			 * reach SyncRepConfig->num_sync.
			 */
			if (list_length(result) == SyncRepConfig->num_sync)
			{

@ -695,8 +695,8 @@ SyncRepGetSyncStandbys(bool *am_sync)
			}

			/*
			 * Remove the entry for this sync standby from the list
			 * to prevent us from looking at the same entry again.
			 * Remove the entry for this sync standby from the list to
			 * prevent us from looking at the same entry again.
			 */
			pending = list_delete_cell(pending, cell, prev);
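The SyncRepGetSyncStandbys() hunks above all rewrap one algorithm: standbys at priority 1 become sync immediately, everything else goes onto a pending list, and that list is then swept in ascending priority order, with the tracked next-highest priority letting the scan skip values no standby has. A self-contained sketch of that selection over a plain array follows; the fixed-size array and names are illustrative, not the walsender structures.

	#include <stdio.h>

	#define MAX_SENDERS 8

	/*
	 * Select up to "num_sync" standbys in ascending priority order
	 * (priority 0 = not a candidate). Fills "result" with their indexes
	 * and returns how many were chosen: priority-1 entries are taken
	 * first, then the remaining priorities are swept upward, jumping
	 * straight to the next priority anyone actually has.
	 */
	static int
	choose_sync_standbys(const int priority[MAX_SENDERS], int num_sync,
						 int result[MAX_SENDERS])
	{
		int			chosen = 0;
		int			level = 1;
		int			i;

		while (chosen < num_sync)
		{
			int			next_level = 0;

			for (i = 0; i < MAX_SENDERS && chosen < num_sync; i++)
			{
				if (priority[i] == level)
					result[chosen++] = i;
				else if (priority[i] > level &&
						 (next_level == 0 || priority[i] < next_level))
					next_level = priority[i];	/* next occupied priority */
			}
			if (next_level == 0)
				break;			/* no more candidates */
			level = next_level;
		}
		return chosen;
	}

	int
	main(void)
	{
		int			prio[MAX_SENDERS] = {0, 3, 1, 7, 1, 0, 3, 0};
		int			result[MAX_SENDERS];
		int			n = choose_sync_standbys(prio, 3, result);
		int			i;

		for (i = 0; i < n; i++)
			printf("standby %d (priority %d)\n", result[i], prio[result[i]]);
		return 0;
	}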
@ -475,8 +475,8 @@ WalReceiverMain(void)
					/*
					 * The recovery process has asked us to send apply
					 * feedback now. Make sure the flag is really set to
					 * false in shared memory before sending the reply,
					 * so we don't miss a new request for a reply.
					 * false in shared memory before sending the reply, so
					 * we don't miss a new request for a reply.
					 */
					walrcv->force_reply = false;
					pg_memory_barrier();

@ -1379,8 +1379,8 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS)
	if (!superuser())
	{
		/*
		 * Only superusers can see details. Other users only get the pid
		 * value to know whether it is a WAL receiver, but no details.
		 * Only superusers can see details. Other users only get the pid value
		 * to know whether it is a WAL receiver, but no details.
		 */
		MemSet(&nulls[1], true, PG_STAT_GET_WAL_RECEIVER_COLS - 1);
	}

@ -414,8 +414,8 @@ DefineQueryRewrite(char *rulename,
		 * any triggers, indexes, child tables, policies, or RLS enabled.
		 * (Note: these tests are too strict, because they will reject
		 * relations that once had such but don't anymore. But we don't
		 * really care, because this whole business of converting relations
		 * to views is just a kluge to allow dump/reload of views that
		 * really care, because this whole business of converting relations to
		 * views is just a kluge to allow dump/reload of views that
		 * participate in circular dependencies.)
		 */
		if (event_relation->rd_rel->relkind != RELKIND_VIEW &&

@ -170,22 +170,24 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
	 * visibility of records) associated with multiple command types (see
	 * specific cases below).
	 *
	 * When considering the order in which to apply these USING policies,
	 * we prefer to apply higher privileged policies, those which allow the
	 * user to lock records (UPDATE and DELETE), first, followed by policies
	 * which don't (SELECT).
	 * When considering the order in which to apply these USING policies, we
	 * prefer to apply higher privileged policies, those which allow the user
	 * to lock records (UPDATE and DELETE), first, followed by policies which
	 * don't (SELECT).
	 *
	 * Note that the optimizer is free to push down and reorder quals which
	 * use leakproof functions.
	 *
	 * In all cases, if there are no policy clauses allowing access to rows in
	 * the table for the specific type of operation, then a single always-false
	 * clause (a default-deny policy) will be added (see add_security_quals).
	 * the table for the specific type of operation, then a single
	 * always-false clause (a default-deny policy) will be added (see
	 * add_security_quals).
	 */

	/*
	 * For a SELECT, if UPDATE privileges are required (eg: the user has
	 * specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals first.
	 * specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals
	 * first.
	 *
	 * This way, we filter out any records from the SELECT FOR SHARE/UPDATE
	 * which the user does not have access to via the UPDATE USING policies,

@ -232,8 +234,8 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
	 * a WHERE clause which involves columns from the relation), we collect up
	 * CMD_SELECT policies and add them via add_security_quals first.
	 *
	 * This way, we filter out any records which are not visible through an ALL
	 * or SELECT USING policy.
	 * This way, we filter out any records which are not visible through an
	 * ALL or SELECT USING policy.
	 */
	if ((commandType == CMD_UPDATE || commandType == CMD_DELETE) &&
		rte->requiredPerms & ACL_SELECT)

@ -272,9 +274,9 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
								   hasSubLinks);

		/*
		 * Get and add ALL/SELECT policies, if SELECT rights are required
		 * for this relation (eg: when RETURNING is used). These are added as
		 * WCO policies rather than security quals to ensure that an error is
		 * Get and add ALL/SELECT policies, if SELECT rights are required for
		 * this relation (eg: when RETURNING is used). These are added as WCO
		 * policies rather than security quals to ensure that an error is
		 * raised if a policy is violated; otherwise, we might end up silently
		 * dropping rows to be added.
		 */

@ -324,11 +326,11 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index,
											   hasSubLinks);

			/*
			 * Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK
			 * WCOs to ensure they are considered when taking the UPDATE
			 * path of an INSERT .. ON CONFLICT DO UPDATE, if SELECT
			 * rights are required for this relation, also as WCO policies,
			 * again, to avoid silently dropping data. See above.
			 * Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK WCOs
			 * to ensure they are considered when taking the UPDATE path of an
			 * INSERT .. ON CONFLICT DO UPDATE, if SELECT rights are required
			 * for this relation, also as WCO policies, again, to avoid
			 * silently dropping data. See above.
			 */
			if (rte->requiredPerms & ACL_SELECT)
			{

@ -427,8 +429,8 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id,
	}

	/*
	 * Add this policy to the list of permissive policies if it
	 * applies to the specified role.
	 * Add this policy to the list of permissive policies if it applies to
	 * the specified role.
	 */
	if (cmd_matches && check_role_for_policy(policy->roles, user_id))
		*permissive_policies = lappend(*permissive_policies, policy);

@ -498,6 +500,7 @@ sort_policies_by_name(List *policies)
	foreach(item, policies)
	{
		RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item);

		pols[ii++] = *policy;
	}

@ -551,8 +554,8 @@ add_security_quals(int rt_index,
	Expr *rowsec_expr;

	/*
	 * First collect up the permissive quals. If we do not find any permissive
	 * policies then no rows are visible (this is handled below).
	 * First collect up the permissive quals. If we do not find any
	 * permissive policies then no rows are visible (this is handled below).
	 */
	foreach(item, permissive_policies)
	{

@ -577,8 +580,8 @@ add_security_quals(int rt_index,
	/*
	 * We now know that permissive policies exist, so we can now add
	 * security quals based on the USING clauses from the restrictive
	 * policies. Since these need to be "AND"d together, we can
	 * just add them one at a time.
	 * policies. Since these need to be "AND"d together, we can just add
	 * them one at a time.
	 */
	foreach(item, restrictive_policies)
	{
@ -608,6 +611,7 @@ add_security_quals(int rt_index,
|
||||
*securityQuals = list_append_unique(*securityQuals, rowsec_expr);
|
||||
}
|
||||
else
|
||||
|
||||
/*
|
||||
* A permissive policy must exist for rows to be visible at all.
|
||||
* Therefore, if there were no permissive policies found, return a
|
||||
@ -668,11 +672,11 @@ add_with_check_options(Relation rel,
|
||||
}
|
||||
|
||||
/*
|
||||
* There must be at least one permissive qual found or no rows are
|
||||
* allowed to be added. This is the same as in add_security_quals.
|
||||
* There must be at least one permissive qual found or no rows are allowed
|
||||
* to be added. This is the same as in add_security_quals.
|
||||
*
|
||||
* If there are no permissive_quals then we fall through and return a single
|
||||
* 'false' WCO, preventing all new rows.
|
||||
* If there are no permissive_quals then we fall through and return a
|
||||
* single 'false' WCO, preventing all new rows.
|
||||
*/
|
||||
if (permissive_quals != NIL)
|
||||
{
|
||||
|
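The rowsecurity.c comments above restate one rule several times: quals from permissive policies are OR'ed together, quals from restrictive policies are AND'ed on top, and if no permissive policy applies at all a constant-false qual denies every row. A minimal standalone sketch of that combination rule, with an invented Qual callback type standing in for the real expression-node machinery:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for a policy's USING qual; not a PostgreSQL type. */
typedef bool (*Qual) (int row);

static bool owner_matches(int row) { return row % 2 == 0; }
static bool not_archived(int row) { return row < 100; }

/*
 * Permissive quals are OR'd (any one grants visibility), restrictive
 * quals are AND'd on top, and no permissive quals means default deny.
 */
static bool
row_is_visible(int row, Qual *permissive, int nperm,
			   Qual *restrictive, int nrestr)
{
	bool		visible = false;
	int			i;

	if (nperm == 0)
		return false;			/* the always-false default-deny clause */
	for (i = 0; i < nperm; i++)
		visible = visible || permissive[i](row);
	for (i = 0; i < nrestr; i++)
		visible = visible && restrictive[i](row);
	return visible;
}

int
main(void)
{
	Qual		perm[] = {owner_matches};
	Qual		restr[] = {not_archived};

	printf("%d\n", row_is_visible(42, perm, 1, restr, 1));	/* 1: visible */
	printf("%d\n", row_is_visible(43, perm, 1, restr, 1));	/* 0: filtered */
	return 0;
}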
@ -187,11 +187,12 @@ BufferShmemSize(void)

/*
* It would be nice to include the I/O locks in the BufferDesc, but that
* would increase the size of a BufferDesc to more than one cache line, and
* benchmarking has shown that keeping every BufferDesc aligned on a cache
* line boundary is important for performance. So, instead, the array of
* I/O locks is allocated in a separate tranche. Because those locks are
* not highly contentended, we lay out the array with minimal padding.
* would increase the size of a BufferDesc to more than one cache line,
* and benchmarking has shown that keeping every BufferDesc aligned on a
* cache line boundary is important for performance. So, instead, the
* array of I/O locks is allocated in a separate tranche. Because those
* locks are not highly contentended, we lay out the array with minimal
* padding.
*/
size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
/* to allow aligning the above */
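The BufferShmemSize comment above trades cache-line alignment for space: buffer descriptors stay one per cache line, while the separate I/O lock array is packed minimally. A standalone sketch of the two layouts, assuming a 64-byte cache line and an invented DemoLock type (the real LWLock layout is different):

#include <stdio.h>

#define CACHE_LINE_SIZE 64		/* assumed; the real value is platform-specific */

/* Invented lock type, much smaller than a cache line. */
typedef struct DemoLock
{
	unsigned int state;
	unsigned int waiters;
} DemoLock;

/* Padded variant: one lock per cache line, as buffer descriptors are kept. */
typedef union DemoLockPadded
{
	DemoLock	lock;
	char		pad[CACHE_LINE_SIZE];
} DemoLockPadded;

int
main(void)
{
	int			n = 1024;		/* pretend NBuffers */

	printf("minimally padded: %zu bytes\n", n * sizeof(DemoLock));
	printf("cache-line padded: %zu bytes\n", n * sizeof(DemoLockPadded));
	return 0;
}

For rarely contended locks the severalfold size difference matters more than the false sharing the padding would avoid, which is the trade the comment describes.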
@ -219,9 +219,9 @@ UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum,
fsm_update_recursive(rel, addr, new_cat);

/*
* Get the last block number on this FSM page. If that's greater
* than or equal to our endBlkNum, we're done. Otherwise, advance
* to the first block on the next page.
* Get the last block number on this FSM page. If that's greater than
* or equal to our endBlkNum, we're done. Otherwise, advance to the
* first block on the next page.
*/
lastBlkOnPage = fsm_get_lastblckno(rel, addr);
if (lastBlkOnPage >= endBlkNum)
@ -841,8 +841,8 @@ fsm_get_lastblckno(Relation rel, FSMAddress addr)
int slot;

/*
* Get the last slot number on the given address and convert that to
* block number
* Get the last slot number on the given address and convert that to block
* number
*/
slot = SlotsPerFSMPage - 1;
return fsm_get_heap_blk(addr, slot);
@ -862,8 +862,8 @@ fsm_update_recursive(Relation rel, FSMAddress addr, uint8 new_cat)
return;

/*
* Get the parent page and our slot in the parent page, and
* update the information in that.
* Get the parent page and our slot in the parent page, and update the
* information in that.
*/
parent = fsm_get_parent(addr, &parentslot);
fsm_set_and_search(rel, parent, parentslot, new_cat, 0);
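The UpdateFreeSpaceMap hunk above steps through the FSM one page at a time: update the page, find the last heap block it covers, and stop once that reaches endBlkNum. A standalone sketch of the loop shape, with an invented slots-per-page constant and a simplified last-block computation:

#include <stdio.h>

#define SLOTS_PER_FSM_PAGE 4000	/* invented; the real value depends on BLCKSZ */

typedef unsigned int BlockNumber;

/* Last heap block covered by the FSM page holding blkno's slot. */
static BlockNumber
last_block_on_fsm_page(BlockNumber blkno)
{
	return (blkno / SLOTS_PER_FSM_PAGE + 1) * SLOTS_PER_FSM_PAGE - 1;
}

int
main(void)
{
	BlockNumber endBlkNum = 9500;
	BlockNumber blkno = 100;

	for (;;)
	{
		BlockNumber lastBlkOnPage = last_block_on_fsm_page(blkno);

		printf("update FSM page covering up to block %u\n", lastBlkOnPage);
		if (lastBlkOnPage >= endBlkNum)
			break;				/* done */
		blkno = lastBlkOnPage + 1;	/* first block on the next page */
	}
	return 0;
}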
@ -245,8 +245,8 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
}

/*
* OK, the control segment looks basically valid, so we can use it to
* get a list of segments that need to be removed.
* OK, the control segment looks basically valid, so we can use it to get
* a list of segments that need to be removed.
*/
nitems = old_control->nitems;
for (i = 0; i < nitems; ++i)
@ -642,8 +642,8 @@ ProcArrayInitRecovery(TransactionId initializedUptoXID)
Assert(TransactionIdIsNormal(initializedUptoXID));

/*
* we set latestObservedXid to the xid SUBTRANS has been initialized up to,
* so we can extend it from that point onwards in
* we set latestObservedXid to the xid SUBTRANS has been initialized up
* to, so we can extend it from that point onwards in
* RecordKnownAssignedTransactionIds, and when we get consistent in
* ProcArrayApplyRecoveryInfo().
*/
@ -2591,8 +2591,8 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
/*
* We ignore an invalid pxmin because this means that backend has
* no snapshot currently. We hold a Share lock to avoid contention
* with users taking snapshots. That is not a problem because
* the current xmin is always at least one higher than the latest
* with users taking snapshots. That is not a problem because the
* current xmin is always at least one higher than the latest
* removed xid, so any new snapshot would never conflict with the
* test here.
*/
|
||||
* We're already behind, so clear a path as quickly as possible.
|
||||
*/
|
||||
VirtualTransactionId *backends;
|
||||
|
||||
backends = GetLockConflicts(&locktag, AccessExclusiveLock);
|
||||
ResolveRecoveryConflictWithVirtualXIDs(backends,
|
||||
PROCSIG_RECOVERY_CONFLICT_LOCK);
|
||||
|
@ -1153,13 +1153,13 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
uint32 partition = LockHashPartition(hashcode);

/*
* It might seem unsafe to access proclock->groupLeader without a lock,
* but it's not really. Either we are initializing a proclock on our
* own behalf, in which case our group leader isn't changing because
* the group leader for a process can only ever be changed by the
* process itself; or else we are transferring a fast-path lock to the
* main lock table, in which case that process can't change it's lock
* group leader without first releasing all of its locks (and in
* It might seem unsafe to access proclock->groupLeader without a
* lock, but it's not really. Either we are initializing a proclock
* on our own behalf, in which case our group leader isn't changing
* because the group leader for a process can only ever be changed by
* the process itself; or else we are transferring a fast-path lock to
* the main lock table, in which case that process can't change it's
* lock group leader without first releasing all of its locks (and in
* particular the one we are currently transferring).
*/
proclock->groupLeader = proc->lockGroupLeader != NULL ?
@ -1319,10 +1319,9 @@ LockCheckConflicts(LockMethod lockMethodTable,
}

/*
* Rats. Something conflicts. But it could still be my own lock, or
* a lock held by another member of my locking group. First, figure out
* how many conflicts remain after subtracting out any locks I hold
* myself.
* Rats. Something conflicts. But it could still be my own lock, or a
* lock held by another member of my locking group. First, figure out how
* many conflicts remain after subtracting out any locks I hold myself.
*/
myLocks = proclock->holdMask;
for (i = 1; i <= numLockModes; i++)
@ -1357,9 +1356,10 @@ LockCheckConflicts(LockMethod lockMethodTable,
/*
* Locks held in conflicting modes by members of our own lock group are
* not real conflicts; we can subtract those out and see if we still have
* a conflict. This is O(N) in the number of processes holding or awaiting
* locks on this object. We could improve that by making the shared memory
* state more complex (and larger) but it doesn't seem worth it.
* a conflict. This is O(N) in the number of processes holding or
* awaiting locks on this object. We could improve that by making the
* shared memory state more complex (and larger) but it doesn't seem worth
* it.
*/
procLocks = &(lock->procLocks);
otherproclock = (PROCLOCK *)
@ -2583,8 +2583,8 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
*
* proc->databaseId is set at backend startup time and never changes
* thereafter, so it might be safe to perform this test before
* acquiring &proc->backendLock. In particular, it's certainly safe to
* assume that if the target backend holds any fast-path locks, it
* acquiring &proc->backendLock. In particular, it's certainly safe
* to assume that if the target backend holds any fast-path locks, it
* must have performed a memory-fencing operation (in particular, an
* LWLock acquisition) since setting proc->databaseId. However, it's
* less clear that our backend is certain to have performed a memory
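The LockCheckConflicts comments above describe conflict detection as mask arithmetic: a requested mode conflicts only with modes held by somebody else, so locks held by me (or by my lock group) are subtracted before deciding to sleep. A two-mode standalone sketch of that subtraction (the real lock.c tracks eight modes and per-mode hold counts):

#include <stdio.h>

#define LOCK_SHARED		1
#define LOCK_EXCLUSIVE	2

/* conflictTab[mode] is the bitmask of modes that conflict with mode. */
static const int conflictTab[] = {
	0,
	1 << LOCK_EXCLUSIVE,		/* SHARED conflicts with EXCLUSIVE */
	(1 << LOCK_SHARED) | (1 << LOCK_EXCLUSIVE)	/* EXCLUSIVE conflicts with both */
};

/* Conflict check that ignores modes held by ourselves or our group. */
static int
has_real_conflict(int reqMode, int grantMask, int myHoldMask)
{
	int			otherMask = grantMask & ~myHoldMask;

	return (conflictTab[reqMode] & otherMask) != 0;
}

int
main(void)
{
	int			granted = 1 << LOCK_EXCLUSIVE;

	/* An exclusive lock held by another backend blocks a share request... */
	printf("%d\n", has_real_conflict(LOCK_SHARED, granted, 0));	/* 1 */
	/* ...but not when we (or a group member) are the exclusive holder. */
	printf("%d\n", has_real_conflict(LOCK_SHARED, granted, granted));	/* 0 */
	return 0;
}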
@ -760,8 +760,8 @@ GetLWLockIdentifier(uint8 classId, uint16 eventId)

/*
* It is quite possible that user has registered tranche in one of the
* backends (e.g. by allocating lwlocks in dynamic shared memory) but
* not all of them, so we can't assume the tranche is registered here.
* backends (e.g. by allocating lwlocks in dynamic shared memory) but not
* all of them, so we can't assume the tranche is registered here.
*/
if (eventId >= LWLockTranchesAllocated ||
LWLockTrancheArray[eventId]->name == NULL)
@ -288,7 +288,7 @@ InitProcGlobal(void)
void
InitProcess(void)
{
PGPROC * volatile * procgloballist;
PGPROC *volatile * procgloballist;

/*
* ProcGlobal should be set up already (if we are a backend, we inherit
@ -342,8 +342,8 @@ InitProcess(void)
MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];

/*
* Cross-check that the PGPROC is of the type we expect; if this were
* not the case, it would get returned to the wrong list.
* Cross-check that the PGPROC is of the type we expect; if this were not
* the case, it would get returned to the wrong list.
*/
Assert(MyProc->procgloballist == procgloballist);

@ -781,7 +781,7 @@ static void
ProcKill(int code, Datum arg)
{
PGPROC *proc;
PGPROC * volatile * procgloballist;
PGPROC *volatile * procgloballist;

Assert(MyProc != NULL);

@ -358,7 +358,7 @@ getNextFlagFromString(IspellDict *Conf, char **sflagset, char *sflag)

maxstep = (Conf->flagMode == FM_LONG) ? 2 : 1;

while(**sflagset)
while (**sflagset)
{
switch (Conf->flagMode)
{
@ -527,6 +527,7 @@ NIImportDictionary(IspellDict *Conf, const char *filename)
{
char *s,
*pstr;

/* Set of affix flags */
const char *flag;

@ -620,9 +621,9 @@ FindWord(IspellDict *Conf, const char *word, char *affixflag, int flag)
if (flag == 0)
{
/*
* The word can be formed only with another word.
* And in the flag parameter there is not a sign
* that we search compound words.
* The word can be formed only with another word. And
* in the flag parameter there is not a sign that we
* search compound words.
*/
if (StopMiddle->compoundflag & FF_COMPOUNDONLY)
return 0;
@ -671,7 +672,7 @@ FindWord(IspellDict *Conf, const char *word, char *affixflag, int flag)
* type: FF_SUFFIX or FF_PREFIX.
*/
static void
NIAddAffix(IspellDict *Conf, const char* flag, char flagflags, const char *mask,
NIAddAffix(IspellDict *Conf, const char *flag, char flagflags, const char *mask,
const char *find, const char *repl, int type)
{
AFFIX *Affix;
@ -1161,9 +1162,10 @@ getAffixFlagSet(IspellDict *Conf, char *s)
errmsg("invalid affix alias \"%s\"", s)));

if (curaffix > 0 && curaffix <= Conf->nAffixData)

/*
* Do not subtract 1 from curaffix
* because empty string was added in NIImportOOAffixes
* Do not subtract 1 from curaffix because empty string was added
* in NIImportOOAffixes
*/
return Conf->AffixData[curaffix];
else
@ -1597,6 +1599,7 @@ static uint32
makeCompoundFlags(IspellDict *Conf, int affix)
{
char *str = Conf->AffixData[affix];

return (getCompoundAffixFlagValue(Conf, str) & FF_COMPOUNDFLAGMASK);
}

@ -1700,8 +1703,8 @@ NISortDictionary(IspellDict *Conf)
/* compress affixes */

/*
* If we use flag aliases then we need to use Conf->AffixData filled
* in the NIImportOOAffixes().
* If we use flag aliases then we need to use Conf->AffixData filled in
* the NIImportOOAffixes().
*/
if (Conf->useFlagAliases)
{
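Several spell.c hunks above deal with flag sets whose stride depends on the flag mode: FM_LONG flags are two characters wide, others one (the maxstep computation in getNextFlagFromString). A standalone sketch of that stepping, covering only two of the real modes:

#include <stdio.h>

typedef enum
{
	FM_CHAR,					/* one character per affix flag */
	FM_LONG						/* two characters per affix flag */
} FlagMode;						/* the real dictionary also has FM_NUM */

/* Walk a flag set, one flag per step of one or two characters. */
static void
print_flags(const char *sflagset, FlagMode mode)
{
	int			maxstep = (mode == FM_LONG) ? 2 : 1;
	const char *p = sflagset;

	while (*p)
	{
		if (mode == FM_LONG && p[1] == '\0')
			break;				/* truncated flag; the real code raises an error */
		printf("flag: %.*s\n", maxstep, p);
		p += maxstep;
	}
}

int
main(void)
{
	print_flags("ABC", FM_CHAR);	/* A, B, C */
	print_flags("AABB", FM_LONG);	/* AA, BB */
	return 0;
}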
@ -295,8 +295,8 @@ pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval,
while (count < prs.curwords)
{
/*
* Were any stop words removed? If so, fill empty positions
* with placeholders linked by an appropriate operator.
* Were any stop words removed? If so, fill empty positions with
* placeholders linked by an appropriate operator.
*/
if (pos > 0 && pos + 1 < prs.words[count].pos.pos)
{
@ -267,6 +267,7 @@ datumEstimateSpace(Datum value, bool isnull, bool typByVal, int typLen)
else if (VARATT_IS_EXTERNAL_EXPANDED(value))
{
ExpandedObjectHeader *eoh = DatumGetEOHP(value);

sz += EOH_get_flat_size(eoh);
}
else
@ -254,7 +254,7 @@ contain2D(RangeBox *range_box, Range *query)

/* Can any rectangle from rect_box contain this argument? */
static bool
contain4D(RectBox *rect_box, RangeBox * query)
contain4D(RectBox *rect_box, RangeBox *query)
{
return contain2D(&rect_box->range_box_x, &query->left) &&
contain2D(&rect_box->range_box_y, &query->right);
@ -442,8 +442,8 @@ spg_box_quad_picksplit(PG_FUNCTION_ARGS)
out->leafTupleDatums = palloc(sizeof(Datum) * in->nTuples);

/*
* Assign ranges to corresponding nodes according to quadrants
* relative to the "centroid" range
* Assign ranges to corresponding nodes according to quadrants relative to
* the "centroid" range
*/
for (i = 0; i < in->nTuples; i++)
{
@ -484,8 +484,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
}

/*
* We are saving the traversal value or initialize it an unbounded
* one, if we have just begun to walk the tree.
* We are saving the traversal value or initialize it an unbounded one, if
* we have just begun to walk the tree.
*/
if (in->traversalValue)
rect_box = in->traversalValue;
@ -493,8 +493,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
rect_box = initRectBox();

/*
* We are casting the prefix and queries to RangeBoxes for ease of
* the following operations.
* We are casting the prefix and queries to RangeBoxes for ease of the
* following operations.
*/
centroid = getRangeBox(DatumGetBoxP(in->prefixDatum));
queries = (RangeBox **) palloc(in->nkeys * sizeof(RangeBox *));
@ -507,9 +507,9 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes);

/*
* We switch memory context, because we want to allocate memory for
* new traversal values (next_rect_box) and pass these pieces of
* memory to further call of this function.
* We switch memory context, because we want to allocate memory for new
* traversal values (next_rect_box) and pass these pieces of memory to
* further call of this function.
*/
old_ctx = MemoryContextSwitchTo(in->traversalMemoryContext);

@ -587,8 +587,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS)
else
{
/*
* If this node is not selected, we don't need to keep
* the next traversal value in the memory context.
* If this node is not selected, we don't need to keep the next
* traversal value in the memory context.
*/
pfree(next_rect_box);
}
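The geo_spgist.c hunks above build everything from one idea: a 2-D box is a point in 4-D space, so containment of a box decomposes into two independent range checks (contain4D above is just contain2D on the x ranges AND'ed with contain2D on the y ranges). A standalone sketch with simplified structs that only loosely mirror the real Range/RangeBox types:

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	double		low;
	double		high;
} Range;

typedef struct
{
	Range		left;			/* range of lower bounds */
	Range		right;			/* range of upper bounds */
} RangeBox;

/* Can some rectangle described by range_box contain the query range? */
static bool
contain2D(const RangeBox *range_box, const Range *query)
{
	return range_box->left.low <= query->low &&
		range_box->right.high >= query->high;
}

/* The 4-D test is two 2-D tests, one per axis. */
static bool
contain4D(const RangeBox *box_x, const RangeBox *box_y,
		  const Range *qx, const Range *qy)
{
	return contain2D(box_x, qx) && contain2D(box_y, qy);
}

int
main(void)
{
	RangeBox	bx = {{0, 5}, {5, 10}};
	RangeBox	by = {{0, 5}, {5, 10}};
	Range		qx = {2, 8};
	Range		qy = {3, 7};

	printf("%d\n", contain4D(&bx, &by, &qx, &qy));	/* 1 */
	return 0;
}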
@ -1305,7 +1305,7 @@ compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar)
case jbvBool:
if (aScalar->val.boolean == bScalar->val.boolean)
return 0;
else if (aScalar->val.boolean >bScalar->val.boolean)
else if (aScalar->val.boolean > bScalar->val.boolean)
return 1;
else
return -1;

@ -1002,8 +1002,8 @@ get_array_start(void *state)
{
/*
* Special case: we should match the entire array. We only need this
* at the outermost level because at nested levels the match will
* have been started by the outer field or array element callback.
* at the outermost level because at nested levels the match will have
* been started by the outer field or array element callback.
*/
_state->result_start = _state->lex->token_start;
}
@ -3368,9 +3368,9 @@ jsonb_concat(PG_FUNCTION_ARGS)
*it2;

/*
* If one of the jsonb is empty, just return the other if it's not
* scalar and both are of the same kind. If it's a scalar or they are
* of different kinds we need to perform the concatenation even if one is
* If one of the jsonb is empty, just return the other if it's not scalar
* and both are of the same kind. If it's a scalar or they are of
* different kinds we need to perform the concatenation even if one is
* empty.
*/
if (JB_ROOT_IS_OBJECT(jb1) == JB_ROOT_IS_OBJECT(jb2))
@ -3481,7 +3481,7 @@ jsonb_delete_idx(PG_FUNCTION_ARGS)
it = JsonbIteratorInit(&in->root);

r = JsonbIteratorNext(&it, &v, false);
Assert (r == WJB_BEGIN_ARRAY);
Assert(r == WJB_BEGIN_ARRAY);
n = v.val.array.nElems;

if (idx < 0)
@ -3868,8 +3868,8 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
if (level == path_len - 1)
{
/*
* called from jsonb_insert(), it forbids redefining
* an existsing value
* called from jsonb_insert(), it forbids redefining an
* existsing value
*/
if (op_type & (JB_PATH_INSERT_BEFORE | JB_PATH_INSERT_AFTER))
ereport(ERROR,
@ -4005,8 +4005,8 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,

/*
* We should keep current value only in case of
* JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER
* because otherwise it should be deleted or replaced
* JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER because
* otherwise it should be deleted or replaced
*/
if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_INSERT_BEFORE))
(void) pushJsonbValue(st, r, &v);
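One detail visible in the jsonb_delete_idx hunk above is the negative-index convention: after reading nElems, a negative idx is counted from the end of the array. A standalone sketch of that normalization; the clamping at the edges is a guess for illustration, not a claim about the real function's out-of-range behavior:

#include <stdio.h>

/* Map a possibly negative array index onto [0, n]. */
static int
normalize_idx(int idx, int n)
{
	if (idx < 0)
		idx = n + idx;			/* -1 means the last element */
	if (idx < 0)
		idx = 0;
	if (idx > n)
		idx = n;
	return idx;
}

int
main(void)
{
	printf("%d\n", normalize_idx(-1, 5));	/* 4 */
	printf("%d\n", normalize_idx(2, 5));	/* 2 */
	printf("%d\n", normalize_idx(-9, 5));	/* 0 after clamping */
	return 0;
}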
@ -3397,8 +3397,8 @@ numeric_combine(PG_FUNCTION_ARGS)
state1->NaNcount += state2->NaNcount;

/*
* These are currently only needed for moving aggregates, but let's
* do the right thing anyway...
* These are currently only needed for moving aggregates, but let's do
* the right thing anyway...
*/
if (state2->maxScale > state1->maxScale)
{
@ -3485,8 +3485,8 @@ numeric_avg_combine(PG_FUNCTION_ARGS)
state1->NaNcount += state2->NaNcount;

/*
* These are currently only needed for moving aggregates, but let's
* do the right thing anyway...
* These are currently only needed for moving aggregates, but let's do
* the right thing anyway...
*/
if (state2->maxScale > state1->maxScale)
{
@ -613,14 +613,14 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS)
if (has_privs_of_role(GetUserId(), beentry->st_userid))
{
values[2] = ObjectIdGetDatum(beentry->st_progress_command_target);
for(i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++)
values[i+3] = Int64GetDatum(beentry->st_progress_param[i]);
for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++)
values[i + 3] = Int64GetDatum(beentry->st_progress_param[i]);
}
else
{
nulls[2] = true;
for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++)
nulls[i+3] = true;
nulls[i + 3] = true;
}

tuplestore_putvalues(tupstore, tupdesc, values, nulls);
@ -762,11 +762,12 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
{
Datum previousCentroid;

/* We know, that in->prefixDatum in this place is varlena,
/*
* We know, that in->prefixDatum in this place is varlena,
* because it's range
*/
previousCentroid = datumCopy(in->prefixDatum, false, -1);
out->traversalValues[out->nNodes] = (void *)previousCentroid;
out->traversalValues[out->nNodes] = (void *) previousCentroid;
}
out->nodeNumbers[out->nNodes] = i - 1;
out->nNodes++;
@ -184,8 +184,8 @@ checkcondition_gin_internal(GinChkVal *gcv, QueryOperand *val, ExecPhraseData *d
int j;

/*
* if any val requiring a weight is used or caller
* needs position information then set recheck flag
* if any val requiring a weight is used or caller needs position
* information then set recheck flag
*/
if (val->weight != 0 || data != NULL)
*gcv->need_recheck = true;
@ -236,9 +236,10 @@ TS_execute_ternary(GinChkVal *gcv, QueryItem *curitem)
return !result;

case OP_PHRASE:

/*
* GIN doesn't contain any information about positions,
* treat OP_PHRASE as OP_AND with recheck requirement
* GIN doesn't contain any information about positions, treat
* OP_PHRASE as OP_AND with recheck requirement
*/
*gcv->need_recheck = true;
/* FALL THRU */
@ -136,7 +136,7 @@ parse_phrase_operator(char *buf, int16 *distance)

while (*ptr)
{
switch(state)
switch (state)
{
case PHRASE_OPEN:
Assert(t_iseq(ptr, '<'));
@ -192,7 +192,7 @@ parse_phrase_operator(char *buf, int16 *distance)
}
}

err:
err:
*distance = -1;
return buf;
}
@ -696,8 +696,8 @@ parse_tsquery(char *buf,
findoprnd(ptr, query->size, &needcleanup);

/*
* QI_VALSTOP nodes should be cleaned and
* and OP_PHRASE should be pushed down
* QI_VALSTOP nodes should be cleaned and and OP_PHRASE should be pushed
* down
*/
if (needcleanup)
return cleanup_fakeval_and_phrase(query);
@ -852,7 +852,8 @@ infix(INFIX *in, int parentPriority)
in->curpol++;
if (priority < parentPriority ||
(op == OP_PHRASE &&
(priority == parentPriority || /* phrases are not commutative! */
(priority == parentPriority || /* phrases are not
* commutative! */
parentPriority == OP_PRIORITY(OP_AND))))
{
needParenthesis = true;
@ -874,7 +875,7 @@ infix(INFIX *in, int parentPriority)
infix(in, priority);

/* print operator & right operand */
RESIZEBUF(in, 3 + (2 + 10 /* distance */) + (nrm.cur - nrm.buf));
RESIZEBUF(in, 3 + (2 + 10 /* distance */ ) + (nrm.cur - nrm.buf));
switch (op)
{
case OP_OR:
@ -923,7 +924,7 @@ tsqueryout(PG_FUNCTION_ARGS)
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
*(nrm.cur) = '\0';
nrm.op = GETOPERAND(query);
infix(&nrm, -1 /* lowest priority */);
infix(&nrm, -1 /* lowest priority */ );

PG_FREE_IF_COPY(query, 0);
PG_RETURN_CSTRING(nrm.buf);
@ -257,7 +257,9 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
else
{
NODE *res = node;
int ndistance, ldistance = 0, rdistance = 0;
int ndistance,
ldistance = 0,
rdistance = 0;

ndistance = (node->valnode->qoperator.oper == OP_PHRASE) ?
node->valnode->qoperator.distance :
@ -272,8 +274,8 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
ndistance ? &rdistance : NULL);

/*
* ndistance, ldistance and rdistance are greater than zero
* if their corresponding nodes are OP_PHRASE
* ndistance, ldistance and rdistance are greater than zero if their
* corresponding nodes are OP_PHRASE
*/

if (lresult == V_STOP && rresult == V_STOP)
@ -287,9 +289,10 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
else if (lresult == V_STOP)
{
res = node->right;

/*
* propagate distance from current node to the
* right upper subtree.
* propagate distance from current node to the right upper
* subtree.
*/
if (adddistance && ndistance)
*adddistance = rdistance;
@ -298,6 +301,7 @@ clean_fakeval_intree(NODE *node, char *result, int *adddistance)
else if (rresult == V_STOP)
{
res = node->left;

/*
* propagate distance from current node to the upper tree.
*/
@ -417,8 +421,8 @@ normalize_phrase_tree(NODE *node)
return node;

/*
* We can't swap left-right and works only with left child
* because of a <-> b != b <-> a
* We can't swap left-right and works only with left child because of
* a <-> b != b <-> a
*/

distance = node->valnode->qoperator.distance;
@ -464,7 +468,7 @@ normalize_phrase_tree(NODE *node)
/* no-op */
break;
default:
elog(ERROR,"Wrong type of tsquery node: %d",
elog(ERROR, "Wrong type of tsquery node: %d",
node->right->valnode->qoperator.oper);
}
}
@ -476,7 +480,7 @@ normalize_phrase_tree(NODE *node)
* if the node is still OP_PHRASE, check the left subtree,
* otherwise the whole node will be transformed later.
*/
switch(node->left->valnode->qoperator.oper)
switch (node->left->valnode->qoperator.oper)
{
case OP_AND:
/* (a & b) <-> c => (a <-> c) & (b <-> c) */
@ -515,7 +519,7 @@ normalize_phrase_tree(NODE *node)
/* no-op */
break;
default:
elog(ERROR,"Wrong type of tsquery node: %d",
elog(ERROR, "Wrong type of tsquery node: %d",
node->left->valnode->qoperator.oper);
}
}
@ -498,12 +498,16 @@ ts_rank_tt(PG_FUNCTION_ARGS)

typedef struct
{
union {
struct { /* compiled doc representation */
union
{
struct
{ /* compiled doc representation */
QueryItem **items;
int16 nitem;
} query;
struct { /* struct is used for preparing doc representation */
struct
{ /* struct is used for preparing doc
* representation */
QueryItem *item;
WordEntry *entry;
} map;
@ -537,8 +541,8 @@ compareDocR(const void *va, const void *vb)
typedef struct
{
bool operandexists;
bool reverseinsert; /* indicates insert order,
true means descending order */
bool reverseinsert; /* indicates insert order, true means
* descending order */
uint32 npos;
WordEntryPos pos[MAXQROPOS];
} QueryRepresentationOperand;
@ -586,7 +590,7 @@ resetQueryRepresentation(QueryRepresentation *qr, bool reverseinsert)
{
int i;

for(i = 0; i < qr->query->size; i++)
for (i = 0; i < qr->query->size; i++)
{
qr->operandData[i].operandexists = false;
qr->operandData[i].reverseinsert = reverseinsert;
@ -731,8 +735,8 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen)
doc = (DocRepresentation *) palloc(sizeof(DocRepresentation) * len);

/*
* Iterate through query to make DocRepresentaion for words and it's entries
* satisfied by query
* Iterate through query to make DocRepresentaion for words and it's
* entries satisfied by query
*/
for (i = 0; i < qr->query->size; i++)
{
@ -806,8 +810,8 @@ get_docrep(TSVector txt, QueryRepresentation *qr, int *doclen)

while (rptr - doc < cur)
{
if (rptr->pos == (rptr-1)->pos &&
rptr->data.map.entry == (rptr-1)->data.map.entry)
if (rptr->pos == (rptr - 1)->pos &&
rptr->data.map.entry == (rptr - 1)->data.map.entry)
{
storage.data.query.items[storage.data.query.nitem] = rptr->data.map.item;
storage.data.query.nitem++;
@ -276,16 +276,20 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)

switch (char_weight)
{
case 'A': case 'a':
case 'A':
case 'a':
weight = 3;
break;
case 'B': case 'b':
case 'B':
case 'b':
weight = 2;
break;
case 'C': case 'c':
case 'C':
case 'c':
weight = 1;
break;
case 'D': case 'd':
case 'D':
case 'd':
weight = 0;
break;
default:
@ -301,9 +305,9 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
&dlexemes, &nulls, &nlexemes);

/*
* Assuming that lexemes array is significantly shorter than tsvector
* we can iterate through lexemes performing binary search
* of each lexeme from lexemes in tsvector.
* Assuming that lexemes array is significantly shorter than tsvector we
* can iterate through lexemes performing binary search of each lexeme
* from lexemes in tsvector.
*/
for (i = 0; i < nlexemes; i++)
{
@ -323,6 +327,7 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
if (lex_pos >= 0 && (j = POSDATALEN(tsout, entry + lex_pos)) != 0)
{
WordEntryPos *p = POSDATAPTR(tsout, entry + lex_pos);

while (j--)
{
WEP_SETWEIGHT(*p, weight);
@ -393,7 +398,7 @@ tsvector_bsearch(const TSVector tsv, char *lexeme, int lexeme_len)

while (StopLow < StopHigh)
{
StopMiddle = (StopLow + StopHigh)/2;
StopMiddle = (StopLow + StopHigh) / 2;

cmp = tsCompareString(lexeme, lexeme_len,
STRPTR(tsv) + arrin[StopMiddle].pos,
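tsvector_bsearch above is a textbook binary search over the lexeme array, which pgindent only re-spaces. The same StopLow/StopMiddle/StopHigh shape on plain C strings, with strcmp standing in for tsCompareString:

#include <stdio.h>
#include <string.h>

/* Return the index of key in a sorted array of lexemes, or -1. */
static int
lexeme_bsearch(const char **arr, int nitems, const char *key)
{
	int			StopLow = 0;
	int			StopHigh = nitems;

	while (StopLow < StopHigh)
	{
		int			StopMiddle = (StopLow + StopHigh) / 2;
		int			cmp = strcmp(key, arr[StopMiddle]);

		if (cmp == 0)
			return StopMiddle;
		else if (cmp < 0)
			StopHigh = StopMiddle;
		else
			StopLow = StopMiddle + 1;
	}
	return -1;
}

int
main(void)
{
	const char *lexemes[] = {"cat", "dog", "fish", "horse"};

	printf("%d\n", lexeme_bsearch(lexemes, 4, "fish"));	/* 2 */
	printf("%d\n", lexeme_bsearch(lexemes, 4, "bird"));	/* -1 */
	return 0;
}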
@ -440,13 +445,15 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
*arrout;
char *data = STRPTR(tsv),
*dataout;
int i, j, k,
int i,
j,
k,
curoff;

/*
* Here we overestimates tsout size, since we don't know exact size
* occupied by positions and weights. We will set exact size later
* after a pass through TSVector.
* occupied by positions and weights. We will set exact size later after a
* pass through TSVector.
*/
tsout = (TSVector) palloc0(VARSIZE(tsv));
arrout = ARRPTR(tsout);
@ -465,10 +472,11 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
{
/*
* Here we should check whether current i is present in
* indices_to_delete or not. Since indices_to_delete is already
* sorted we can advance it index only when we have match.
* indices_to_delete or not. Since indices_to_delete is already sorted
* we can advance it index only when we have match.
*/
if (k < indices_count && i == indices_to_delete[k]){
if (k < indices_count && i == indices_to_delete[k])
{
k++;
continue;
}
@ -481,8 +489,9 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
curoff += arrin[i].len;
if (arrin[i].haspos)
{
int len = POSDATALEN(tsv, arrin+i) * sizeof(WordEntryPos) +
int len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos) +
sizeof(uint16);

curoff = SHORTALIGN(curoff);
memcpy(dataout + curoff,
STRPTR(tsv) + SHORTALIGN(arrin[i].pos + arrin[i].len),
@ -494,9 +503,10 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
}

/*
* After the pass through TSVector k should equals exactly to indices_count.
* If it isn't then the caller provided us with indices outside of
* [0, tsv->size) range and estimation of tsout's size is wrong.
* After the pass through TSVector k should equals exactly to
* indices_count. If it isn't then the caller provided us with indices
* outside of [0, tsv->size) range and estimation of tsout's size is
* wrong.
*/
Assert(k == indices_count);

@ -538,7 +548,8 @@ tsvector_delete_arr(PG_FUNCTION_ARGS)
TSVector tsin = PG_GETARG_TSVECTOR(0),
tsout;
ArrayType *lexemes = PG_GETARG_ARRAYTYPE_P(1);
int i, nlex,
int i,
nlex,
skip_count,
*skip_indices;
Datum *dlexemes;
@ -548,8 +559,8 @@ tsvector_delete_arr(PG_FUNCTION_ARGS)
&dlexemes, &nulls, &nlex);

/*
* In typical use case array of lexemes to delete is relatively small.
* So here we optimizing things for that scenario: iterate through lexarr
* In typical use case array of lexemes to delete is relatively small. So
* here we optimizing things for that scenario: iterate through lexarr
* performing binary search of each lexeme from lexarr in tsvector.
*/
skip_indices = palloc0(nlex * sizeof(int));
@ -641,8 +652,8 @@ tsvector_unnest(PG_FUNCTION_ARGS)

/*
* Internally tsvector stores position and weight in the same
* uint16 (2 bits for weight, 14 for position). Here we extract that
* in two separate arrays.
* uint16 (2 bits for weight, 14 for position). Here we extract
* that in two separate arrays.
*/
posv = _POSVECPTR(tsin, arrin + i);
positions = palloc(posv->npos * sizeof(Datum));
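The tsvector_unnest comment above mentions the packed layout: each position entry is a uint16 holding 2 bits of weight above 14 bits of position (WEP_GETWEIGHT and WEP_GETPOS in the real headers). A standalone sketch of the packing; WEP_MAKE is invented here for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t WordEntryPos;

#define WEP_GETWEIGHT(x)	((x) >> 14)
#define WEP_GETPOS(x)		((x) & 0x3FFF)
#define WEP_MAKE(pos, w)	((WordEntryPos) (((w) << 14) | ((pos) & 0x3FFF)))

int
main(void)
{
	WordEntryPos wep = WEP_MAKE(123, 2);	/* position 123, weight 2 ('B') */

	printf("pos=%u weight=%u\n",
		   (unsigned) WEP_GETPOS(wep), (unsigned) WEP_GETWEIGHT(wep));
	return 0;
}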
@ -772,7 +783,8 @@ tsvector_filter(PG_FUNCTION_ARGS)
Datum *dweights;
bool *nulls;
int nweights;
int i, j;
int i,
j;
int cur_pos = 0;
char mask = 0;

@ -791,16 +803,20 @@ tsvector_filter(PG_FUNCTION_ARGS)
char_weight = DatumGetChar(dweights[i]);
switch (char_weight)
{
case 'A': case 'a':
case 'A':
case 'a':
mask = mask | 8;
break;
case 'B': case 'b':
case 'B':
case 'b':
mask = mask | 4;
break;
case 'C': case 'c':
case 'C':
case 'c':
mask = mask | 2;
break;
case 'D': case 'd':
case 'D':
case 'd':
mask = mask | 1;
break;
default:
@ -846,7 +862,7 @@ tsvector_filter(PG_FUNCTION_ARGS)
memcpy(dataout + cur_pos, datain + arrin[i].pos, arrin[i].len);
posvout->npos = npos;
cur_pos += SHORTALIGN(arrin[i].len);
cur_pos += POSDATALEN(tsout, arrout+j) * sizeof(WordEntryPos) +
cur_pos += POSDATALEN(tsout, arrout + j) * sizeof(WordEntryPos) +
sizeof(uint16);
j++;
}
@ -1276,6 +1292,7 @@ checkcondition_str(void *checkval, QueryOperand *val, ExecPhraseData *data)
WordEntryPos *allpos = NULL;
int npos = 0,
totalpos = 0;

/*
* there was a failed exact search, so we should scan further to find
* a prefix match. We also need to do so if caller needs position info
@ -1371,22 +1388,24 @@ TS_phrase_execute(QueryItem *curitem,
return false;

/*
* if at least one of the operands has no position
* information, fallback to AND operation.
* if at least one of the operands has no position information,
* fallback to AND operation.
*/
if (Ldata.npos == 0 || Rdata.npos == 0)
return true;

/*
* Result of the operation is a list of the
* corresponding positions of RIGHT operand.
* Result of the operation is a list of the corresponding positions of
* RIGHT operand.
*/
if (data)
{
if (!Rdata.allocated)

/*
* OP_PHRASE is based on the OP_AND, so the number of resulting
* positions could not be greater than the total amount of operands.
* OP_PHRASE is based on the OP_AND, so the number of
* resulting positions could not be greater than the total
* amount of operands.
*/
data->pos = palloc(sizeof(WordEntryPos) * Min(Ldata.npos, Rdata.npos));
else
@ -1439,8 +1458,8 @@ TS_phrase_execute(QueryItem *curitem,
else
{
/*
* Go to the next Rpos, because Lpos
* is ahead of the current Rpos
* Go to the next Rpos, because Lpos is ahead of the
* current Rpos
*/
break;
}
@ -1484,7 +1503,7 @@ TS_execute(QueryItem *curitem, void *checkval, bool calcnot,

if (curitem->type == QI_VAL)
return chkcond(checkval, (QueryOperand *) curitem,
NULL /* we don't need position info */);
NULL /* we don't need position info */ );

switch (curitem->qoperator.oper)
{
@ -1546,6 +1565,7 @@ tsquery_requires_match(QueryItem *curitem)
return false;

case OP_PHRASE:

/*
* Treat OP_PHRASE as OP_AND here
*/
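The TS_phrase_execute hunks above spell out the phrase operator's contract: both operands need positions (else fall back to AND), the result is a subset of the RIGHT operand's positions, and its size is bounded by Min of the two position counts. A standalone sketch of that merge over sorted position lists, assuming an exact required distance between left and right matches:

#include <stdio.h>

/*
 * Keep each right position Rpos for which some left position Lpos
 * satisfies Rpos - Lpos == distance. Output size is at most min(nL, nR).
 */
static int
phrase_match(const int *L, int nL, const int *R, int nR,
			 int distance, int *out)
{
	int			i = 0,
				j,
				n = 0;

	for (j = 0; j < nR; j++)
	{
		while (i < nL && R[j] - L[i] > distance)
			i++;				/* this Lpos is too far behind */
		if (i < nL && R[j] - L[i] == distance)
			out[n++] = R[j];
		/* otherwise Lpos is ahead of the current Rpos; take the next Rpos */
	}
	return n;
}

int
main(void)
{
	int			L[] = {1, 5, 9};
	int			R[] = {2, 6, 8};
	int			out[3];
	int			n = phrase_match(L, 3, R, 3, 1, out);
	int			k;

	for (k = 0; k < n; k++)
		printf("%d\n", out[k]);	/* 2 and 6 */
	return 0;
}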
Some files were not shown because too many files have changed in this diff.