mirror of
https://github.com/postgres/postgres.git
synced 2025-07-31 00:01:41 -04:00
Compare commits
11 Commits
2cca95e175 ... a3a836fb5e
Commits in this range:

a3a836fb5e
5e444a2526
25cd2d6402
5ddf997347
5eafacd279
f7cf9494ba
8c9da1441d
64444ce071
f2bf8fb048
bc397e5cdb
bd5760df38
@@ -50,7 +50,7 @@
     writing anything to the database when running on a standby.
     Also, it's recommended to avoid long-running queries in
     <literal>login</literal> event triggers.  Note that, for instance,
-    cancelling connection in <application>psql</application> wouldn't cancel
+    canceling connection in <application>psql</application> wouldn't cancel
     the in-progress <literal>login</literal> trigger.
    </para>
@@ -7625,7 +7625,7 @@ defaultNoticeProcessor(void *arg, const char *message)
    is called. It is the ideal time to initialize any
    <literal>instanceData</literal> an event procedure may need.  Only one
    register event will be fired per event handler per connection. If the
-   event procedure fails (returns zero), the registration is cancelled.
+   event procedure fails (returns zero), the registration is canceled.

<synopsis>
typedef struct
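For context on the register event described above, here is a minimal sketch of a libpq event procedure that allocates its instanceData when PGEVT_REGISTER fires. The struct MyEventData, its calls field, and the "my_events" label are invented for illustration; PQregisterEventProc and PQsetInstanceData are the documented libpq event API, but treat the exact usage as a hedged example rather than project code.

#include <stdlib.h>
#include <libpq-fe.h>
#include <libpq-events.h>

typedef struct
{
    int     calls;      /* hypothetical per-connection state */
} MyEventData;

static int
my_event_proc(PGEventId evtId, void *evtInfo, void *passThrough)
{
    if (evtId == PGEVT_REGISTER)
    {
        /* Fired once per event handler per connection. */
        PGEventRegister *e = (PGEventRegister *) evtInfo;
        MyEventData *data = calloc(1, sizeof(MyEventData));

        if (data == NULL)
            return 0;   /* returning zero cancels the registration */
        PQsetInstanceData(e->conn, my_event_proc, data);
    }
    return 1;
}

/* Registration, typically right after connecting:
 *     PQregisterEventProc(conn, my_event_proc, "my_events", NULL);
 */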
@@ -838,6 +838,7 @@ lazy_scan_heap(LVRelState *vacrel)
        Page        page;
        bool        all_visible_according_to_vm;
        bool        has_lpdead_items;
+       bool        got_cleanup_lock = false;

        if (blkno == next_unskippable_block)
        {
@@ -931,63 +932,40 @@ lazy_scan_heap(LVRelState *vacrel)
         */
        visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);

-       buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
-                                vacrel->bstrategy);
-       page = BufferGetPage(buf);
-
        /*
         * We need a buffer cleanup lock to prune HOT chains and defragment
         * the page in lazy_scan_prune. But when it's not possible to acquire
         * a cleanup lock right away, we may be able to settle for reduced
         * processing using lazy_scan_noprune.
         */
+       buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
+                                vacrel->bstrategy);
+       page = BufferGetPage(buf);

-       if (!ConditionalLockBufferForCleanup(buf))
-       {
+       got_cleanup_lock = ConditionalLockBufferForCleanup(buf);
+
+       if (!got_cleanup_lock)
            LockBuffer(buf, BUFFER_LOCK_SHARE);

-           /* Check for new or empty pages before lazy_scan_noprune call */
-           if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, true,
-                                      vmbuffer))
-           {
-               /* Processed as new/empty page (lock and pin released) */
-               continue;
-           }
-
-           /*
-            * Collect LP_DEAD items in dead_items array, count tuples,
-            * determine if rel truncation is safe
-            */
-           if (lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
-           {
-               Size        freespace = 0;
-               bool        recordfreespace;
-
-               /*
-                * We processed the page successfully (without a cleanup
-                * lock).
-                *
-                * Update the FSM, just as we would in the case where
-                * lazy_scan_prune() is called. Our goal is to update the
-                * freespace map the last time we touch the page. If the
-                * relation has no indexes, or if index vacuuming is disabled,
-                * there will be no second heap pass; if this particular page
-                * has no dead items, the second heap pass will not touch this
-                * page. So, in those cases, update the FSM now.
-                *
-                * After a call to lazy_scan_prune(), we would also try to
-                * adjust the page-level all-visible bit and the visibility
-                * map, but we skip that step in this path.
-                */
-               recordfreespace = vacrel->nindexes == 0
-                   || !vacrel->do_index_vacuuming
-                   || !has_lpdead_items;
-               if (recordfreespace)
-                   freespace = PageGetHeapFreeSpace(page);
-               UnlockReleaseBuffer(buf);
-               if (recordfreespace)
-                   RecordPageWithFreeSpace(vacrel->rel, blkno, freespace);
-               continue;
-           }
+       /* Check for new or empty pages before lazy_scan_[no]prune call */
+       if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
+                                  vmbuffer))
+       {
+           /* Processed as new/empty page (lock and pin released) */
+           continue;
+       }

+       /*
+        * If we didn't get the cleanup lock, we can still collect LP_DEAD
+        * items in the dead_items array for later vacuuming, count live and
+        * recently dead tuples for vacuum logging, and determine if this
+        * block could later be truncated. If we encounter any xid/mxids that
+        * require advancing the relfrozenxid/relminxid, we'll have to wait
+        * for a cleanup lock and call lazy_scan_prune().
+        */
+       if (!got_cleanup_lock &&
+           !lazy_scan_noprune(vacrel, buf, blkno, page, &has_lpdead_items))
+       {
            /*
             * lazy_scan_noprune could not do all required processing. Wait
             * for a cleanup lock, and call lazy_scan_prune in the usual way.
@@ -995,45 +973,45 @@ lazy_scan_heap(LVRelState *vacrel)
            Assert(vacrel->aggressive);
            LockBuffer(buf, BUFFER_LOCK_UNLOCK);
            LockBufferForCleanup(buf);
-       }
-
-       /* Check for new or empty pages before lazy_scan_prune call */
-       if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, false, vmbuffer))
-       {
-           /* Processed as new/empty page (lock and pin released) */
-           continue;
+           got_cleanup_lock = true;
        }

        /*
-        * Prune, freeze, and count tuples.
+        * If we have a cleanup lock, we must now prune, freeze, and count
+        * tuples. We may have acquired the cleanup lock originally, or we may
+        * have gone back and acquired it after lazy_scan_noprune() returned
+        * false. Either way, the page hasn't been processed yet.
         *
-        * Accumulates details of remaining LP_DEAD line pointers on page in
-        * dead_items array. This includes LP_DEAD line pointers that we
-        * pruned ourselves, as well as existing LP_DEAD line pointers that
-        * were pruned some time earlier. Also considers freezing XIDs in the
-        * tuple headers of remaining items with storage. It also determines
-        * if truncating this block is safe.
+        * Like lazy_scan_noprune(), lazy_scan_prune() will count
+        * recently_dead_tuples and live tuples for vacuum logging, determine
+        * if the block can later be truncated, and accumulate the details of
+        * remaining LP_DEAD line pointers on the page in the dead_items
+        * array. These dead items include those pruned by lazy_scan_prune()
+        * as well as line pointers previously marked LP_DEAD.
         */
-       lazy_scan_prune(vacrel, buf, blkno, page,
-                       vmbuffer, all_visible_according_to_vm,
-                       &has_lpdead_items);
+       if (got_cleanup_lock)
+           lazy_scan_prune(vacrel, buf, blkno, page,
+                           vmbuffer, all_visible_according_to_vm,
+                           &has_lpdead_items);

        /*
-        * Final steps for block: drop cleanup lock, record free space in the
-        * FSM.
+        * Now drop the buffer lock and, potentially, update the FSM.
         *
-        * If we will likely do index vacuuming, wait until
-        * lazy_vacuum_heap_rel() to save free space. This doesn't just save
-        * us some cycles; it also allows us to record any additional free
-        * space that lazy_vacuum_heap_page() will make available in cases
-        * where it's possible to truncate the page's line pointer array.
+        * Our goal is to update the freespace map the last time we touch the
+        * page. If we'll process a block in the second pass, we may free up
+        * additional space on the page, so it is better to update the FSM
+        * after the second pass. If the relation has no indexes, or if index
+        * vacuuming is disabled, there will be no second heap pass; if this
+        * particular page has no dead items, the second heap pass will not
+        * touch this page. So, in those cases, update the FSM now.
         *
-        * Note: It's not in fact 100% certain that we really will call
-        * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip index
-        * vacuuming (and so must skip heap vacuuming). This is deemed okay
-        * because it only happens in emergencies, or when there is very
-        * little free space anyway. (Besides, we start recording free space
-        * in the FSM once index vacuuming has been abandoned.)
+        * Note: In corner cases, it's possible to miss updating the FSM
+        * entirely. If index vacuuming is currently enabled, we'll skip the
+        * FSM update now. But if failsafe mode is later activated, or there
+        * are so few dead tuples that index vacuuming is bypassed, there will
+        * also be no opportunity to update the FSM later, because we'll never
+        * revisit this page. Since updating the FSM is desirable but not
+        * absolutely required, that's OK.
         */
        if (vacrel->nindexes == 0
            || !vacrel->do_index_vacuuming
@@ -1047,9 +1025,10 @@ lazy_scan_heap(LVRelState *vacrel)
        /*
         * Periodically perform FSM vacuuming to make newly-freed space
         * visible on upper FSM pages. This is done after vacuuming if the
-        * table has indexes.
+        * table has indexes. There will only be newly-freed space if we
+        * held the cleanup lock and lazy_scan_prune() was called.
         */
-       if (vacrel->nindexes == 0 && has_lpdead_items &&
+       if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
            blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
        {
            FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
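Taken together, the lazy_scan_heap() hunks implement one locking pattern: opportunistically try for a cleanup lock, settle for a shared lock plus reduced processing, and wait for the cleanup lock only when reduced processing cannot cope. A hedged standalone sketch of that control flow follows; every helper is an invented stand-in for the buffer-manager call named in its comment.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the buffer-manager primitives. */
static bool try_cleanup_lock(int blkno)   { return blkno % 2 == 0; } /* pretend contention on odd blocks */
static void shared_lock(int blkno)        { (void) blkno; }
static void unlock(int blkno)             { (void) blkno; }
static void wait_cleanup_lock(int blkno)  { (void) blkno; }
static bool reduced_processing(int blkno) { return blkno % 3 != 0; } /* sometimes insufficient */
static void full_processing(int blkno)    { printf("full processing of block %d\n", blkno); }

static void
process_block(int blkno)
{
    bool got_cleanup_lock = try_cleanup_lock(blkno);   /* ConditionalLockBufferForCleanup */

    if (!got_cleanup_lock)
        shared_lock(blkno);                            /* LockBuffer(..., BUFFER_LOCK_SHARE) */

    /* Reduced path first; escalate only when it cannot finish the job. */
    if (!got_cleanup_lock && !reduced_processing(blkno)) /* lazy_scan_noprune */
    {
        unlock(blkno);                                 /* LockBuffer(..., BUFFER_LOCK_UNLOCK) */
        wait_cleanup_lock(blkno);                      /* LockBufferForCleanup */
        got_cleanup_lock = true;
    }

    if (got_cleanup_lock)
        full_processing(blkno);                        /* lazy_scan_prune */
}

int
main(void)
{
    for (int blkno = 0; blkno < 6; blkno++)
        process_block(blkno);
    return 0;
}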
@@ -251,8 +251,15 @@ RemoveWalSummaryIfOlderThan(WalSummaryFile *ws, time_t cutoff_time)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not stat file \"%s\": %m", path)));
+   /* XXX temporarily changed to debug buildfarm failures */
+#if 0
    ereport(DEBUG2,
            (errmsg_internal("removing file \"%s\"", path)));
+#else
+   ereport(LOG,
+           (errmsg_internal("removing file \"%s\" cutoff_time=%llu", path,
+                            (unsigned long long) cutoff_time)));
+#endif
}

/*
@@ -3917,7 +3917,7 @@ reindex_relation(const ReindexStmt *stmt, Oid relid, int flags,
    Oid         toast_relid;
    List       *indexIds;
    char        persistence;
-   bool        result;
+   bool        result = false;
    ListCell   *indexId;
    int         i;

@@ -3947,9 +3947,8 @@ reindex_relation(const ReindexStmt *stmt, Oid relid, int flags,
    toast_relid = rel->rd_rel->reltoastrelid;

    /*
-    * Get the list of index OIDs for this relation. (We trust to the
-    * relcache to get this with a sequential scan if ignoring system
-    * indexes.)
+    * Get the list of index OIDs for this relation. (We trust the relcache
+    * to get this with a sequential scan if ignoring system indexes.)
     */
    indexIds = RelationGetIndexList(rel);

@@ -3965,6 +3964,35 @@ reindex_relation(const ReindexStmt *stmt, Oid relid, int flags,
        CommandCounterIncrement();
    }

+   /*
+    * Reindex the toast table, if any, before the main table.
+    *
+    * This helps in cases where a corruption in the toast table's index would
+    * otherwise error and stop the REINDEX TABLE command when it tries to
+    * fetch a toasted datum. This way, the toast table's index is rebuilt and
+    * fixed before it is used for reindexing the main table.
+    *
+    * It is critical to call reindex_relation() *after* the call to
+    * RelationGetIndexList() returning the list of indexes on the relation,
+    * because reindex_relation() will call CommandCounterIncrement() after
+    * every reindex_index(). See REINDEX_REL_SUPPRESS_INDEX_USE for more
+    * details.
+    */
+   if ((flags & REINDEX_REL_PROCESS_TOAST) && OidIsValid(toast_relid))
+   {
+       /*
+        * Note that this should fail if the toast relation is missing, so
+        * reset REINDEXOPT_MISSING_OK. Even if a new tablespace is set for
+        * the parent relation, the indexes on its toast table are not moved.
+        * This rule is enforced by setting tablespaceOid to InvalidOid.
+        */
+       ReindexParams newparams = *params;
+
+       newparams.options &= ~(REINDEXOPT_MISSING_OK);
+       newparams.tablespaceOid = InvalidOid;
+       result |= reindex_relation(stmt, toast_relid, flags, &newparams);
+   }
+
    /*
     * Compute persistence of indexes: same as that of owning rel, unless
     * caller specified otherwise.
@@ -4018,26 +4046,7 @@ reindex_relation(const ReindexStmt *stmt, Oid relid, int flags,
     */
    table_close(rel, NoLock);

-   result = (indexIds != NIL);
-
-   /*
-    * If the relation has a secondary toast rel, reindex that too while we
-    * still hold the lock on the main table.
-    */
-   if ((flags & REINDEX_REL_PROCESS_TOAST) && OidIsValid(toast_relid))
-   {
-       /*
-        * Note that this should fail if the toast relation is missing, so
-        * reset REINDEXOPT_MISSING_OK. Even if a new tablespace is set for
-        * the parent relation, the indexes on its toast table are not moved.
-        * This rule is enforced by setting tablespaceOid to InvalidOid.
-        */
-       ReindexParams newparams = *params;
-
-       newparams.options &= ~(REINDEXOPT_MISSING_OK);
-       newparams.tablespaceOid = InvalidOid;
-       result |= reindex_relation(stmt, toast_relid, flags, &newparams);
-   }
+   result |= (indexIds != NIL);

    return result;
}
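The reindex_relation() hunks move the toast-table recursion ahead of the main table and turn result into an OR-accumulator initialized to false, so an early contributor is not overwritten by a later assignment. A toy sketch of that accumulation shape, with invented helpers standing in for the real steps:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: did this step actually rebuild anything? */
static bool reindex_toast_table(int relid) { return relid % 2 == 0; }
static bool has_own_indexes(int relid)     { return relid > 10; }

/*
 * Initializing result to false and OR-ing each step in lets the toast
 * table (now processed first) contribute before the main table's own
 * index list is examined -- the shape of the new code above.
 */
static bool
reindex_relation_sketch(int relid)
{
    bool result = false;

    result |= reindex_toast_table(relid);  /* recursion in the real code */
    result |= has_own_indexes(relid);      /* result |= (indexIds != NIL) */
    return result;
}

int
main(void)
{
    printf("%d\n", reindex_relation_sketch(12));  /* prints 1 */
    return 0;
}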
@@ -359,6 +359,8 @@ static List *MergeAttributes(List *columns, const List *supers, char relpersiste
                                 bool is_partition, List **supconstr,
                                 List **supnotnulls);
static List *MergeCheckConstraint(List *constraints, const char *name, Node *expr);
+static void MergeChildAttribute(List *inh_columns, int exist_attno, int newcol_attno, const ColumnDef *newdef);
+static ColumnDef *MergeInheritedAttribute(List *inh_columns, int exist_attno, const ColumnDef *newdef);
static void MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel, bool ispartition);
static void MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel);
static void StoreCatalogInheritance(Oid relationId, List *supers,
@@ -2705,7 +2707,7 @@ MergeAttributes(List *columns, const List *supers, char relpersistence,
            char       *attributeName = NameStr(attribute->attname);
            int         exist_attno;
            ColumnDef  *newdef;
-           ColumnDef  *savedef;
+           ColumnDef  *mergeddef;

            /*
             * Ignore dropped columns in the parent.
@@ -2739,136 +2741,18 @@ MergeAttributes(List *columns, const List *supers, char relpersistence,
            exist_attno = findAttrByName(attributeName, inh_columns);
            if (exist_attno > 0)
            {
-               ColumnDef  *prevdef;
-               Oid         prevtypeid,
-                           newtypeid;
-               int32       prevtypmod,
-                           newtypmod;
-               Oid         prevcollid,
-                           newcollid;
-
-               /*
-                * Partitions have only one parent and have no column
-                * definitions of their own, so conflict should never occur.
-                */
-               Assert(!is_partition);
-
-               /*
-                * Yes, try to merge the two column definitions.
-                */
-               ereport(NOTICE,
-                       (errmsg("merging multiple inherited definitions of column \"%s\"",
-                               attributeName)));
-               prevdef = list_nth_node(ColumnDef, inh_columns, exist_attno - 1);
-
-               /*
-                * Must have the same type and typmod
-                */
-               typenameTypeIdAndMod(NULL, prevdef->typeName, &prevtypeid, &prevtypmod);
-               typenameTypeIdAndMod(NULL, newdef->typeName, &newtypeid, &newtypmod);
-               if (prevtypeid != newtypeid || prevtypmod != newtypmod)
-                   ereport(ERROR,
-                           (errcode(ERRCODE_DATATYPE_MISMATCH),
-                            errmsg("inherited column \"%s\" has a type conflict",
-                                   attributeName),
-                            errdetail("%s versus %s",
-                                      format_type_with_typemod(prevtypeid, prevtypmod),
-                                      format_type_with_typemod(newtypeid, newtypmod))));
-
-               /*
-                * Must have the same collation
-                */
-               prevcollid = GetColumnDefCollation(NULL, prevdef, prevtypeid);
-               newcollid = GetColumnDefCollation(NULL, newdef, newtypeid);
-               if (prevcollid != newcollid)
-                   ereport(ERROR,
-                           (errcode(ERRCODE_COLLATION_MISMATCH),
-                            errmsg("inherited column \"%s\" has a collation conflict",
-                                   attributeName),
-                            errdetail("\"%s\" versus \"%s\"",
-                                      get_collation_name(prevcollid),
-                                      get_collation_name(newcollid))));
-
-               /*
-                * Copy/check storage parameter
-                */
-               if (prevdef->storage == 0)
-                   prevdef->storage = newdef->storage;
-               else if (prevdef->storage != newdef->storage)
-                   ereport(ERROR,
-                           (errcode(ERRCODE_DATATYPE_MISMATCH),
-                            errmsg("inherited column \"%s\" has a storage parameter conflict",
-                                   attributeName),
-                            errdetail("%s versus %s",
-                                      storage_name(prevdef->storage),
-                                      storage_name(newdef->storage))));
-
-               /*
-                * Copy/check compression parameter
-                */
-               if (prevdef->compression == NULL)
-                   prevdef->compression = newdef->compression;
-               else if (strcmp(prevdef->compression, newdef->compression) != 0)
-                   ereport(ERROR,
-                           (errcode(ERRCODE_DATATYPE_MISMATCH),
-                            errmsg("column \"%s\" has a compression method conflict",
-                                   attributeName),
-                            errdetail("%s versus %s", prevdef->compression, newdef->compression)));
-
-               /*
-                * In regular inheritance, columns in the parent's primary key
-                * get an extra not-null constraint.
-                */
-               if (bms_is_member(parent_attno - FirstLowInvalidHeapAttributeNumber,
-                                 pkattrs))
-               {
-                   CookedConstraint *nn;
-
-                   nn = palloc(sizeof(CookedConstraint));
-                   nn->contype = CONSTR_NOTNULL;
-                   nn->conoid = InvalidOid;
-                   nn->name = NULL;
-                   nn->attnum = exist_attno;
-                   nn->expr = NULL;
-                   nn->skip_validation = false;
-                   nn->is_local = false;
-                   nn->inhcount = 1;
-                   nn->is_no_inherit = false;
-
-                   nnconstraints = lappend(nnconstraints, nn);
-               }
-
-               /*
-                * mark attnotnull if parent has it and it's not NO INHERIT
-                */
-               if (bms_is_member(parent_attno, nncols) ||
-                   bms_is_member(parent_attno - FirstLowInvalidHeapAttributeNumber,
-                                 pkattrs))
-                   prevdef->is_not_null = true;
-
-               /*
-                * Check for GENERATED conflicts
-                */
-               if (prevdef->generated != newdef->generated)
-                   ereport(ERROR,
-                           (errcode(ERRCODE_DATATYPE_MISMATCH),
-                            errmsg("inherited column \"%s\" has a generation conflict",
-                                   attributeName)));
-
-               /*
-                * Default and other constraints are handled below
-                */
-
-               prevdef->inhcount++;
-               if (prevdef->inhcount < 0)
-                   ereport(ERROR,
-                           errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
-                           errmsg("too many inheritance parents"));
+               mergeddef = MergeInheritedAttribute(inh_columns, exist_attno, newdef);

                newattmap->attnums[parent_attno - 1] = exist_attno;

-               /* remember for default processing below */
-               savedef = prevdef;
+               /*
+                * Partitions have only one parent, so conflict should never
+                * occur.
+                */
+               Assert(!is_partition);
            }
            else
            {
@@ -2877,43 +2761,45 @@ MergeAttributes(List *columns, const List *supers, char relpersistence,
                 */
                newdef->inhcount = 1;
                newdef->is_local = false;
-               /* mark attnotnull if parent has it and it's not NO INHERIT */
-               if (bms_is_member(parent_attno, nncols) ||
-                   bms_is_member(parent_attno - FirstLowInvalidHeapAttributeNumber,
-                                 pkattrs))
-                   newdef->is_not_null = true;
                inh_columns = lappend(inh_columns, newdef);

                newattmap->attnums[parent_attno - 1] = ++child_attno;

-               /*
-                * In regular inheritance, columns in the parent's primary key
-                * get an extra not-null constraint. Partitioning doesn't
-                * need this, because the PK itself is going to be cloned to
-                * the partition.
-                */
-               if (!is_partition &&
-                   bms_is_member(parent_attno -
-                                 FirstLowInvalidHeapAttributeNumber,
-                                 pkattrs))
-               {
-                   CookedConstraint *nn;
+               mergeddef = newdef;
            }

-                   nn = palloc(sizeof(CookedConstraint));
-                   nn->contype = CONSTR_NOTNULL;
-                   nn->conoid = InvalidOid;
-                   nn->name = NULL;
-                   nn->attnum = newattmap->attnums[parent_attno - 1];
-                   nn->expr = NULL;
-                   nn->skip_validation = false;
-                   nn->is_local = false;
-                   nn->inhcount = 1;
-                   nn->is_no_inherit = false;
+           /*
+            * mark attnotnull if parent has it and it's not NO INHERIT
+            */
+           if (bms_is_member(parent_attno, nncols) ||
+               bms_is_member(parent_attno - FirstLowInvalidHeapAttributeNumber,
+                             pkattrs))
+               mergeddef->is_not_null = true;

-                   nnconstraints = lappend(nnconstraints, nn);
-               }
+           /*
+            * In regular inheritance, columns in the parent's primary key get
+            * an extra not-null constraint. Partitioning doesn't need this,
+            * because the PK itself is going to be cloned to the partition.
+            */
+           if (!is_partition &&
+               bms_is_member(parent_attno -
+                             FirstLowInvalidHeapAttributeNumber,
+                             pkattrs))
+           {
+               CookedConstraint *nn;

-               /* remember for default processing below */
-               savedef = newdef;
+               nn = palloc(sizeof(CookedConstraint));
+               nn->contype = CONSTR_NOTNULL;
+               nn->conoid = InvalidOid;
+               nn->name = NULL;
+               nn->attnum = newattmap->attnums[parent_attno - 1];
+               nn->expr = NULL;
+               nn->skip_validation = false;
+               nn->is_local = false;
+               nn->inhcount = 1;
+               nn->is_no_inherit = false;
+
+               nnconstraints = lappend(nnconstraints, nn);
            }
@@ -2935,7 +2821,7 @@ MergeAttributes(List *columns, const List *supers, char relpersistence,
             * all the inherited default expressions for the moment.
             */
            inherited_defaults = lappend(inherited_defaults, this_default);
-           cols_with_defaults = lappend(cols_with_defaults, savedef);
+           cols_with_defaults = lappend(cols_with_defaults, mergeddef);
        }
    }

@@ -3066,10 +2952,16 @@ MergeAttributes(List *columns, const List *supers, char relpersistence,

    foreach(lc, columns)
    {
-       ColumnDef  *newdef = lfirst(lc);
+       ColumnDef  *newdef = lfirst_node(ColumnDef, lc);
        char       *attributeName = newdef->colname;
        int         exist_attno;

+       /*
+        * Partitions have only one parent and have no column definitions
+        * of their own, so conflict should never occur.
+        */
+       Assert(!is_partition);
+
        newcol_attno++;

        /*
@@ -3078,155 +2970,15 @@ MergeAttributes(List *columns, const List *supers, char relpersistence,
        exist_attno = findAttrByName(attributeName, inh_columns);
        if (exist_attno > 0)
        {
-           ColumnDef  *inhdef;
-           Oid         inhtypeid,
-                       newtypeid;
-           int32       inhtypmod,
-                       newtypmod;
-           Oid         inhcollid,
-                       newcollid;
-
-           /*
-            * Partitions have only one parent and have no column
-            * definitions of their own, so conflict should never occur.
-            */
-           Assert(!is_partition);
-
            /*
             * Yes, try to merge the two column definitions.
             */
-           if (exist_attno == newcol_attno)
-               ereport(NOTICE,
-                       (errmsg("merging column \"%s\" with inherited definition",
-                               attributeName)));
-           else
-               ereport(NOTICE,
-                       (errmsg("moving and merging column \"%s\" with inherited definition", attributeName),
-                        errdetail("User-specified column moved to the position of the inherited column.")));
-           inhdef = list_nth_node(ColumnDef, inh_columns, exist_attno - 1);
-
-           /*
-            * Must have the same type and typmod
-            */
-           typenameTypeIdAndMod(NULL, inhdef->typeName, &inhtypeid, &inhtypmod);
-           typenameTypeIdAndMod(NULL, newdef->typeName, &newtypeid, &newtypmod);
-           if (inhtypeid != newtypeid || inhtypmod != newtypmod)
-               ereport(ERROR,
-                       (errcode(ERRCODE_DATATYPE_MISMATCH),
-                        errmsg("column \"%s\" has a type conflict",
-                               attributeName),
-                        errdetail("%s versus %s",
-                                  format_type_with_typemod(inhtypeid, inhtypmod),
-                                  format_type_with_typemod(newtypeid, newtypmod))));
-
-           /*
-            * Must have the same collation
-            */
-           inhcollid = GetColumnDefCollation(NULL, inhdef, inhtypeid);
-           newcollid = GetColumnDefCollation(NULL, newdef, newtypeid);
-           if (inhcollid != newcollid)
-               ereport(ERROR,
-                       (errcode(ERRCODE_COLLATION_MISMATCH),
-                        errmsg("column \"%s\" has a collation conflict",
-                               attributeName),
-                        errdetail("\"%s\" versus \"%s\"",
-                                  get_collation_name(inhcollid),
-                                  get_collation_name(newcollid))));
-
-           /*
-            * Identity is never inherited. The new column can have an
-            * identity definition, so we always just take that one.
-            */
-           inhdef->identity = newdef->identity;
-
-           /*
-            * Copy storage parameter
-            */
-           if (inhdef->storage == 0)
-               inhdef->storage = newdef->storage;
-           else if (newdef->storage != 0 && inhdef->storage != newdef->storage)
-               ereport(ERROR,
-                       (errcode(ERRCODE_DATATYPE_MISMATCH),
-                        errmsg("column \"%s\" has a storage parameter conflict",
-                               attributeName),
-                        errdetail("%s versus %s",
-                                  storage_name(inhdef->storage),
-                                  storage_name(newdef->storage))));
-
-           /*
-            * Copy compression parameter
-            */
-           if (inhdef->compression == NULL)
-               inhdef->compression = newdef->compression;
-           else if (newdef->compression != NULL)
-           {
-               if (strcmp(inhdef->compression, newdef->compression) != 0)
-                   ereport(ERROR,
-                           (errcode(ERRCODE_DATATYPE_MISMATCH),
-                            errmsg("column \"%s\" has a compression method conflict",
-                                   attributeName),
-                            errdetail("%s versus %s", inhdef->compression, newdef->compression)));
-           }
-
-           /*
-            * Merge of not-null constraints = OR 'em together
-            */
-           inhdef->is_not_null |= newdef->is_not_null;
-
-           /*
-            * Check for conflicts related to generated columns.
-            *
-            * If the parent column is generated, the child column will be
-            * made a generated column if it isn't already. If it is a
-            * generated column, we'll take its generation expression in
-            * preference to the parent's. We must check that the child
-            * column doesn't specify a default value or identity, which
-            * matches the rules for a single column in parse_utilcmd.c.
-            *
-            * Conversely, if the parent column is not generated, the
-            * child column can't be either. (We used to allow that, but
-            * it results in being able to override the generation
-            * expression via UPDATEs through the parent.)
-            */
-           if (inhdef->generated)
-           {
-               if (newdef->raw_default && !newdef->generated)
-                   ereport(ERROR,
-                           (errcode(ERRCODE_INVALID_COLUMN_DEFINITION),
-                            errmsg("column \"%s\" inherits from generated column but specifies default",
-                                   inhdef->colname)));
-               if (newdef->identity)
-                   ereport(ERROR,
-                           (errcode(ERRCODE_INVALID_COLUMN_DEFINITION),
-                            errmsg("column \"%s\" inherits from generated column but specifies identity",
-                                   inhdef->colname)));
-           }
-           else
-           {
-               if (newdef->generated)
-                   ereport(ERROR,
-                           (errcode(ERRCODE_INVALID_COLUMN_DEFINITION),
-                            errmsg("child column \"%s\" specifies generation expression",
-                                   inhdef->colname),
-                            errhint("A child table column cannot be generated unless its parent column is.")));
-           }
-
-           /*
-            * If new def has a default, override previous default
-            */
-           if (newdef->raw_default != NULL)
-           {
-               inhdef->raw_default = newdef->raw_default;
-               inhdef->cooked_default = newdef->cooked_default;
-           }
-
-           /* Mark the column as locally defined */
-           inhdef->is_local = true;
+           MergeChildAttribute(inh_columns, exist_attno, newcol_attno, newdef);
        }
        else
        {
            /*
-            * No, attach new column to result columns
+            * No, attach new column unchanged to result columns.
             */
            inh_columns = lappend(inh_columns, newdef);
        }
@@ -3419,6 +3171,282 @@ MergeCheckConstraint(List *constraints, const char *name, Node *expr)
    return lappend(constraints, newcon);
}

+/*
+ * MergeChildAttribute
+ *     Merge given child attribute definition into given inherited attribute.
+ *
+ * Input arguments:
+ * 'inh_columns' is the list of inherited ColumnDefs.
+ * 'exist_attno' is the number of the inherited attribute in inh_columns.
+ * 'newcol_attno' is the attribute number in the child table's schema definition.
+ * 'newdef' is the column/attribute definition from the child table.
+ *
+ * The ColumnDef in the 'inh_columns' list is modified.  The child attribute's
+ * ColumnDef remains unchanged.
+ *
+ * Notes:
+ * - The attribute is merged according to the rules laid out in the prologue
+ *   of MergeAttributes().
+ * - If a matching inherited attribute exists but the child attribute cannot
+ *   be merged into it, the function throws respective errors.
+ * - A partition cannot have its own column definitions.  Hence this function
+ *   is applicable only to a regular inheritance child.
+ */
+static void
+MergeChildAttribute(List *inh_columns, int exist_attno, int newcol_attno, const ColumnDef *newdef)
+{
+   char       *attributeName = newdef->colname;
+   ColumnDef  *inhdef;
+   Oid         inhtypeid,
+               newtypeid;
+   int32       inhtypmod,
+               newtypmod;
+   Oid         inhcollid,
+               newcollid;
+
+   if (exist_attno == newcol_attno)
+       ereport(NOTICE,
+               (errmsg("merging column \"%s\" with inherited definition",
+                       attributeName)));
+   else
+       ereport(NOTICE,
+               (errmsg("moving and merging column \"%s\" with inherited definition", attributeName),
+                errdetail("User-specified column moved to the position of the inherited column.")));
+
+   inhdef = list_nth_node(ColumnDef, inh_columns, exist_attno - 1);
+
+   /*
+    * Must have the same type and typmod
+    */
+   typenameTypeIdAndMod(NULL, inhdef->typeName, &inhtypeid, &inhtypmod);
+   typenameTypeIdAndMod(NULL, newdef->typeName, &newtypeid, &newtypmod);
+   if (inhtypeid != newtypeid || inhtypmod != newtypmod)
+       ereport(ERROR,
+               (errcode(ERRCODE_DATATYPE_MISMATCH),
+                errmsg("column \"%s\" has a type conflict",
+                       attributeName),
+                errdetail("%s versus %s",
+                          format_type_with_typemod(inhtypeid, inhtypmod),
+                          format_type_with_typemod(newtypeid, newtypmod))));
+
+   /*
+    * Must have the same collation
+    */
+   inhcollid = GetColumnDefCollation(NULL, inhdef, inhtypeid);
+   newcollid = GetColumnDefCollation(NULL, newdef, newtypeid);
+   if (inhcollid != newcollid)
+       ereport(ERROR,
+               (errcode(ERRCODE_COLLATION_MISMATCH),
+                errmsg("column \"%s\" has a collation conflict",
+                       attributeName),
+                errdetail("\"%s\" versus \"%s\"",
+                          get_collation_name(inhcollid),
+                          get_collation_name(newcollid))));
+
+   /*
+    * Identity is never inherited by a regular inheritance child.  Pick the
+    * child's identity definition if there is one.
+    */
+   inhdef->identity = newdef->identity;
+
+   /*
+    * Copy storage parameter
+    */
+   if (inhdef->storage == 0)
+       inhdef->storage = newdef->storage;
+   else if (newdef->storage != 0 && inhdef->storage != newdef->storage)
+       ereport(ERROR,
+               (errcode(ERRCODE_DATATYPE_MISMATCH),
+                errmsg("column \"%s\" has a storage parameter conflict",
+                       attributeName),
+                errdetail("%s versus %s",
+                          storage_name(inhdef->storage),
+                          storage_name(newdef->storage))));
+
+   /*
+    * Copy compression parameter
+    */
+   if (inhdef->compression == NULL)
+       inhdef->compression = newdef->compression;
+   else if (newdef->compression != NULL)
+   {
+       if (strcmp(inhdef->compression, newdef->compression) != 0)
+           ereport(ERROR,
+                   (errcode(ERRCODE_DATATYPE_MISMATCH),
+                    errmsg("column \"%s\" has a compression method conflict",
+                           attributeName),
+                    errdetail("%s versus %s", inhdef->compression, newdef->compression)));
+   }
+
+   /*
+    * Merge of not-null constraints = OR 'em together
+    */
+   inhdef->is_not_null |= newdef->is_not_null;
+
+   /*
+    * Check for conflicts related to generated columns.
+    *
+    * If the parent column is generated, the child column will be made a
+    * generated column if it isn't already. If it is a generated column,
+    * we'll take its generation expression in preference to the parent's. We
+    * must check that the child column doesn't specify a default value or
+    * identity, which matches the rules for a single column in
+    * parse_utilcmd.c.
+    *
+    * Conversely, if the parent column is not generated, the child column
+    * can't be either. (We used to allow that, but it results in being able
+    * to override the generation expression via UPDATEs through the parent.)
+    */
+   if (inhdef->generated)
+   {
+       if (newdef->raw_default && !newdef->generated)
+           ereport(ERROR,
+                   (errcode(ERRCODE_INVALID_COLUMN_DEFINITION),
+                    errmsg("column \"%s\" inherits from generated column but specifies default",
+                           inhdef->colname)));
+       if (newdef->identity)
+           ereport(ERROR,
+                   (errcode(ERRCODE_INVALID_COLUMN_DEFINITION),
+                    errmsg("column \"%s\" inherits from generated column but specifies identity",
+                           inhdef->colname)));
+   }
+   else
+   {
+       if (newdef->generated)
+           ereport(ERROR,
+                   (errcode(ERRCODE_INVALID_COLUMN_DEFINITION),
+                    errmsg("child column \"%s\" specifies generation expression",
+                           inhdef->colname),
+                    errhint("A child table column cannot be generated unless its parent column is.")));
+   }
+
+   /*
+    * If new def has a default, override previous default
+    */
+   if (newdef->raw_default != NULL)
+   {
+       inhdef->raw_default = newdef->raw_default;
+       inhdef->cooked_default = newdef->cooked_default;
+   }
+
+   /* Mark the column as locally defined */
+   inhdef->is_local = true;
+}
+
+/*
+ * MergeInheritedAttribute
+ *     Merge given parent attribute definition into specified attribute
+ *     inherited from the previous parents.
+ *
+ * Input arguments:
+ * 'inh_columns' is the list of previously inherited ColumnDefs.
+ * 'exist_attno' is the number of the existing matching attribute in inh_columns.
+ * 'newdef' is the new parent column/attribute definition to be merged.
+ *
+ * The matching ColumnDef in the 'inh_columns' list is modified and returned.
+ *
+ * Notes:
+ * - The attribute is merged according to the rules laid out in the prologue
+ *   of MergeAttributes().
+ * - If a matching inherited attribute exists but the new attribute cannot be
+ *   merged into it, the function throws respective errors.
+ * - A partition inherits from only a single parent.  Hence this function is
+ *   applicable only to regular inheritance.
+ */
+static ColumnDef *
+MergeInheritedAttribute(List *inh_columns,
+                       int exist_attno,
+                       const ColumnDef *newdef)
+{
+   char       *attributeName = newdef->colname;
+   ColumnDef  *prevdef;
+   Oid         prevtypeid,
+               newtypeid;
+   int32       prevtypmod,
+               newtypmod;
+   Oid         prevcollid,
+               newcollid;
+
+   ereport(NOTICE,
+           (errmsg("merging multiple inherited definitions of column \"%s\"",
+                   attributeName)));
+   prevdef = list_nth_node(ColumnDef, inh_columns, exist_attno - 1);
+
+   /*
+    * Must have the same type and typmod
+    */
+   typenameTypeIdAndMod(NULL, prevdef->typeName, &prevtypeid, &prevtypmod);
+   typenameTypeIdAndMod(NULL, newdef->typeName, &newtypeid, &newtypmod);
+   if (prevtypeid != newtypeid || prevtypmod != newtypmod)
+       ereport(ERROR,
+               (errcode(ERRCODE_DATATYPE_MISMATCH),
+                errmsg("inherited column \"%s\" has a type conflict",
+                       attributeName),
+                errdetail("%s versus %s",
+                          format_type_with_typemod(prevtypeid, prevtypmod),
+                          format_type_with_typemod(newtypeid, newtypmod))));
+
+   /*
+    * Must have the same collation
+    */
+   prevcollid = GetColumnDefCollation(NULL, prevdef, prevtypeid);
+   newcollid = GetColumnDefCollation(NULL, newdef, newtypeid);
+   if (prevcollid != newcollid)
+       ereport(ERROR,
+               (errcode(ERRCODE_COLLATION_MISMATCH),
+                errmsg("inherited column \"%s\" has a collation conflict",
+                       attributeName),
+                errdetail("\"%s\" versus \"%s\"",
+                          get_collation_name(prevcollid),
+                          get_collation_name(newcollid))));
+
+   /*
+    * Copy/check storage parameter
+    */
+   if (prevdef->storage == 0)
+       prevdef->storage = newdef->storage;
+   else if (prevdef->storage != newdef->storage)
+       ereport(ERROR,
+               (errcode(ERRCODE_DATATYPE_MISMATCH),
+                errmsg("inherited column \"%s\" has a storage parameter conflict",
+                       attributeName),
+                errdetail("%s versus %s",
+                          storage_name(prevdef->storage),
+                          storage_name(newdef->storage))));
+
+   /*
+    * Copy/check compression parameter
+    */
+   if (prevdef->compression == NULL)
+       prevdef->compression = newdef->compression;
+   else if (strcmp(prevdef->compression, newdef->compression) != 0)
+       ereport(ERROR,
+               (errcode(ERRCODE_DATATYPE_MISMATCH),
+                errmsg("column \"%s\" has a compression method conflict",
+                       attributeName),
+                errdetail("%s versus %s", prevdef->compression, newdef->compression)));
+
+   /*
+    * Check for GENERATED conflicts
+    */
+   if (prevdef->generated != newdef->generated)
+       ereport(ERROR,
+               (errcode(ERRCODE_DATATYPE_MISMATCH),
+                errmsg("inherited column \"%s\" has a generation conflict",
+                       attributeName)));
+
+   /*
+    * Default and other constraints are handled by the caller.
+    */
+
+   prevdef->inhcount++;
+   if (prevdef->inhcount < 0)
+       ereport(ERROR,
+               errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+               errmsg("too many inheritance parents"));
+
+   return prevdef;
+}
+
 /*
  * StoreCatalogInheritance
@@ -492,8 +492,16 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
            return false;
        }

-       *operators = lappend_oid(*operators, hasheqoperator);
-       *param_exprs = lappend(*param_exprs, expr);
+       /*
+        * 'expr' may already exist as a parameter from a previous item in
+        * ppi_clauses.  No need to include it again, however we'd better
+        * ensure we do switch into binary mode if required.  See below.
+        */
+       if (!list_member(*param_exprs, expr))
+       {
+           *operators = lappend_oid(*operators, hasheqoperator);
+           *param_exprs = lappend(*param_exprs, expr);
+       }

        /*
         * When the join operator is not hashable then it's possible that
@@ -536,8 +544,16 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
            return false;
        }

-       *operators = lappend_oid(*operators, typentry->eq_opr);
-       *param_exprs = lappend(*param_exprs, expr);
+       /*
+        * 'expr' may already exist as a parameter from the ppi_clauses.  No
+        * need to include it again, however we'd better ensure we do switch
+        * into binary mode.
+        */
+       if (!list_member(*param_exprs, expr))
+       {
+           *operators = lappend_oid(*operators, typentry->eq_opr);
+           *param_exprs = lappend(*param_exprs, expr);
+       }

        /*
         * We must go into binary mode as we don't have too much of an idea of
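Both paraminfo_get_equal_hashops() hunks add the same guard: check membership before appending, so the cache key list holds no duplicates and the operator list stays aligned with it. A hedged, generic sketch of that append-if-absent pattern over a simple linked list; PostgreSQL's list_member() compares nodes with equal(), and all names below are invented:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Node
{
    int          value;
    struct Node *next;
} Node;

static bool
list_contains(const Node *head, int value)
{
    for (; head != NULL; head = head->next)
        if (head->value == value)
            return true;
    return false;
}

/*
 * Append-if-absent: keeps the list duplicate-free, mirroring the
 * !list_member(*param_exprs, expr) guard added in the hunks above.
 */
static Node *
append_unique(Node *head, int value)
{
    if (list_contains(head, value))
        return head;            /* already present: no duplicate cache key */

    Node *n = malloc(sizeof(Node));
    n->value = value;
    n->next = NULL;
    if (head == NULL)
        return n;
    Node *tail = head;
    while (tail->next != NULL)
        tail = tail->next;
    tail->next = n;
    return head;
}

int
main(void)
{
    Node *keys = NULL;
    int   input[] = {1, 2, 1, 3, 2};

    for (int i = 0; i < 5; i++)
        keys = append_unique(keys, input[i]);
    for (Node *p = keys; p != NULL; p = p->next)
        printf("%d ", p->value);    /* prints: 1 2 3 */
    printf("\n");
    return 0;
}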
@@ -90,7 +90,8 @@ assign_param_for_var(PlannerInfo *root, Var *var)
                pvar->varattno == var->varattno &&
                pvar->vartype == var->vartype &&
                pvar->vartypmod == var->vartypmod &&
-               pvar->varcollid == var->varcollid)
+               pvar->varcollid == var->varcollid &&
+               bms_equal(pvar->varnullingrels, var->varnullingrels))
                return pitem->paramId;
        }
    }
@@ -1353,7 +1353,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
                     * coding means that there is a tiny chance that the process
                     * terminates its current transaction and starts a different one
                     * before we have a chance to send the signal; the worst possible
-                    * consequence is that a for-wraparound vacuum is cancelled. But
+                    * consequence is that a for-wraparound vacuum is canceled. But
                     * that could happen in any case unless we were to do kill() with
                     * the lock held, which is much more undesirable.
                     */
@@ -3120,8 +3120,16 @@ timestamp_pl_interval(PG_FUNCTION_ARGS)
                    (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
                     errmsg("timestamp out of range")));

-           /* Add days by converting to and from Julian */
-           julian = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + span->day;
+           /*
+            * Add days by converting to and from Julian.  We need an overflow
+            * check here since j2date expects a non-negative integer input.
+            */
+           julian = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday);
+           if (pg_add_s32_overflow(julian, span->day, &julian) ||
+               julian < 0)
+               ereport(ERROR,
+                       (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+                        errmsg("timestamp out of range")));
            j2date(julian, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);

            if (tm2timestamp(tm, fsec, NULL, &timestamp) != 0)
@@ -3256,8 +3264,19 @@ timestamptz_pl_interval_internal(TimestampTz timestamp,
                    (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
                     errmsg("timestamp out of range")));

-           /* Add days by converting to and from Julian */
-           julian = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + span->day;
+           /*
+            * Add days by converting to and from Julian.  We need an overflow
+            * check here since j2date expects a non-negative integer input.
+            * In practice though, it will give correct answers for small
+            * negative Julian dates; we should allow -1 to avoid
+            * timezone-dependent failures, as discussed in timestamp.h.
+            */
+           julian = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday);
+           if (pg_add_s32_overflow(julian, span->day, &julian) ||
+               julian < -1)
+               ereport(ERROR,
+                       (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+                        errmsg("timestamp out of range")));
            j2date(julian, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);

            tz = DetermineTimeZoneOffset(tm, attimezone);
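Both timestamp hunks replace an unchecked julian + span->day with an addition vetted by pg_add_s32_overflow() (from src/include/common/int.h) before the result reaches j2date(). A hedged standalone equivalent built on the GCC/Clang builtin; the Julian day constant is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Standalone equivalent of pg_add_s32_overflow() from common/int.h:
 * returns true when a + b does not fit in int32.
 */
static bool
add_s32_overflow(int32_t a, int32_t b, int32_t *result)
{
    return __builtin_add_overflow(a, b, result);
}

int
main(void)
{
    int32_t julian = 2451545;   /* 2000-01-01 as a Julian day number */
    int32_t days = INT32_MAX;   /* hostile interval: would overflow */

    if (add_s32_overflow(julian, days, &julian) || julian < 0)
        printf("timestamp out of range\n");   /* matches the new ereport path */
    else
        printf("julian = %d\n", julian);
    return 0;
}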
@@ -48,6 +48,7 @@ SELECT summarized_tli, summarized_lsn FROM pg_get_wal_summarizer_state()
EOM
($summarized_tli, $summarized_lsn) = split(/\|/, $progress);
note("after insert, summarized TLI $summarized_tli through $summarized_lsn");
+note_wal_summary_dir("after insert", $node1);

# Update a row in the first block of the table and trigger a checkpoint.
$node1->safe_psql('postgres', <<EOM);
@@ -70,6 +71,7 @@ SELECT tli, start_lsn, end_lsn from pg_available_wal_summaries()
EOM
my ($tli, $start_lsn, $end_lsn) = split(/\|/, $details);
note("examining summary for TLI $tli from $start_lsn to $end_lsn");
+note_wal_summary_dir("after new summary", $node1);

# Reconstruct the full pathname for the WAL summary file.
my $filename = sprintf "%s/pg_wal/summaries/%08s%08s%08s%08s%08s.summary",
@@ -77,6 +79,7 @@ my $filename = sprintf "%s/pg_wal/summaries/%08s%08s%08s%08s%08s.summary",
	split(m@/@, $start_lsn),
	split(m@/@, $end_lsn);
ok(-f $filename, "WAL summary file exists");
+note_wal_summary_dir("after existence check", $node1);

# Run pg_walsummary on it. We expect block 0 to be modified, but depending
# on where the new tuple ends up, block 1 might also be modified, so we
@@ -84,5 +87,16 @@ ok(-f $filename, "WAL summary file exists");
my ($stdout, $stderr) = run_command([ 'pg_walsummary', '-i', $filename ]);
like($stdout, qr/FORK main: block 0$/m, "stdout shows block 0 modified");
is($stderr, '', 'stderr is empty');
+note_wal_summary_dir("after pg_walsummary run", $node1);

done_testing();
+
+# XXX. Temporary debugging code.
+sub note_wal_summary_dir
+{
+	my ($flair, $node) = @_;
+
+	my $wsdir = sprintf "%s/pg_wal/summaries", $node->data_dir;
+	my @wsfiles = grep { $_ ne '.' && $_ ne '..' } slurp_dir($wsdir);
+	note("$flair pg_wal/summaries has: @wsfiles");
+}
@@ -1,6 +1,6 @@
# This test exercises behavior of foreign keys in the face of concurrent
# detach of partitions in the referenced table.
-# (The cases where the detaching transaction is cancelled is interesting
+# (The cases where the detaching transaction is canceled is interesting
# because the locking situation is completely different. I didn't verify
# that keeping both variants adds any extra coverage.)
#
@@ -601,7 +601,7 @@ is( $node_primary->poll_query_until(
ok( pump_until(
		$sigchld_bb, $sigchld_bb_timeout,
		\$sigchld_bb_stderr, qr/backup is not in progress/),
-	'base backup cleanly cancelled');
+	'base backup cleanly canceled');
$sigchld_bb->finish();

done_testing();
@@ -482,6 +482,8 @@ SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '109203489 days'
 Sun Dec 31 00:00:00 294276
(1 row)

+SELECT timestamp without time zone '2000-01-01' - interval '2483590 days' AS "out of range";
+ERROR:  timestamp out of range
SELECT timestamp without time zone '12/31/294276' - timestamp without time zone '12/23/1999' AS "106751991 Days";
 106751991 Days
------------------
@@ -742,6 +744,8 @@ SELECT timestamp with time zone '1999-12-01' + interval '1 month - 1 second' AS
 Fri Dec 31 23:59:59 1999 PST
(1 row)

+SELECT timestamp with time zone '2000-01-01' - interval '2483590 days' AS "out of range";
+ERROR:  timestamp out of range
SELECT (timestamp with time zone 'today' = (timestamp with time zone 'yesterday' + interval '1 day')) as "True";
 True
------
@@ -96,37 +96,37 @@ WHERE t1.unique1 < 1000;
SELECT explain_memoize('
SELECT COUNT(*),AVG(t2.t1two) FROM tenk1 t1 LEFT JOIN
LATERAL (
-	SELECT t1.two as t1two, * FROM tenk1 t2 WHERE t2.unique1 < 5 OFFSET 0
+	SELECT t1.two as t1two, * FROM tenk1 t2 WHERE t2.unique1 < 4 OFFSET 0
) t2
ON t1.two = t2.two
WHERE t1.unique1 < 10;', false);
                                        explain_memoize
----------------------------------------------------------------------------------------------
 Aggregate (actual rows=1 loops=N)
-   ->  Nested Loop Left Join (actual rows=25 loops=N)
+   ->  Nested Loop Left Join (actual rows=20 loops=N)
         ->  Index Scan using tenk1_unique1 on tenk1 t1 (actual rows=10 loops=N)
               Index Cond: (unique1 < 10)
         ->  Memoize (actual rows=2 loops=N)
-              Cache Key: t1.two, t1.two
+              Cache Key: t1.two
              Cache Mode: binary
              Hits: 8  Misses: 2  Evictions: Zero  Overflows: 0  Memory Usage: NkB
              ->  Subquery Scan on t2 (actual rows=2 loops=N)
                    Filter: (t1.two = t2.two)
                    Rows Removed by Filter: 2
-                   ->  Index Scan using tenk1_unique1 on tenk1 t2_1 (actual rows=5 loops=N)
-                         Index Cond: (unique1 < 5)
+                   ->  Index Scan using tenk1_unique1 on tenk1 t2_1 (actual rows=4 loops=N)
+                         Index Cond: (unique1 < 4)
(13 rows)

-- And check we get the expected results.
SELECT COUNT(*),AVG(t2.t1two) FROM tenk1 t1 LEFT JOIN
LATERAL (
-	SELECT t1.two as t1two, * FROM tenk1 t2 WHERE t2.unique1 < 5 OFFSET 0
+	SELECT t1.two as t1two, * FROM tenk1 t2 WHERE t2.unique1 < 4 OFFSET 0
) t2
ON t1.two = t2.two
WHERE t1.unique1 < 10;
 count |          avg
-------+------------------------
-    25 | 0.40000000000000000000
+    20 | 0.50000000000000000000
(1 row)

-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions
@@ -120,6 +120,7 @@ SELECT timestamp without time zone '1999-12-01' + interval '1 month - 1 second'
SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '106000000 days' AS "Feb 23, 285506";
SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '107000000 days' AS "Jan 20, 288244";
SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '109203489 days' AS "Dec 31, 294276";
+SELECT timestamp without time zone '2000-01-01' - interval '2483590 days' AS "out of range";
SELECT timestamp without time zone '12/31/294276' - timestamp without time zone '12/23/1999' AS "106751991 Days";

-- Shorthand values
@@ -151,6 +152,7 @@ SELECT timestamp with time zone '1996-03-01' - interval '1 second' AS "Feb 29";
SELECT timestamp with time zone '1999-03-01' - interval '1 second' AS "Feb 28";
SELECT timestamp with time zone '2000-03-01' - interval '1 second' AS "Feb 29";
SELECT timestamp with time zone '1999-12-01' + interval '1 month - 1 second' AS "Dec 31";
+SELECT timestamp with time zone '2000-01-01' - interval '2483590 days' AS "out of range";

SELECT (timestamp with time zone 'today' = (timestamp with time zone 'yesterday' + interval '1 day')) as "True";
SELECT (timestamp with time zone 'today' = (timestamp with time zone 'tomorrow' - interval '1 day')) as "True";
@@ -61,7 +61,7 @@ WHERE t1.unique1 < 1000;
SELECT explain_memoize('
SELECT COUNT(*),AVG(t2.t1two) FROM tenk1 t1 LEFT JOIN
LATERAL (
-	SELECT t1.two as t1two, * FROM tenk1 t2 WHERE t2.unique1 < 5 OFFSET 0
+	SELECT t1.two as t1two, * FROM tenk1 t2 WHERE t2.unique1 < 4 OFFSET 0
) t2
ON t1.two = t2.two
WHERE t1.unique1 < 10;', false);
@@ -69,7 +69,7 @@ WHERE t1.unique1 < 10;', false);
-- And check we get the expected results.
SELECT COUNT(*),AVG(t2.t1two) FROM tenk1 t1 LEFT JOIN
LATERAL (
-	SELECT t1.two as t1two, * FROM tenk1 t2 WHERE t2.unique1 < 5 OFFSET 0
+	SELECT t1.two as t1two, * FROM tenk1 t2 WHERE t2.unique1 < 4 OFFSET 0
) t2
ON t1.two = t2.two
WHERE t1.unique1 < 10;