diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c index ce5515dbcb5..81a206eeb72 100644 --- a/src/backend/access/hash/hashsearch.c +++ b/src/backend/access/hash/hashsearch.c @@ -463,12 +463,6 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) opaque = (HashPageOpaque) PageGetSpecialPointer(page); so->currPos.buf = buf; - - /* - * We save the LSN of the page as we read it, so that we know whether it - * is safe to apply LP_DEAD hints to the page later. - */ - so->currPos.lsn = PageGetLSN(page); so->currPos.currPage = BufferGetBlockNumber(buf); if (ScanDirectionIsForward(dir)) @@ -508,7 +502,6 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) { so->currPos.buf = buf; so->currPos.currPage = BufferGetBlockNumber(buf); - so->currPos.lsn = PageGetLSN(page); } else { @@ -562,7 +555,6 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) { so->currPos.buf = buf; so->currPos.currPage = BufferGetBlockNumber(buf); - so->currPos.lsn = PageGetLSN(page); } else { diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c index a825b82706a..f2a1c5b6abc 100644 --- a/src/backend/access/hash/hashutil.c +++ b/src/backend/access/hash/hashutil.c @@ -532,12 +532,13 @@ _hash_get_newbucket_from_oldbucket(Relation rel, Bucket old_bucket, * We match items by heap TID before assuming they are the right ones to * delete. * - * Note that we keep the pin on the bucket page throughout the scan. Hence, - * there is no chance of VACUUM deleting any items from that page. However, - * having pin on the overflow page doesn't guarantee that vacuum won't delete - * any items. - * - * See _bt_killitems() for more details. + * There are never any scans active in a bucket at the time VACUUM begins, + * because VACUUM takes a cleanup lock on the primary bucket page and scans + * hold a pin. A scan can begin after VACUUM leaves the primary bucket page + * but before it finishes the entire bucket, but it can never pass VACUUM, + * because VACUUM always locks the next page before releasing the lock on + * the previous one. Therefore, we don't have to worry about accidentally + * killing a TID that has been reused for an unrelated tuple. */ void _hash_kill_items(IndexScanDesc scan) @@ -579,21 +580,7 @@ _hash_kill_items(IndexScanDesc scan) else buf = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE); - /* - * If page LSN differs it means that the page was modified since the last - * read. killedItems could be not valid so applying LP_DEAD hints is not - * safe. - */ page = BufferGetPage(buf); - if (PageGetLSN(page) != so->currPos.lsn) - { - if (havePin) - LockBuffer(buf, BUFFER_LOCK_UNLOCK); - else - _hash_relbuf(rel, buf); - return; - } - opaque = (HashPageOpaque) PageGetSpecialPointer(page); maxoff = PageGetMaxOffsetNumber(page); diff --git a/src/include/access/hash.h b/src/include/access/hash.h index 0e0f3e17a7c..e3135c1738e 100644 --- a/src/include/access/hash.h +++ b/src/include/access/hash.h @@ -117,7 +117,6 @@ typedef struct HashScanPosItem /* what we remember about each match */ typedef struct HashScanPosData { Buffer buf; /* if valid, the buffer is pinned */ - XLogRecPtr lsn; /* pos in the WAL stream when page was read */ BlockNumber currPage; /* current hash index page */ BlockNumber nextPage; /* next overflow page */ BlockNumber prevPage; /* prev overflow or bucket page */ @@ -153,7 +152,6 @@ typedef struct HashScanPosData #define HashScanPosInvalidate(scanpos) \ do { \ (scanpos).buf = InvalidBuffer; \ - (scanpos).lsn = InvalidXLogRecPtr; \ (scanpos).currPage = InvalidBlockNumber; \ (scanpos).nextPage = InvalidBlockNumber; \ (scanpos).prevPage = InvalidBlockNumber; \