Optimize iteration over PGPROC for fast-path lock searches.

This commit improves efficiency in FastPathTransferRelationLocks()
and GetLockConflicts(), which iterate over PGPROCs to search for
fast-path locks.

Previously, these functions recalculated the fast-path group during
every loop iteration, even though it remained constant. This update
optimizes the process by calculating the group once and reusing it
throughout the loop.

The functions also now skip empty fast-path groups, avoiding
unnecessary scans of their slots. Additionally, groups belonging to
inactive backends (with pid=0) are always empty, so checking
the group is sufficient to bypass these backends, further enhancing
performance.
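
For reference, below is a minimal, self-contained sketch of the loop pattern the two paragraphs above describe. The struct, constants, and rel_group() helper are simplified stand-ins invented for illustration only; they are not PostgreSQL's actual PGPROC layout or the real FAST_PATH_REL_GROUP() macro.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins, not PostgreSQL's real definitions. */
#define FP_GROUPS_PER_BACKEND	8
#define FP_SLOTS_PER_GROUP		16

typedef struct
{
	uint64_t	fpLockBits[FP_GROUPS_PER_BACKEND];	/* nonzero => group holds locks */
	uint32_t	fpRelId[FP_GROUPS_PER_BACKEND * FP_SLOTS_PER_GROUP];
} FakeProc;

/* Stand-in for FAST_PATH_REL_GROUP(): map a relation OID to its group. */
static inline uint32_t
rel_group(uint32_t relid)
{
	return relid % FP_GROUPS_PER_BACKEND;
}

static void
scan_procs(FakeProc *procs, int nprocs, uint32_t relid)
{
	/* Compute the group once; it is the same for every backend scanned. */
	uint32_t	group = rel_group(relid);

	for (int i = 0; i < nprocs; i++)
	{
		FakeProc   *proc = &procs[i];

		/*
		 * Skip backends with no fast-path locks in this group.  Inactive
		 * backends (pid=0) always have empty groups, so this check covers
		 * them as well.
		 */
		if (proc->fpLockBits[group] == 0)
			continue;

		/* Only now scan the slots of the one relevant group. */
		for (uint32_t j = 0; j < FP_SLOTS_PER_GROUP; j++)
		{
			uint32_t	slot = group * FP_SLOTS_PER_GROUP + j;

			if (proc->fpRelId[slot] == relid)
				printf("backend %d holds a fast-path lock on relation %u\n",
					   i, relid);
		}
	}
}

int
main(void)
{
	FakeProc	procs[2] = {0};
	uint32_t	relid = 42;

	procs[1].fpLockBits[rel_group(relid)] = 1;
	procs[1].fpRelId[rel_group(relid) * FP_SLOTS_PER_GROUP] = relid;
	scan_procs(procs, 2, relid);
	return 0;
}

The early group check is the payoff: a zero group word lets the scan skip an entire group's slot comparisons per backend, which matters when most backends hold no fast-path lock on the relation in question.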

Author: Fujii Masao <masao.fujii@gmail.com>
Reviewed-by: Heikki Linnakangas <hlinnaka@iki.fi>
Reviewed-by: Ashutosh Bapat <ashutosh.bapat.oss@gmail.com>
Discussion: https://postgr.es/m/07d5fd6a-71f1-4ce8-8602-4cc6883f4bd1@oss.nttdata.com
Fujii Masao 2025-03-14 22:49:29 +09:00
parent a359d37019
commit e80171d57c

@@ -2774,6 +2774,9 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 	Oid			relid = locktag->locktag_field2;
 	uint32		i;
 
+	/* fast-path group the lock belongs to */
+	uint32		group = FAST_PATH_REL_GROUP(relid);
+
 	/*
 	 * Every PGPROC that can potentially hold a fast-path lock is present in
 	 * ProcGlobal->allProcs.  Prepared transactions are not, but any
@@ -2783,8 +2786,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 	for (i = 0; i < ProcGlobal->allProcCount; i++)
 	{
 		PGPROC	   *proc = &ProcGlobal->allProcs[i];
-		uint32		j,
-					group;
+		uint32		j;
 
 		LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
 
@@ -2802,16 +2804,16 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 		 * less clear that our backend is certain to have performed a memory
 		 * fencing operation since the other backend set proc->databaseId.  So
 		 * for now, we test it after acquiring the LWLock just to be safe.
+		 *
+		 * Also skip groups without any registered fast-path locks.
 		 */
-		if (proc->databaseId != locktag->locktag_field1)
+		if (proc->databaseId != locktag->locktag_field1 ||
+			proc->fpLockBits[group] == 0)
 		{
 			LWLockRelease(&proc->fpInfoLock);
 			continue;
 		}
 
-		/* fast-path group the lock belongs to */
-		group = FAST_PATH_REL_GROUP(relid);
-
 		for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
 		{
 			uint32		lockmode;
@@ -3027,6 +3029,9 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 	Oid			relid = locktag->locktag_field2;
 	VirtualTransactionId vxid;
 
+	/* fast-path group the lock belongs to */
+	uint32		group = FAST_PATH_REL_GROUP(relid);
+
 	/*
 	 * Iterate over relevant PGPROCs.  Anything held by a prepared
 	 * transaction will have been transferred to the primary lock table,
@@ -3040,8 +3045,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 	for (i = 0; i < ProcGlobal->allProcCount; i++)
 	{
 		PGPROC	   *proc = &ProcGlobal->allProcs[i];
-		uint32		j,
-					group;
+		uint32		j;
 
 		/* A backend never blocks itself */
 		if (proc == MyProc)
@@ -3056,16 +3060,16 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 		 *
 		 * See FastPathTransferRelationLocks() for discussion of why we do
 		 * this test after acquiring the lock.
+		 *
+		 * Also skip groups without any registered fast-path locks.
 		 */
-		if (proc->databaseId != locktag->locktag_field1)
+		if (proc->databaseId != locktag->locktag_field1 ||
+			proc->fpLockBits[group] == 0)
 		{
 			LWLockRelease(&proc->fpInfoLock);
 			continue;
 		}
 
-		/* fast-path group the lock belongs to */
-		group = FAST_PATH_REL_GROUP(relid);
-
 		for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
 		{
 			uint32		lockmask;