From 8a8a9b678ad011bd55005bb10c28bd5c67d80455 Mon Sep 17 00:00:00 2001
From: Fujii Masao
Date: Sun, 17 Nov 2024 12:05:42 +0900
Subject: [PATCH v2] Optimize iteration over PGPROC for fast-path lock
 searches.

This commit improves efficiency in FastPathTransferRelationLocks()
and GetLockConflicts(), which iterate over PGPROCs to search for
fast-path locks.

Previously, these functions recalculated the fast-path group during
every loop iteration, even though it remained constant. This update
optimizes the process by calculating the group once and reusing it
throughout the loop.

The functions also now skip empty fast-path groups, avoiding
unnecessary scans of their slots. Additionally, groups belonging to
inactive backends (with pid=0) are always empty, so checking the
group is sufficient to bypass these backends, further enhancing
performance.

Author: Fujii Masao
Reviewed-by: Heikki Linnakangas
Discussion: https://postgr.es/m/07d5fd6a-71f1-4ce8-8602-4cc6883f4bd1@oss.nttdata.com
---
 src/backend/storage/lmgr/lock.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index edc5020c6a..a1272e376f 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -2773,6 +2773,10 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 	LWLock	   *partitionLock = LockHashPartitionLock(hashcode);
 	Oid			relid = locktag->locktag_field2;
 	uint32		i;
+	uint32		group;
+
+	/* fast-path group the lock belongs to */
+	group = FAST_PATH_REL_GROUP(relid);
 
 	/*
 	 * Every PGPROC that can potentially hold a fast-path lock is present in
@@ -2783,8 +2787,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 	for (i = 0; i < ProcGlobal->allProcCount; i++)
 	{
 		PGPROC	   *proc = &ProcGlobal->allProcs[i];
-		uint32		j,
-					group;
+		uint32		j;
 
 		LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
 
@@ -2802,16 +2805,16 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 		 * less clear that our backend is certain to have performed a memory
 		 * fencing operation since the other backend set proc->databaseId. So
 		 * for now, we test it after acquiring the LWLock just to be safe.
+		 *
+		 * Also skip groups without any registered fast-path locks.
 		 */
-		if (proc->databaseId != locktag->locktag_field1)
+		if (proc->databaseId != locktag->locktag_field1 ||
+			proc->fpLockBits[group] == 0)
 		{
 			LWLockRelease(&proc->fpInfoLock);
 			continue;
 		}
 
-		/* fast-path group the lock belongs to */
-		group = FAST_PATH_REL_GROUP(relid);
-
 		for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
 		{
 			uint32		lockmode;
@@ -3025,8 +3028,12 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 	{
 		int			i;
 		Oid			relid = locktag->locktag_field2;
+		uint32		group;
 		VirtualTransactionId vxid;
 
+		/* fast-path group the lock belongs to */
+		group = FAST_PATH_REL_GROUP(relid);
+
 		/*
 		 * Iterate over relevant PGPROCs.  Anything held by a prepared
 		 * transaction will have been transferred to the primary lock table,
@@ -3040,8 +3047,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 		for (i = 0; i < ProcGlobal->allProcCount; i++)
 		{
 			PGPROC	   *proc = &ProcGlobal->allProcs[i];
-			uint32		j,
-						group;
+			uint32		j;
 
 			/* A backend never blocks itself */
 			if (proc == MyProc)
@@ -3056,16 +3062,16 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 			 *
 			 * See FastPathTransferRelationLocks() for discussion of why we do
 			 * this test after acquiring the lock.
+			 *
+			 * Also skip groups without any registered fast-path locks.
 			 */
-			if (proc->databaseId != locktag->locktag_field1)
+			if (proc->databaseId != locktag->locktag_field1 ||
+				proc->fpLockBits[group] == 0)
 			{
 				LWLockRelease(&proc->fpInfoLock);
 				continue;
 			}
 
-			/* fast-path group the lock belongs to */
-			group = FAST_PATH_REL_GROUP(relid);
-
 			for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
 			{
 				uint32		lockmask;
-- 
2.47.0
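For readers skimming the diff, the following standalone C sketch shows the
loop shape the patch ends up with: the group index is computed once from the
relation OID, and backends whose group has no fast-path bits set are skipped
before their slots are scanned. Everything in the sketch (FakeProc,
scan_procs(), and the constant values) is invented for illustration and is
not the PostgreSQL code; the real functions additionally hold each backend's
fpInfoLock and transfer or report the locks they find.

/*
 * Minimal, self-contained sketch of the optimized loop structure.
 * FakeProc stands in for PGPROC; the constants are assumed values.
 */
#include <stdint.h>
#include <stdio.h>

#define FP_LOCK_GROUPS           4	/* assumed group count for the sketch */
#define FP_LOCK_SLOTS_PER_GROUP  16
#define FAST_PATH_REL_GROUP(relid)  ((relid) % FP_LOCK_GROUPS)

typedef struct FakeProc
{
	uint32_t	databaseId;					/* modeled as 0 when inactive */
	uint64_t	fpLockBits[FP_LOCK_GROUPS]; /* 0 => no fast-path locks in group */
} FakeProc;

static void
scan_procs(FakeProc *procs, int nprocs, uint32_t dbid, uint32_t relid)
{
	/* Compute the group once; it depends only on the relation OID. */
	uint32_t	group = FAST_PATH_REL_GROUP(relid);

	for (int i = 0; i < nprocs; i++)
	{
		FakeProc   *proc = &procs[i];

		/*
		 * Skip backends in other databases and backends whose group has no
		 * fast-path locks registered.  Inactive backends (databaseId = 0 in
		 * this model, empty groups) fall out here as well.
		 */
		if (proc->databaseId != dbid || proc->fpLockBits[group] == 0)
			continue;

		for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
		{
			/* ... examine slot (group, j), as the real functions do ... */
		}

		printf("proc %d: scanned group %u\n", i, group);
	}
}

int
main(void)
{
	FakeProc	procs[3] = {
		{.databaseId = 1, .fpLockBits = {0, 0x5, 0, 0}},	/* locks in group 1 */
		{.databaseId = 1, .fpLockBits = {0}},				/* active, groups empty */
		{.databaseId = 0, .fpLockBits = {0}},				/* inactive backend */
	};

	scan_procs(procs, 3, 1, 42);	/* hypothetical relid; group 2, nothing scanned */
	scan_procs(procs, 3, 1, 5);		/* group 1, only the first proc is scanned */
	return 0;
}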