From 449df66162775adf27f5297f381f0ae199938064 Mon Sep 17 00:00:00 2001
From: Alvaro Herrera <alvherre@alvh.no-ip.org>
Date: Fri, 10 Apr 2015 12:46:56 -0300
Subject: [PATCH] Optimize locking a tuple already locked by another subxact

This is equivalent to the fix in 27846f02c176eebe7e08ce51e, but less
invasive: we don't change the API of HeapTupleSatisfiesUpdate here, but
instead cope with the possibility that xmax is a multixact by checking
for multixacts separately.
This patch, for 9.3 and 9.4, is much simpler and restores performance
almost to what it was in 9.2 for the test case supplied by Oskari
Saarenmaa in bug #8470.
---
 src/backend/access/heap/heapam.c | 70 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 67 insertions(+), 3 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 2209db1..70a084c 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -4134,6 +4134,7 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 				new_infomask,
 				new_infomask2;
 	bool		have_tuple_lock = false;
+	bool		checked_our_multi = false;
 
 	*buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
 	LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
@@ -4535,9 +4536,8 @@ failed:
 	/*
 	 * We might already hold the desired lock (or stronger), possibly under a
 	 * different subtransaction of the current top transaction.  If so, there
-	 * is no need to change state or issue a WAL record.  We already handled
-	 * the case where this is true for xmax being a MultiXactId, so now check
-	 * for cases where it is a plain TransactionId.
+	 * is no need to change state or issue a WAL record.  First check
+	 * for cases where xmax is a plain TransactionId.
 	 *
 	 * Note in particular that this covers the case where we already hold
 	 * exclusive lock on the tuple and the caller only wants key share or
@@ -4564,6 +4564,70 @@ failed:
 	}
 
 	/*
+	 * Now check the multixact case.  It was already handled above when
+	 * HeapTupleSatisfiesUpdate returned HeapTupleBeingUpdated, but HTSU
+	 * can also return HeapTupleMayBeUpdated while xmax is a multixact.
+	 */
+	if (!checked_our_multi &&
+		(old_infomask & HEAP_XMAX_IS_MULTI))
+	{
+		int			nmembers;
+		MultiXactMember *members;
+		uint16		infomask;
+		TransactionId xwait;
+
+		/* must copy state data before unlocking buffer */
+		infomask = tuple->t_data->t_infomask;
+		xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
+
+		LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+
+		nmembers = GetMultiXactIdMembers(xmax, &members,
+										 !(infomask & HEAP_LOCK_MASK) &&
+										 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
+		if (nmembers > 0)
+		{
+			int			i;
+
+			for (i = 0; i < nmembers; i++)
+			{
+				if (TransactionIdIsCurrentTransactionId(members[i].xid))
+				{
+					LockTupleMode membermode;
+
+					membermode = TUPLOCK_from_mxstatus(members[i].status);
+
+					if (membermode >= mode)
+					{
+						if (have_tuple_lock)
+							UnlockTupleTuplock(relation, tid, mode);
+
+						pfree(members);
+						return HeapTupleMayBeUpdated;
+					}
+				}
+			}
+
+			pfree(members);
+		}
+
+		/* make sure to only do this once */
+		checked_our_multi = true;
+
+		/*
+		 * If we reach this point, then we know the optimization doesn't apply
+		 * and we need to fall through to lock the tuple.  However, the xmax
+		 * might have changed in the meantime, so recheck and start again if so.
+		 */
+		LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+		if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
+			!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+								  xwait))
+			goto l3;
+
+	}
+
+	/*
 	 * If this is the first possibly-multixact-able operation in the current
 	 * transaction, set my per-backend OldestMemberMXactId setting. We can be
 	 * certain that the transaction will never become a member of any older
-- 
2.1.4

