Author: Noah Misch
Commit: Noah Misch

    Fix inplace update buffer self-deadlock.

    A CacheInvalidateHeapTuple* callee might call
    CatalogCacheInitializeCache(), which needs a relcache entry.  Acquiring
    a valid relcache entry might scan pg_class.  Hence, to prevent
    undetected LWLock self-deadlock, CacheInvalidateHeapTuple* callers must
    not hold BUFFER_LOCK_EXCLUSIVE on buffers of pg_class.  Move the
    CacheInvalidateHeapTupleInplace() before the BUFFER_LOCK_EXCLUSIVE.
    Commit 243e9b40f1b2dd09d6e5bf91ebf6e822a2cd3704 introduced this
    regression.  No back-patch, since I've reverted the culprit from
    non-master branches.

    Reported by Alexander Lakhin.  Reviewed by FIXME.

    Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 1748eaf..9a31cdc 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6214,6 +6214,17 @@ heap_inplace_lock(Relation relation,
 
 	Assert(BufferIsValid(buffer));
 
+	/*
+	 * Construct shared cache inval if necessary.  Because we pass a tuple
+	 * version without our own inplace changes or inplace changes other
+	 * sessions complete while we wait for locks, inplace update mustn't
+	 * change catcache lookup keys.  But we aren't bothering with index
+	 * updates either, so that's true a fortiori.  After LockBuffer(), it
+	 * would be too late, because this might reach a
+	 * CatalogCacheInitializeCache() that locks "buffer".
+	 */
+	CacheInvalidateHeapTupleInplace(relation, oldtup_ptr, NULL);
+
 	LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
 	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
@@ -6309,6 +6320,7 @@ heap_inplace_lock(Relation relation,
 	if (!ret)
 	{
 		UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
+		ForgetInplace_Inval();
 		InvalidateCatalogSnapshot();
 	}
 	return ret;
@@ -6345,14 +6357,6 @@ heap_inplace_update_and_unlock(Relation relation,
 	dst = (char *) htup + htup->t_hoff;
 	src = (char *) tuple->t_data + tuple->t_data->t_hoff;
 
-	/*
-	 * Construct shared cache inval if necessary.  Note that because we only
-	 * pass the new version of the tuple, this mustn't be used for any
-	 * operations that could change catcache lookup keys.  But we aren't
-	 * bothering with index updates either, so that's true a fortiori.
-	 */
-	CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
-
 	/* Like RecordTransactionCommit(), log only if needed */
 	if (XLogStandbyInfoActive())
 		nmsgs = inplaceGetInvalidationMessages(&invalMessages,
@@ -6481,6 +6485,7 @@ heap_inplace_unlock(Relation relation,
 {
 	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 	UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
+	ForgetInplace_Inval();
 }
 
 #define FRM_NOOP				0x0001
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 986850c..fc972ed 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -1202,6 +1202,18 @@ AtInplace_Inval(void)
 }
 
 /*
+ * ForgetInplace_Inval
+ *		Alternative to PreInplace_Inval()+AtInplace_Inval(): discard queued-up
+ *		invalidations.  This lets inplace update enumerate invalidations
+ *		optimistically, before locking the buffer.
+ */
+void
+ForgetInplace_Inval(void)
+{
+	inplaceInvalInfo = NULL;
+}
+
+/*
  * AtEOSubXact_Inval
  *		Process queued-up invalidation messages at end of subtransaction.
  *
diff --git a/src/include/utils/inval.h b/src/include/utils/inval.h
index 3390e7a..299cd75 100644
--- a/src/include/utils/inval.h
+++ b/src/include/utils/inval.h
@@ -30,6 +30,7 @@ extern void AtEOXact_Inval(bool isCommit);
 
 extern void PreInplace_Inval(void);
 extern void AtInplace_Inval(void);
+extern void ForgetInplace_Inval(void);
 
 extern void AtEOSubXact_Inval(bool isCommit);
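
To illustrate the ordering the patch enforces, here is a self-contained sketch (not PostgreSQL code; every name in it is a hypothetical stand-in).  A non-recursive pthread mutex plays the role of the content LWLock on a pg_class buffer, and build_inval() stands in for CacheInvalidateHeapTupleInplace() reaching a CatalogCacheInitializeCache() that may need to read pg_class and therefore lock that same buffer.  The buggy ordering takes the buffer lock first and would self-deadlock; the fixed ordering constructs the invalidation before locking.

/*
 * Sketch only: a default (non-recursive) mutex stands in for the buffer's
 * LWLock; trylock is used so the demonstration reports the would-be
 * self-deadlock instead of hanging.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pg_class_buffer_lock = PTHREAD_MUTEX_INITIALIZER;

/* Building the inval message may need to consult pg_class. */
static void
build_inval(void)
{
	if (pthread_mutex_trylock(&pg_class_buffer_lock) == EBUSY)
	{
		/* A real LWLock would block here forever: undetected self-deadlock. */
		puts("would self-deadlock: buffer already locked by this backend");
		return;
	}
	puts("built inval message");
	pthread_mutex_unlock(&pg_class_buffer_lock);
}

int
main(void)
{
	/* Buggy ordering: lock the buffer, then construct the invalidation. */
	pthread_mutex_lock(&pg_class_buffer_lock);
	build_inval();
	pthread_mutex_unlock(&pg_class_buffer_lock);

	/* Fixed ordering: construct the invalidation first, then lock. */
	build_inval();
	pthread_mutex_lock(&pg_class_buffer_lock);
	/* ... perform the inplace update ... */
	pthread_mutex_unlock(&pg_class_buffer_lock);
	return 0;
}

Because the invalidation is now enumerated optimistically before the buffer lock is taken, a failed lock attempt must throw the queued-up entry away, which is what the new ForgetInplace_Inval() provides.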