diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 64eec91..4e99093 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -1036,8 +1036,10 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 	 * Increasing nbatch will not fix it since there's no way to subdivide the
 	 * group any more finely. We have to just gut it out and hope the server
 	 * has enough RAM.
+	 * Also avoid increasing nbatch if the overhead of the additional batch
+	 * files alone would exceed work_mem.
 	 */
 	if (nfreed == 0 || nfreed == ninmemory)
 	{
 		hashtable->growEnabled = false;
 #ifdef HJDEBUG
@@ -1655,8 +1658,18 @@ ExecHashTableInsert(HashJoinTable hashtable,
 			hashtable->spacePeak = hashtable->spaceUsed;
 		if (hashtable->spaceUsed +
 			hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
-			> hashtable->spaceAllowed)
-			ExecHashIncreaseNumBatches(hashtable);
+			> hashtable->spaceAllowed && hashtable->growEnabled) {
+			/*
+			 * spaceUsed doesn't include the overhead of the batch files
+			 * themselves (one ~8 kB buffer each, presumably BLCKSZ); stop
+			 * growing if doubling nbatch costs more memory than it frees.
+			 * XXX: is "1 << nbatch" (2^nbatch, UB once nbatch reaches 31)
+			 * really intended here, rather than "hashtable->nbatch * 2"?
+			 */
+			if (1<<hashtable->nbatch < hashtable->spaceUsed/8192)
+				ExecHashIncreaseNumBatches(hashtable);
+			else
+				hashtable->growEnabled = false;
+		}
 	}
 	else
 	{
