From ebc7d928a761a6e1d56fc23caf1079722821dc0e Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Sun, 4 Oct 2015 15:55:07 -0400
Subject: [PATCH] Further twiddling of nodeHash.c hashtable sizing calculation.

On reflection, the submitted patch didn't really prevent the request size
from exceeding MaxAllocSize, because we'd happily round nbuckets up to the
next power of 2 after we'd limited it to max_pointers.  The simplest way to
enforce the limit correctly is to round max_pointers down to a power of 2
when it isn't one already.

(Note that the constraint to INT_MAX / 2, if it were doing anything useful
at all, is properly applied after that.)
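
For illustration only (not part of the patch): a minimal standalone sketch
of the rounding step, using a local ceil_log2() helper that assumes the same
ceil(log2(n)) behavior as PostgreSQL's my_log2().  The names and the sample
value below are hypothetical.

    #include <stdio.h>

    static int
    ceil_log2(long num)      /* stand-in for my_log2(): smallest i with 2^i >= num */
    {
        int     i;
        long    limit;

        for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
            ;
        return i;
    }

    int
    main(void)
    {
        long    max_pointers = 100000;  /* hypothetical work_mem-derived value */
        long    mppow2;

        /* Same logic as the patch: if max_pointers isn't a power of 2,
         * drop it to the next power of 2 below, so a later round-up of
         * nbuckets cannot push the allocation past the intended limit. */
        mppow2 = 1L << ceil_log2(max_pointers);
        if (max_pointers != mppow2)
            max_pointers = mppow2 / 2;

        printf("%ld\n", max_pointers);  /* prints 65536 for the sample input */
        return 0;
    }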
---
 src/backend/executor/nodeHash.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 62724325c4b..b10a58b4248 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -396,6 +396,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 	long		hash_table_bytes;
 	long		skew_table_bytes;
 	long		max_pointers;
+	long		mppow2;
 	int			nbatch;
 	int			nbuckets;
 	int			i;
@@ -463,7 +464,12 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 	 */
 	max_pointers = (work_mem * 1024L) / sizeof(HashJoinTuple);
 	max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
-	/* also ensure we avoid integer overflow in nbatch and nbuckets */
+	/* If max_pointers isn't a power of 2, must round it down to one */
+	mppow2 = 1L << my_log2(max_pointers);
+	if (max_pointers != mppow2)
+		max_pointers = mppow2 / 2;
+
+	/* Also ensure we avoid integer overflow in nbatch and nbuckets */
 	/* (this step is redundant given the current value of MaxAllocSize) */
 	max_pointers = Min(max_pointers, INT_MAX / 2);
 
-- 
GitLab