From fd3e3cf500de7f8f625744fbd8a413b27f500abe Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Sun, 4 Oct 2015 14:16:59 -0400
Subject: [PATCH] Fix possible "invalid memory alloc request size" failure in
 nodeHash.c.

Limit the size of the hashtable pointer array to not more than
MaxAllocSize.  We've seen reports of failures due to this in HEAD/9.5,
and it seems possible in older branches as well.  The change in
NTUP_PER_BUCKET in 9.5 may have made the problem more likely, but
surely it didn't introduce it.

Tomas Vondra, slightly modified by me
---
 src/backend/executor/nodeHash.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 26eff5eb8a5..62724325c4b 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -459,10 +459,12 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 	 * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
 	 * memory is filled.  Set nbatch to the smallest power of 2 that appears
 	 * sufficient.  The Min() steps limit the results so that the pointer
-	 * arrays we'll try to allocate do not exceed work_mem.
+	 * arrays we'll try to allocate do not exceed work_mem nor MaxAllocSize.
 	 */
-	max_pointers = (work_mem * 1024L) / sizeof(void *);
+	max_pointers = (work_mem * 1024L) / sizeof(HashJoinTuple);
+	max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
 	/* also ensure we avoid integer overflow in nbatch and nbuckets */
+	/* (this step is redundant given the current value of MaxAllocSize) */
 	max_pointers = Min(max_pointers, INT_MAX / 2);
 
 	if (inner_rel_bytes > hash_table_bytes)
--
GitLab
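
Below is a standalone sketch (not part of the patch) showing how the clamping
sequence above behaves.  MaxAllocSize, Min(), and HashJoinTuple are stand-ins
mirroring the backend's definitions in memutils.h and c.h; the work_mem value
is a hypothetical setting chosen so the new clamp actually fires, and a 64-bit
long is assumed.

#include <limits.h>
#include <stdio.h>

typedef struct HashJoinTupleData *HashJoinTuple;	/* pointer-sized, as in the backend */

#define MaxAllocSize	((size_t) 0x3fffffff)	/* 1 GB - 1, from memutils.h */
#define Min(x, y)		((x) < (y) ? (x) : (y))

int
main(void)
{
	long		work_mem = 2 * 1024 * 1024; /* hypothetical: 2 GB, in kB */
	long		max_pointers;

	/* the pointer array may consume up to work_mem ... */
	max_pointers = (work_mem * 1024L) / sizeof(HashJoinTuple);
	/* ... but a single allocation request must stay under MaxAllocSize ... */
	max_pointers = Min(max_pointers, (long) (MaxAllocSize / sizeof(HashJoinTuple)));
	/* ... and nbuckets/nbatch arithmetic must not overflow int */
	max_pointers = Min(max_pointers, INT_MAX / 2);

	printf("max_pointers = %ld\n", max_pointers);
	return 0;
}

With this 2 GB work_mem, the first step alone would allow 268,435,456
pointers (a 2 GB array, which palloc would reject with "invalid memory alloc
request size"); the new Min() step caps it at 134,217,727 entries, just under
MaxAllocSize worth of 8-byte pointers, and the final INT_MAX / 2 clamp never
fires, which is why the patch marks it redundant.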