From e1bf6527f60a53f1d4abfb9fa6ad71e8694218b3 Mon Sep 17 00:00:00 2001
From: Neil Conway <neilc@samurai.com>
Date: Tue, 16 Nov 2004 23:34:26 +0000
Subject: [PATCH] Prevent a backend crash when processing CREATE TABLE commands
 with more than 65K columns, or when the created table has more than 65K
 columns due to adding inherited columns from parent relations. Fix a similar
 crash when processing SELECT queries with more than 65K target list entries.
 In all three cases we would eventually detect the error and elog, but the
 check was being made too late.
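
For context: AttrNumber is a signed 16-bit integer, so a column count or
target-list position larger than its range wraps around once it is stored in
one, which is why these limits have to be enforced on the still-plain-int
counts before attribute numbers are handed out (see the new comments in
MergeAttributes and transformStmt). A minimal standalone C sketch of the
wraparound, using a hypothetical local typedef rather than the real backend
headers:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for the backend's AttrNumber (a signed 16-bit integer);
     * an illustrative typedef only, not the real backend definition. */
    typedef int16_t AttrNumber;

    int
    main(void)
    {
        int        ncolumns = 70000;              /* e.g. CREATE TABLE with 70000 columns */
        AttrNumber attno = (AttrNumber) ncolumns; /* silently truncated */

        /* On typical platforms this prints "70000 -> 4464": the count has
         * already wrapped around by the time a late limit check sees it. */
        printf("%d -> %d\n", ncolumns, (int) attno);
        return 0;
    }

The new checks therefore compare the still-untruncated counts
(list_length(schema) in MergeAttributes, and p_next_resno - 1 in
transformStmt, both plain ints) against MaxHeapAttributeNumber (1600) and
MaxTupleAttributeNumber (1664) respectively.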

---
 src/backend/commands/tablecmds.c | 29 ++++++++++++++++++++++++++++-
 src/backend/parser/analyze.c     | 14 +++++++++++++-
 2 files changed, 41 insertions(+), 2 deletions(-)

diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index e4001f0102a..2cf721645b0 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.139 2004/11/05 19:15:57 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.140 2004/11/16 23:34:22 neilc Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -680,6 +680,23 @@ MergeAttributes(List *schema, List *supers, bool istemp,
 												 * defaults */
 	int			child_attno;
 
+	/*
+	 * Check for and reject tables with too many columns. We perform
+	 * this check relatively early for two reasons: (a) we don't run
+	 * the risk of overflowing an AttrNumber in subsequent code, and (b) an
+	 * O(n^2) algorithm is okay if we're processing <= 1600 columns,
+	 * but could take minutes to execute if the user attempts to
+	 * create a table with hundreds of thousands of columns.
+	 *
+	 * Note that we also need to check that we do not exceed this
+	 * figure after including columns from inherited relations.
+	 */
+	if (list_length(schema) > MaxHeapAttributeNumber)
+		ereport(ERROR,
+				(errcode(ERRCODE_TOO_MANY_COLUMNS),
+				 errmsg("tables can have at most %d columns",
+						MaxHeapAttributeNumber)));
+
 	/*
 	 * Check for duplicate names in the explicit list of attributes.
 	 *
@@ -979,6 +996,16 @@ MergeAttributes(List *schema, List *supers, bool istemp,
 		}
 
 		schema = inhSchema;
+
+		/*
+		 * Check that we haven't exceeded the legal # of columns after
+		 * merging in inherited columns.
+		 */
+		if (list_length(schema) > MaxHeapAttributeNumber)
+			ereport(ERROR,
+					(errcode(ERRCODE_TOO_MANY_COLUMNS),
+					 errmsg("tables can have at most %d columns",
+							MaxHeapAttributeNumber)));
 	}
 
 	/*
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index c3b547dd5b7..b68f28f8556 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- *	$PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.312 2004/09/27 04:12:02 neilc Exp $
+ *	$PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.313 2004/11/16 23:34:26 neilc Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -396,6 +396,18 @@ transformStmt(ParseState *pstate, Node *parseTree,
 	result->querySource = QSRC_ORIGINAL;
 	result->canSetTag = true;
 
+	/*
+	 * Check that we did not produce too many resnos; at the very
+	 * least we cannot allow more than 2^16, since that would exceed
+	 * the range of an AttrNumber. It seems safest to use
+	 * MaxTupleAttributeNumber.
+	 */
+	if (pstate->p_next_resno - 1 > MaxTupleAttributeNumber)
+		ereport(ERROR,
+				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+				 errmsg("target lists can have at most %d entries",
+						MaxTupleAttributeNumber)));
+
 	return result;
 }
 
-- 
GitLab