diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 8bd55026d42e76fa34f0fa52609741c85987598a..2394060a67f45663cc286e5f64d05ce4ffa1e505 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.19 2001/10/01 05:36:13 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.20 2001/10/05 17:28:11 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -15,9 +15,9 @@
 #include "access/htup.h"
 #include "access/xlogutils.h"
 #include "catalog/pg_database.h"
-#include "lib/hasht.h"
 #include "storage/bufpage.h"
 #include "storage/smgr.h"
+#include "utils/hsearch.h"
 #include "utils/relcache.h"
 
 
@@ -233,27 +233,22 @@ _xl_init_rel_cache(void)
 	ctl.entrysize = sizeof(XLogRelCacheEntry);
 	ctl.hash = tag_hash;
 
-	_xlrelcache = hash_create(_XLOG_RELCACHESIZE, &ctl,
-							  HASH_ELEM | HASH_FUNCTION);
+	_xlrelcache = hash_create("XLOG relcache", _XLOG_RELCACHESIZE,
+							  &ctl, HASH_ELEM | HASH_FUNCTION);
 }
 
 static void
-_xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
+_xl_remove_hash_entry(XLogRelDesc *rdesc)
 {
-	XLogRelCacheEntry *hentry;
-	bool		found;
-	XLogRelDesc *rdesc = *edata;
 	Form_pg_class tpgc = rdesc->reldata.rd_rel;
+	XLogRelCacheEntry *hentry;
 
 	rdesc->lessRecently->moreRecently = rdesc->moreRecently;
 	rdesc->moreRecently->lessRecently = rdesc->lessRecently;
 
 	hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache,
-				(void *) &(rdesc->reldata.rd_node), HASH_REMOVE, &found);
-
+				(void *) &(rdesc->reldata.rd_node), HASH_REMOVE, NULL);
 	if (hentry == NULL)
-		elog(STOP, "_xl_remove_hash_entry: can't delete from cache");
-	if (!found)
 		elog(STOP, "_xl_remove_hash_entry: file was not found in cache");
 
 	if (rdesc->reldata.rd_fd >= 0)
@@ -281,7 +276,7 @@ _xl_new_reldesc(void)
 	/* reuse */
 	res = _xlrelarr[0].moreRecently;
 
-	_xl_remove_hash_entry(&res, 0);
+	_xl_remove_hash_entry(res);
 
 	_xlast--;
 	return (res);
@@ -298,13 +293,21 @@ XLogInitRelationCache(void)
 void
 XLogCloseRelationCache(void)
 {
+	HASH_SEQ_STATUS status;
+	XLogRelCacheEntry *hentry;
 
 	DestroyDummyCaches();
 
 	if (!_xlrelarr)
 		return;
 
-	HashTableWalk(_xlrelcache, (HashtFunc) _xl_remove_hash_entry, 0);
+	hash_seq_init(&status, _xlrelcache);
+
+	while ((hentry = (XLogRelCacheEntry *) hash_seq_search(&status)) != NULL)
+	{
+		_xl_remove_hash_entry(hentry->rdesc);
+	}
+
 	hash_destroy(_xlrelcache);
 
 	free(_xlrelarr);
@@ -321,12 +324,9 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
 	bool		found;
 
 	hentry = (XLogRelCacheEntry *)
-		hash_search(_xlrelcache, (void *) &rnode, HASH_FIND, &found);
-
-	if (hentry == NULL)
-		elog(STOP, "XLogOpenRelation: error in cache");
+		hash_search(_xlrelcache, (void *) &rnode, HASH_FIND, NULL);
 
-	if (found)
+	if (hentry)
 	{
 		res = hentry->rdesc;
 
@@ -348,7 +348,7 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
 			hash_search(_xlrelcache, (void *) &rnode, HASH_ENTER, &found);
 
 		if (hentry == NULL)
-			elog(STOP, "XLogOpenRelation: can't insert into cache");
+			elog(STOP, "XLogOpenRelation: out of memory for cache");
 
 		if (found)
 			elog(STOP, "XLogOpenRelation: file found on insert into cache");
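
(Illustrative sketch only, not part of the patch: the caller convention the hunks above convert xlogutils.c to. "MyEntry", "my_table", and "my_key" are hypothetical names, not identifiers from the tree.)

#include "postgres.h"
#include "utils/hsearch.h"

typedef struct MyEntry			/* hypothetical entry type */
{
	Oid			my_key;			/* hash key -- must be first field */
	long		counter;
} MyEntry;

/* HASH_ENTER: a NULL return now means out of memory, not corruption */
static long
my_bump_counter(HTAB *my_table, Oid my_key)
{
	MyEntry    *entry;
	bool		found;

	entry = (MyEntry *) hash_search(my_table, (void *) &my_key,
									HASH_ENTER, &found);
	if (entry == NULL)
		elog(ERROR, "out of memory in my_table");
	if (!found)
		entry->counter = 0;		/* initialize a newly created entry */
	return ++entry->counter;
}

/* HASH_FIND: foundPtr may now be NULL; a NULL return means "not found" */
static bool
my_key_exists(HTAB *my_table, Oid my_key)
{
	return hash_search(my_table, (void *) &my_key, HASH_FIND, NULL) != NULL;
}
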
diff --git a/src/backend/commands/command.c b/src/backend/commands/command.c
index 36cf79971038dc82ec46e52ff9aa2eb9db70ddf6..2b3cc08e2d4abed76663bc6554e287f46445a9d9 100644
--- a/src/backend/commands/command.c
+++ b/src/backend/commands/command.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.142 2001/09/07 21:57:53 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.143 2001/10/05 17:28:11 tgl Exp $
  *
  * NOTES
  *	  The PerformAddAttribute() code, like most of the relation
@@ -259,7 +259,7 @@ PerformPortalClose(char *name, CommandDest dest)
 	/*
 	 * Note: PortalCleanup is called as a side-effect
 	 */
-	PortalDrop(&portal);
+	PortalDrop(portal);
 }
 
 /* ----------------
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 8c6420ae4ccfab712bf16a600a54f6ba7c5f5d53..29e4729351d8657f6bdf525025a291c68450bc23 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/executor/spi.c,v 1.57 2001/08/02 18:08:43 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/executor/spi.c,v 1.58 2001/10/05 17:28:12 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -783,12 +783,10 @@ SPI_cursor_move(Portal portal, bool forward, int count)
 void
 SPI_cursor_close(Portal portal)
 {
-	Portal	my_portal = portal;
-
-	if (!PortalIsValid(my_portal))
+	if (!PortalIsValid(portal))
 		elog(ERROR, "invalid portal in SPI cursor operation");
 
-	PortalDrop(&my_portal);
+	PortalDrop(portal);
 }
 
 /* =================== private functions =================== */
diff --git a/src/backend/lib/Makefile b/src/backend/lib/Makefile
index ed190d6d2b4e8fd8115a9ced125d655c9def241d..58e47f6714456b85abbd691a1322473e4f255ce4 100644
--- a/src/backend/lib/Makefile
+++ b/src/backend/lib/Makefile
@@ -4,7 +4,7 @@
 #    Makefile for lib (miscellaneous stuff)
 #
 # IDENTIFICATION
-#    $Header: /cvsroot/pgsql/src/backend/lib/Makefile,v 1.15 2000/08/31 16:09:59 petere Exp $
+#    $Header: /cvsroot/pgsql/src/backend/lib/Makefile,v 1.16 2001/10/05 17:28:12 tgl Exp $
 #
 #-------------------------------------------------------------------------
 
@@ -12,7 +12,7 @@ subdir = src/backend/lib
 top_builddir = ../../..
 include $(top_builddir)/src/Makefile.global
 
-OBJS = bit.o hasht.o lispsort.o stringinfo.o dllist.o
+OBJS = bit.o dllist.o lispsort.o stringinfo.o
 
 all: SUBSYS.o
 
diff --git a/src/backend/lib/hasht.c b/src/backend/lib/hasht.c
deleted file mode 100644
index 5825a39cc5eea87a4725d75dc8cc0aafbf5eeebe..0000000000000000000000000000000000000000
--- a/src/backend/lib/hasht.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * hasht.c
- *	  hash table related functions that are not directly supported
- *	  by the hashing packages under utils/hash.
- *
- * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- *
- * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/lib/Attic/hasht.c,v 1.15 2001/01/24 19:42:55 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include "lib/hasht.h"
-#include "utils/memutils.h"
-
-/* -----------------------------------
- *		HashTableWalk
- *
- * call given function on every element in hashtable
- *
- * one extra argument (arg) may be supplied
- *
- * NOTE: it is allowed for the given function to delete the hashtable entry
- * it is passed.  However, deleting any other element while the scan is
- * in progress is UNDEFINED (see hash_seq functions).  Also, if elements are
- * added to the table while the scan is in progress, it is unspecified
- * whether they will be visited by the scan or not.
- * -----------------------------------
- */
-void
-HashTableWalk(HTAB *hashtable, HashtFunc function, Datum arg)
-{
-	HASH_SEQ_STATUS status;
-	long	   *hashent;
-	void	   *data;
-	int			keysize;
-
-	hash_seq_init(&status, hashtable);
-	keysize = hashtable->hctl->keysize;
-
-	while ((hashent = hash_seq_search(&status)) != (long *) TRUE)
-	{
-		if (hashent == NULL)
-			elog(FATAL, "error in HashTableWalk");
-
-		/*
-		 * XXX the corresponding hash table insertion does NOT LONGALIGN
-		 * -- make sure the keysize is ok
-		 */
-		data = (void *) LONGALIGN((char *) hashent + keysize);
-		(*function) (data, arg);
-	}
-}
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index fb6a61b647672354ddca5b3c3ab0e1e6a106ab00..68c5d2a64b2c10dc9a8076f2ba9754e27d09b3c4 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -16,7 +16,7 @@
  *
  *	Copyright (c) 2001, PostgreSQL Global Development Group
  *
- *	$Header: /cvsroot/pgsql/src/backend/postmaster/pgstat.c,v 1.9 2001/10/01 16:48:37 tgl Exp $
+ *	$Header: /cvsroot/pgsql/src/backend/postmaster/pgstat.c,v 1.10 2001/10/05 17:28:12 tgl Exp $
  * ----------
  */
 #include "postgres.h"
@@ -500,11 +500,9 @@ pgstat_vacuum_tabstat(void)
 	int						dbidalloc;
 	int						dbidused;
 	HASH_SEQ_STATUS			hstat;
-	long				   *hentry;
 	PgStat_StatDBEntry	   *dbentry;
 	PgStat_StatTabEntry	   *tabentry;
 	HeapTuple				reltup;
-	bool					found;
 	int						nobjects = 0;
 	PgStat_MsgTabpurge		msg;
 	int						len;
@@ -537,8 +535,8 @@ pgstat_vacuum_tabstat(void)
 	 */
 	dbentry = (PgStat_StatDBEntry *)hash_search(pgStatDBHash,
 												(void *) &MyDatabaseId,
-												HASH_FIND, &found);
-	if (!found || dbentry == NULL)
+												HASH_FIND, NULL);
+	if (dbentry == NULL)
 		return -1;
 
 	if (dbentry->tables == NULL)
@@ -553,17 +551,13 @@ pgstat_vacuum_tabstat(void)
 	 * Check for all tables if they still exist.
 	 */
     hash_seq_init(&hstat, dbentry->tables);
-	while((hentry = hash_seq_search(&hstat)) != (long *)TRUE)
+	while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&hstat)) != NULL)
 	{
-		if (hentry == NULL)
-			return -1;
-
 		/*
 		 * Check if this relation is still alive by
 		 * looking up its pg_class tuple in the 
 		 * system catalog cache.
 		 */
-		tabentry = (PgStat_StatTabEntry *)hentry;
 		reltup = SearchSysCache(RELOID,
 						ObjectIdGetDatum(tabentry->tableid),
 						0, 0, 0);
@@ -631,15 +625,9 @@ pgstat_vacuum_tabstat(void)
 	 * tell the collector to drop them as well.
 	 */
 	hash_seq_init(&hstat, pgStatDBHash);
-	while((hentry = hash_seq_search(&hstat)) != (long *)TRUE)
+	while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
 	{
-		Oid		dbid;
-
-		if (hentry == NULL)
-			break;
-		
-		dbentry = (PgStat_StatDBEntry *)hentry;
-		dbid = dbentry->databaseid;
+		Oid		dbid = dbentry->databaseid;
 
 		for (i = 0; i < dbidused; i++)
 		{
@@ -935,7 +923,6 @@ PgStat_StatDBEntry *
 pgstat_fetch_stat_dbentry(Oid dbid)
 {
 	PgStat_StatDBEntry	   *dbentry;
-	bool					found;
 
 	/*
 	 * If not done for this transaction, read the statistics collector
@@ -954,8 +941,8 @@ pgstat_fetch_stat_dbentry(Oid dbid)
 	 */
 	dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
 												 (void *) &dbid,
-												 HASH_FIND, &found);
-	if (!found || dbentry == NULL)
+												 HASH_FIND, NULL);
+	if (dbentry == NULL)
 		return NULL;
 
 	return dbentry;
@@ -976,7 +963,6 @@ pgstat_fetch_stat_tabentry(Oid relid)
 {
 	PgStat_StatDBEntry	   *dbentry;
 	PgStat_StatTabEntry	   *tabentry;
-	bool					found;
 
 	/*
 	 * If not done for this transaction, read the statistics collector
@@ -995,8 +981,8 @@ pgstat_fetch_stat_tabentry(Oid relid)
 	 */
 	dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
 												 (void *) &MyDatabaseId,
-												 HASH_FIND, &found);
-	if (!found || dbentry == NULL)
+												 HASH_FIND, NULL);
+	if (dbentry == NULL)
 		return NULL;
 
 	/*
@@ -1006,8 +992,8 @@ pgstat_fetch_stat_tabentry(Oid relid)
 		return NULL;
 	tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
 												   (void *) &relid,
-												   HASH_FIND, &found);
-	if (!found || tabentry == NULL)
+												   HASH_FIND, NULL);
+	if (tabentry == NULL)
 		return NULL;
 
 	return tabentry;
@@ -1229,8 +1215,8 @@ pgstat_main(int real_argc, char *real_argv[])
 	hash_ctl.keysize   = sizeof(int);
 	hash_ctl.entrysize = sizeof(PgStat_StatBeDead);
 	hash_ctl.hash      = tag_hash;
-	pgStatBeDead = hash_create(PGSTAT_BE_HASH_SIZE, &hash_ctl, 
-							HASH_ELEM | HASH_FUNCTION);
+	pgStatBeDead = hash_create("Dead Backends", PGSTAT_BE_HASH_SIZE,
+							   &hash_ctl, HASH_ELEM | HASH_FUNCTION);
 	if (pgStatBeDead == NULL)
 	{
 		fprintf(stderr, 
@@ -1751,13 +1737,8 @@ pgstat_add_backend(PgStat_MsgHdr *msg)
 	 */
 	deadbe = (PgStat_StatBeDead *) hash_search(pgStatBeDead,
 											   (void *) &(msg->m_procpid),
-											   HASH_FIND, &found);
-	if (deadbe == NULL)
-	{
-		fprintf(stderr, "PGSTAT: Dead backend table corrupted - abort\n");
-		exit(1);
-	}
-	if (found)
+											   HASH_FIND, NULL);
+	if (deadbe)
 		return 1;
 
 	/*
@@ -1782,7 +1763,7 @@ pgstat_add_backend(PgStat_MsgHdr *msg)
 												 HASH_ENTER, &found);
     if (dbentry == NULL)
 	{
-		fprintf(stderr, "PGSTAT: DB hash table corrupted - abort\n");
+		fprintf(stderr, "PGSTAT: DB hash table out of memory - abort\n");
 		exit(1);
 	}
 
@@ -1805,8 +1786,10 @@ pgstat_add_backend(PgStat_MsgHdr *msg)
 		hash_ctl.keysize   = sizeof(Oid);
 		hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
 		hash_ctl.hash      = tag_hash;
-		dbentry->tables = hash_create(PGSTAT_TAB_HASH_SIZE, &hash_ctl,
-							HASH_ELEM | HASH_FUNCTION);
+		dbentry->tables = hash_create("Per-database table",
+									  PGSTAT_TAB_HASH_SIZE,
+									  &hash_ctl,
+									  HASH_ELEM | HASH_FUNCTION);
 		if (dbentry->tables == NULL)
 		{
 			fprintf(stderr, "PGSTAT: failed to initialize hash table for "
@@ -1862,7 +1845,7 @@ pgstat_sub_backend(int procpid)
 													   HASH_ENTER, &found);
 			if (deadbe == NULL)
 			{
-				fprintf(stderr, "PGSTAT: dead backend hash table corrupted "
+				fprintf(stderr, "PGSTAT: dead backend hash table out of memory "
 								"- abort\n");
 				exit(1);
 			}
@@ -1902,8 +1885,6 @@ pgstat_write_statsfile(void)
 	PgStat_StatTabEntry		   *tabentry;
 	PgStat_StatBeDead		   *deadbe;
 	FILE					   *fpout;
-	long					   *hentry;
-	bool						found;
 	int							i;
 
 	/*
@@ -1923,16 +1904,8 @@ pgstat_write_statsfile(void)
 	 * Walk through the database table.
 	 */
 	hash_seq_init(&hstat, pgStatDBHash);
-	while ((hentry = hash_seq_search(&hstat)) != (long *)TRUE)
+	while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
 	{
-		if (hentry == NULL)
-		{
-			fprintf(stderr, "PGSTAT: database hash table corrupted "
-							"- abort\n");
-			exit(1);
-		}
-		dbentry = (PgStat_StatDBEntry *)hentry;
-
 		/*
 		 * If this database is marked destroyed, count down and do
 		 * so if it reaches 0.
@@ -1944,10 +1917,9 @@ pgstat_write_statsfile(void)
 				if (dbentry->tables != NULL)
 					hash_destroy(dbentry->tables);
 
-				hentry = hash_search(pgStatDBHash, 
-									 (void *) &(dbentry->databaseid),
-									 HASH_REMOVE, &found);
-				if (hentry == NULL)
+				if (hash_search(pgStatDBHash, 
+								(void *) &(dbentry->databaseid),
+								HASH_REMOVE, NULL) == NULL)
 				{
 					fprintf(stderr, "PGSTAT: database hash table corrupted "
 									"during cleanup - abort\n");
@@ -1970,17 +1942,8 @@ pgstat_write_statsfile(void)
 		 * Walk through the databases access stats per table.
 		 */
 		hash_seq_init(&tstat, dbentry->tables);
-		while((hentry = hash_seq_search(&tstat)) != (long *)TRUE)
+		while ((tabentry = (PgStat_StatTabEntry *) hash_seq_search(&tstat)) != NULL)
 		{
-			if (hentry == NULL)
-			{
-				fprintf(stderr, "PGSTAT: tables hash table for database "
-								"%d corrupted - abort\n",
-								dbentry->databaseid);
-				exit(1);
-			}
-			tabentry = (PgStat_StatTabEntry *)hentry;
-
 			/*
 			 * If table entry marked for destruction, same as above
 			 * for the database entry.
@@ -1989,10 +1952,9 @@ pgstat_write_statsfile(void)
 			{
 				if (--(tabentry->destroy) == 0)
 				{
-					hentry = hash_search(dbentry->tables,
+					if (hash_search(dbentry->tables,
 									(void *) &(tabentry->tableid),
-									HASH_REMOVE, &found);
-					if (hentry == NULL)
+									HASH_REMOVE, NULL) == NULL)
 					{
 						fprintf(stderr, "PGSTAT: tables hash table for "
 										"database %d corrupted during "
@@ -2062,26 +2024,17 @@ pgstat_write_statsfile(void)
 	 * Clear out the dead backends table
 	 */
 	hash_seq_init(&hstat, pgStatBeDead);
-	while ((hentry = hash_seq_search(&hstat)) != (long *)TRUE)
+	while ((deadbe = (PgStat_StatBeDead *) hash_seq_search(&hstat)) != NULL)
 	{
-		if (hentry == NULL)
-		{
-			fprintf(stderr, "PGSTAT: dead backend hash table corrupted "
-							"during cleanup - abort\n");
-			exit(1);
-		}
-		deadbe = (PgStat_StatBeDead *)hentry;
-
 		/*
 		 * Count down the destroy delay and remove entries where
 		 * it reaches 0.
 		 */
 		if (--(deadbe->destroy) <= 0)
 		{
-			hentry = hash_search(pgStatBeDead,
-								 (void *) &(deadbe->procpid),
-							HASH_REMOVE, &found);
-			if (hentry == NULL)
+			if (hash_search(pgStatBeDead,
+							(void *) &(deadbe->procpid),
+							HASH_REMOVE, NULL) == NULL)
 			{
 				fprintf(stderr, "PGSTAT: dead backend hash table corrupted "
 								"during cleanup - abort\n");
@@ -2143,7 +2096,7 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
 	hash_ctl.entrysize = sizeof(PgStat_StatDBEntry);
 	hash_ctl.hash      = tag_hash;
 	hash_ctl.hcxt      = use_mcxt;
-	*dbhash = hash_create(PGSTAT_DB_HASH_SIZE, &hash_ctl, 
+	*dbhash = hash_create("Databases hash", PGSTAT_DB_HASH_SIZE, &hash_ctl, 
 							HASH_ELEM | HASH_FUNCTION | mcxt_flags);
 	if (pgStatDBHash == NULL)
 	{
@@ -2214,13 +2167,13 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
 				{
 					if (pgStatRunningInCollector)
 					{
-						fprintf(stderr, "PGSTAT: DB hash table corrupted\n");
+						fprintf(stderr, "PGSTAT: DB hash table out of memory\n");
 						exit(1);
 					}
 					else
 					{
 						fclose(fpin);
-						elog(ERROR, "PGSTAT: DB hash table corrupted");
+						elog(ERROR, "PGSTAT: DB hash table out of memory");
 					}
 				}
 				if (found)
@@ -2258,7 +2211,9 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
 				hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
 				hash_ctl.hash      = tag_hash;
 				hash_ctl.hcxt      = use_mcxt;
-				dbentry->tables = hash_create(PGSTAT_TAB_HASH_SIZE, &hash_ctl,
+				dbentry->tables = hash_create("Per-database table",
+											  PGSTAT_TAB_HASH_SIZE,
+											  &hash_ctl,
 									HASH_ELEM | HASH_FUNCTION | mcxt_flags);
 				if (dbentry->tables == NULL)
 				{
@@ -2325,13 +2280,13 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
 				{
 					if (pgStatRunningInCollector)
 					{
-						fprintf(stderr, "PGSTAT: Tab hash table corrupted\n");
+						fprintf(stderr, "PGSTAT: Tab hash table out of memory\n");
 						exit(1);
 					}
 					else
 					{
 						fclose(fpin);
-						elog(ERROR, "PGSTAT: Tab hash table corrupted");
+						elog(ERROR, "PGSTAT: Tab hash table out of memory");
 					}
 				}
 
@@ -2444,8 +2399,8 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
 				 */
 				dbentry = (PgStat_StatDBEntry *)hash_search(*dbhash,
 								(void *) &((*betab)[havebackends].databaseid),
-								HASH_FIND, &found);
-				if (found)
+								HASH_FIND, NULL);
+				if (dbentry)
 					dbentry->n_backends++;
 
 				havebackends++;
@@ -2559,13 +2514,8 @@ pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len)
 	 */
 	dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
 							(void *) &(msg->m_hdr.m_databaseid),
-							HASH_FIND, &found);
-	if (dbentry == NULL)
-	{
-		fprintf(stderr, "PGSTAT: database hash table corrupted - abort\n");
-		exit(1);
-	}
-	if (!found)
+							HASH_FIND, NULL);
+	if (!dbentry)
 		return;
 
 	/*
@@ -2588,7 +2538,7 @@ pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len)
 						HASH_ENTER, &found);
 		if (tabentry == NULL)
 		{
-			fprintf(stderr, "PGSTAT: tables hash table corrupted for "
+			fprintf(stderr, "PGSTAT: tables hash table out of memory for "
 							"database %d - abort\n", dbentry->databaseid);
 			exit(1);
 		}
@@ -2646,7 +2596,6 @@ pgstat_recv_tabpurge(PgStat_MsgTabpurge *msg, int len)
 	PgStat_StatDBEntry     *dbentry;
 	PgStat_StatTabEntry    *tabentry;
 	int						i;
-	bool					found;
 
 	/*
 	 * Make sure the backend is counted for.
@@ -2659,13 +2608,8 @@ pgstat_recv_tabpurge(PgStat_MsgTabpurge *msg, int len)
 	 */
 	dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
 							(void *) &(msg->m_hdr.m_databaseid),
-							HASH_FIND, &found);
-	if (dbentry == NULL)
-	{
-		fprintf(stderr, "PGSTAT: database hash table corrupted - abort\n");
-		exit(1);
-	}
-	if (!found)
+							HASH_FIND, NULL);
+	if (!dbentry)
 		return;
 
 	/*
@@ -2682,15 +2626,8 @@ pgstat_recv_tabpurge(PgStat_MsgTabpurge *msg, int len)
 	{
 		tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
 						(void *) &(msg->m_tableid[i]), 
-						HASH_FIND, &found);
-		if (tabentry == NULL)
-		{
-			fprintf(stderr, "PGSTAT: tables hash table corrupted for "
-							"database %d - abort\n", dbentry->databaseid);
-			exit(1);
-		}
-
-		if (found)
+						HASH_FIND, NULL);
+		if (tabentry)
 			tabentry->destroy = PGSTAT_DESTROY_COUNT;
 	}
 }
@@ -2706,7 +2643,6 @@ static void
 pgstat_recv_dropdb(PgStat_MsgDropdb *msg, int len)
 {
 	PgStat_StatDBEntry     *dbentry;
-	bool					found;
 
 	/*
 	 * Make sure the backend is counted for.
@@ -2719,13 +2655,8 @@ pgstat_recv_dropdb(PgStat_MsgDropdb *msg, int len)
 	 */
 	dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
 							(void *) &(msg->m_databaseid),
-							HASH_FIND, &found);
-	if (dbentry == NULL)
-	{
-		fprintf(stderr, "PGSTAT: database hash table corrupted - abort\n");
-		exit(1);
-	}
-	if (!found)
+							HASH_FIND, NULL);
+	if (!dbentry)
 		return;
 
 	/*
@@ -2746,7 +2677,6 @@ pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len)
 {
 	HASHCTL					hash_ctl;
 	PgStat_StatDBEntry     *dbentry;
-	bool					found;
 
 	/*
 	 * Make sure the backend is counted for.
@@ -2759,13 +2689,8 @@ pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len)
 	 */
 	dbentry = (PgStat_StatDBEntry *) hash_search(pgStatDBHash,
 							(void *) &(msg->m_hdr.m_databaseid),
-							HASH_FIND, &found);
-	if (dbentry == NULL)
-	{
-		fprintf(stderr, "PGSTAT: database hash table corrupted - abort\n");
-		exit(1);
-	}
-	if (!found)
+							HASH_FIND, NULL);
+	if (!dbentry)
 		return;
 
 	/*
@@ -2787,8 +2712,10 @@ pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len)
 	hash_ctl.keysize  = sizeof(Oid);
 	hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
 	hash_ctl.hash     = tag_hash;
-	dbentry->tables = hash_create(PGSTAT_TAB_HASH_SIZE, &hash_ctl,
-						HASH_ELEM | HASH_FUNCTION);
+	dbentry->tables = hash_create("Per-database table",
+								  PGSTAT_TAB_HASH_SIZE,
+								  &hash_ctl,
+								  HASH_ELEM | HASH_FUNCTION);
 	if (dbentry->tables == NULL)
 	{
 		fprintf(stderr, "PGSTAT: failed to reinitialize hash table for "
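
(Illustrative sketch only, not part of the patch: the sequential-scan loop the pgstat.c hunks above switch to. hash_seq_search() now returns entry pointers directly and NULL at end of scan, so the old "(long *) TRUE" sentinel and per-iteration corruption checks go away. "MyEntry" and "my_table" are hypothetical names.)

#include "postgres.h"
#include "utils/hsearch.h"

typedef struct MyEntry			/* hypothetical entry type */
{
	Oid			my_key;			/* hash key -- must be first field */
	long		counter;
} MyEntry;

static void
my_prune_table(HTAB *my_table)
{
	HASH_SEQ_STATUS status;
	MyEntry    *entry;

	hash_seq_init(&status, my_table);
	while ((entry = (MyEntry *) hash_seq_search(&status)) != NULL)
	{
		/* deleting the entry currently being visited is allowed;
		 * deleting any other entry during the scan is not */
		if (entry->counter == 0)
		{
			if (hash_search(my_table, (void *) &entry->my_key,
							HASH_REMOVE, NULL) == NULL)
				elog(ERROR, "my_prune_table: hash table corrupted");
		}
	}
}
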
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index 85b747b442f8f08bb5acbb9bc366a37e40e392b3..d54d5e6915c7ec6b4737ecc6be5e94723f1d97a4 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_table.c,v 1.23 2001/10/01 05:36:13 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_table.c,v 1.24 2001/10/05 17:28:12 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -62,21 +62,15 @@ BufferDesc *
 BufTableLookup(BufferTag *tagPtr)
 {
 	BufferLookupEnt  *result;
-	bool		found;
 
 	if (tagPtr->blockNum == P_NEW)
 		return NULL;
 
 	result = (BufferLookupEnt *)
-		hash_search(SharedBufHash, (void *) tagPtr, HASH_FIND, &found);
-
+		hash_search(SharedBufHash, (void *) tagPtr, HASH_FIND, NULL);
 	if (!result)
-	{
-		elog(ERROR, "BufTableLookup: BufferLookup table corrupted");
-		return NULL;
-	}
-	if (!found)
 		return NULL;
+
 	return &(BufferDescriptors[result->id]);
 }
 
@@ -87,7 +81,6 @@ bool
 BufTableDelete(BufferDesc *buf)
 {
 	BufferLookupEnt  *result;
-	bool		found;
 
 	/*
 	 * buffer not initialized or has been removed from table already.
@@ -99,10 +92,11 @@ BufTableDelete(BufferDesc *buf)
 	buf->flags |= BM_DELETED;
 
 	result = (BufferLookupEnt *)
-		hash_search(SharedBufHash, (void *) &(buf->tag), HASH_REMOVE, &found);
+		hash_search(SharedBufHash, (void *) &(buf->tag), HASH_REMOVE, NULL);
 
-	if (!(result && found))
+	if (!result)
 	{
+		/* shouldn't happen */
 		elog(ERROR, "BufTableDelete: BufferLookup table corrupted");
 		return FALSE;
 	}
@@ -134,14 +128,13 @@ BufTableInsert(BufferDesc *buf)
 
 	if (!result)
 	{
-		Assert(0);
-		elog(ERROR, "BufTableInsert: BufferLookup table corrupted");
+		elog(ERROR, "BufTableInsert: BufferLookup table out of memory");
 		return FALSE;
 	}
+
 	/* found something else in the table ! */
 	if (found)
 	{
-		Assert(0);
 		elog(ERROR, "BufTableInsert: BufferLookup table corrupted");
 		return FALSE;
 	}
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index f8fefee8a09d97f6d1169984c02847e4ffa4d19e..b51b1fb230913cbe90a2e80d231fd99d7c6551b0 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.6 2001/10/01 05:36:14 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.7 2001/10/05 17:28:12 tgl Exp $
  *
  *
  * NOTES:
@@ -490,16 +490,12 @@ static FSMRelation *
 lookup_fsm_rel(RelFileNode *rel)
 {
 	FSMRelation *fsmrel;
-	bool		found;
 
 	fsmrel = (FSMRelation *) hash_search(FreeSpaceMap->relHash,
 										 (void *) rel,
 										 HASH_FIND,
-										 &found);
+										 NULL);
 	if (!fsmrel)
-		elog(ERROR, "FreeSpaceMap hashtable corrupted");
-
-	if (!found)
 		return NULL;
 
 	return fsmrel;
@@ -523,7 +519,7 @@ create_fsm_rel(RelFileNode *rel)
 										 HASH_ENTER,
 										 &found);
 	if (!fsmrel)
-		elog(ERROR, "FreeSpaceMap hashtable corrupted");
+		elog(ERROR, "FreeSpaceMap hashtable out of memory");
 
 	if (!found)
 	{
@@ -584,7 +580,6 @@ static void
 delete_fsm_rel(FSMRelation *fsmrel)
 {
 	FSMRelation *result;
-	bool		found;
 
 	free_chunk_chain(fsmrel->relChunks);
 	unlink_fsm_rel(fsmrel);
@@ -592,8 +587,8 @@ delete_fsm_rel(FSMRelation *fsmrel)
 	result = (FSMRelation *) hash_search(FreeSpaceMap->relHash,
 										 (void *) &(fsmrel->key),
 										 HASH_REMOVE,
-										 &found);
-	if (!result || !found)
+										 NULL);
+	if (!result)
 		elog(ERROR, "FreeSpaceMap hashtable corrupted");
 }
 
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 024db5bd5334dbfe41f1df79f0161f0577d2117f..32f2cf98a1dc51577ba1d00e70eef7f146da228f 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.60 2001/10/01 05:36:14 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.61 2001/10/05 17:28:12 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -210,7 +210,7 @@ InitShmemIndex(void)
 	result = (ShmemIndexEnt *)
 		hash_search(ShmemIndex, (void *) &item, HASH_ENTER, &found);
 	if (!result)
-		elog(FATAL, "InitShmemIndex: corrupted shmem index");
+		elog(FATAL, "InitShmemIndex: Shmem Index out of memory");
 
 	Assert(ShmemBootstrap && !found);
 
@@ -234,7 +234,7 @@ InitShmemIndex(void)
  * table at once.
  */
 HTAB *
-ShmemInitHash(char *name,		/* table string name for shmem index */
+ShmemInitHash(const char *name,	/* table string name for shmem index */
 			  long init_size,	/* initial table size */
 			  long max_size,	/* max size of the table */
 			  HASHCTL *infoP,	/* info about key and bucket size */
@@ -277,7 +277,7 @@ ShmemInitHash(char *name,		/* table string name for shmem index */
 	infoP->hctl = (HASHHDR *) location;
 	infoP->dir = (HASHSEGMENT *) (((char *) location) + sizeof(HASHHDR));
 
-	return hash_create(init_size, infoP, hash_flags);
+	return hash_create(name, init_size, infoP, hash_flags);
 }
 
 /*
@@ -295,7 +295,7 @@ ShmemInitHash(char *name,		/* table string name for shmem index */
  *		initialized).
  */
 void *
-ShmemInitStruct(char *name, Size size, bool *foundPtr)
+ShmemInitStruct(const char *name, Size size, bool *foundPtr)
 {
 	ShmemIndexEnt *result,
 				item;
@@ -328,7 +328,7 @@ ShmemInitStruct(char *name, Size size, bool *foundPtr)
 	if (!result)
 	{
 		LWLockRelease(ShmemIndexLock);
-		elog(ERROR, "ShmemInitStruct: Shmem Index corrupted");
+		elog(ERROR, "ShmemInitStruct: Shmem Index out of memory");
 		return NULL;
 	}
 
@@ -357,12 +357,12 @@ ShmemInitStruct(char *name, Size size, bool *foundPtr)
 		{
 			/* out of memory */
 			Assert(ShmemIndex);
-			hash_search(ShmemIndex, (void *) &item, HASH_REMOVE, foundPtr);
+			hash_search(ShmemIndex, (void *) &item, HASH_REMOVE, NULL);
 			LWLockRelease(ShmemIndexLock);
-			*foundPtr = FALSE;
 
 			elog(NOTICE, "ShmemInitStruct: cannot allocate '%s'",
 				 name);
+			*foundPtr = FALSE;
 			return NULL;
 		}
 		result->size = size;
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 84204411faccbed630da2759f4974f6f700e705d..8c569710d04a4683a5c327497a22f6b35bc1dd7a 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.99 2001/10/01 05:36:14 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.100 2001/10/05 17:28:12 tgl Exp $
  *
  * NOTES
  *	  Outside modules can create a lock table and acquire/release
@@ -491,7 +491,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 	if (!lock)
 	{
 		LWLockRelease(masterLock);
-		elog(FATAL, "LockAcquire: lock table %d is corrupted", lockmethod);
+		elog(ERROR, "LockAcquire: lock table %d is out of memory",
+			 lockmethod);
 		return FALSE;
 	}
 
@@ -537,7 +538,7 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 	if (!holder)
 	{
 		LWLockRelease(masterLock);
-		elog(FATAL, "LockAcquire: holder table corrupted");
+		elog(ERROR, "LockAcquire: holder table out of memory");
 		return FALSE;
 	}
 
@@ -658,8 +659,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 				SHMQueueDelete(&holder->procLink);
 				holder = (HOLDER *) hash_search(holderTable,
 												(void *) holder,
-												HASH_REMOVE, &found);
-				if (!holder || !found)
+												HASH_REMOVE, NULL);
+				if (!holder)
 					elog(NOTICE, "LockAcquire: remove holder, table corrupted");
 			}
 			else
@@ -991,7 +992,6 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 {
 	LOCK	   *lock;
 	LWLockId	masterLock;
-	bool		found;
 	LOCKMETHODTABLE *lockMethodTable;
 	HOLDER	   *holder;
 	HOLDERTAG	holdertag;
@@ -1023,20 +1023,13 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 	Assert(lockMethodTable->lockHash->hash == tag_hash);
 	lock = (LOCK *) hash_search(lockMethodTable->lockHash,
 								(void *) locktag,
-								HASH_FIND, &found);
+								HASH_FIND, NULL);
 
 	/*
 	 * let the caller print its own error message, too. Do not
 	 * elog(ERROR).
 	 */
 	if (!lock)
-	{
-		LWLockRelease(masterLock);
-		elog(NOTICE, "LockRelease: locktable corrupted");
-		return FALSE;
-	}
-
-	if (!found)
 	{
 		LWLockRelease(masterLock);
 		elog(NOTICE, "LockRelease: no such lock");
@@ -1056,12 +1049,12 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 	holderTable = lockMethodTable->holderHash;
 	holder = (HOLDER *) hash_search(holderTable,
 									(void *) &holdertag,
-									HASH_FIND_SAVE, &found);
-	if (!holder || !found)
+									HASH_FIND_SAVE, NULL);
+	if (!holder)
 	{
 		LWLockRelease(masterLock);
 #ifdef USER_LOCKS
-		if (!found && lockmethod == USER_LOCKMETHOD)
+		if (lockmethod == USER_LOCKMETHOD)
 			elog(NOTICE, "LockRelease: no lock with this tag");
 		else
 #endif
@@ -1130,8 +1123,8 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 		lock = (LOCK *) hash_search(lockMethodTable->lockHash,
 									(void *) &(lock->tag),
 									HASH_REMOVE,
-									&found);
-		if (!lock || !found)
+									NULL);
+		if (!lock)
 		{
 			LWLockRelease(masterLock);
 			elog(NOTICE, "LockRelease: remove lock, table corrupted");
@@ -1159,8 +1152,8 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
 		SHMQueueDelete(&holder->procLink);
 		holder = (HOLDER *) hash_search(holderTable,
 										(void *) &holder,
-										HASH_REMOVE_SAVED, &found);
-		if (!holder || !found)
+										HASH_REMOVE_SAVED, NULL);
+		if (!holder)
 		{
 			LWLockRelease(masterLock);
 			elog(NOTICE, "LockRelease: remove holder, table corrupted");
@@ -1201,7 +1194,6 @@ LockReleaseAll(LOCKMETHOD lockmethod, PROC *proc,
 	int			i,
 				numLockModes;
 	LOCK	   *lock;
-	bool		found;
 
 #ifdef LOCK_DEBUG
 	if (lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
@@ -1313,8 +1305,8 @@ LockReleaseAll(LOCKMETHOD lockmethod, PROC *proc,
 		holder = (HOLDER *) hash_search(lockMethodTable->holderHash,
 										(void *) holder,
 										HASH_REMOVE,
-										&found);
-		if (!holder || !found)
+										NULL);
+		if (!holder)
 		{
 			LWLockRelease(masterLock);
 			elog(NOTICE, "LockReleaseAll: holder table corrupted");
@@ -1323,7 +1315,6 @@ LockReleaseAll(LOCKMETHOD lockmethod, PROC *proc,
 
 		if (lock->nRequested == 0)
 		{
-
 			/*
 			 * We've just released the last lock, so garbage-collect the
 			 * lock object.
@@ -1332,8 +1323,8 @@ LockReleaseAll(LOCKMETHOD lockmethod, PROC *proc,
 			Assert(lockMethodTable->lockHash->hash == tag_hash);
 			lock = (LOCK *) hash_search(lockMethodTable->lockHash,
 										(void *) &(lock->tag),
-										HASH_REMOVE, &found);
-			if (!lock || !found)
+										HASH_REMOVE, NULL);
+			if (!lock)
 			{
 				LWLockRelease(masterLock);
 				elog(NOTICE, "LockReleaseAll: cannot remove lock from HTAB");
@@ -1438,7 +1429,7 @@ void
 DumpAllLocks(void)
 {
 	PROC	   *proc;
-	HOLDER	   *holder = NULL;
+	HOLDER	   *holder;
 	LOCK	   *lock;
 	int			lockmethod = DEFAULT_LOCKMETHOD;
 	LOCKMETHODTABLE *lockMethodTable;
@@ -1460,8 +1451,7 @@ DumpAllLocks(void)
 		LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
 
 	hash_seq_init(&status, holderTable);
-	while ((holder = (HOLDER *) hash_seq_search(&status)) &&
-		   (holder != (HOLDER *) TRUE))
+	while ((holder = (HOLDER *) hash_seq_search(&status)) != NULL)
 	{
 		HOLDER_PRINT("DumpAllLocks", holder);
 
diff --git a/src/backend/storage/smgr/mm.c b/src/backend/storage/smgr/mm.c
index c6d084dada58159a478e8cbad3481569acf65518..fcb282f916eac5bc701ef9fe32773b629f1ce471 100644
--- a/src/backend/storage/smgr/mm.c
+++ b/src/backend/storage/smgr/mm.c
@@ -11,7 +11,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/storage/smgr/Attic/mm.c,v 1.26 2001/10/01 05:36:15 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/storage/smgr/Attic/mm.c,v 1.27 2001/10/05 17:28:12 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -186,7 +186,7 @@ mmcreate(Relation reln)
 	if (entry == (MMRelHashEntry *) NULL)
 	{
 		LWLockRelease(MMCacheLock);
-		elog(FATAL, "main memory storage mgr rel cache hash table corrupt");
+		elog(FATAL, "main memory storage mgr hash table out of memory");
 	}
 
 	if (found)
@@ -214,7 +214,6 @@ mmunlink(RelFileNode rnode)
 	int			i;
 	MMHashEntry *entry;
 	MMRelHashEntry *rentry;
-	bool		found;
 	MMRelTag	rtag;
 
 	LWLockAcquire(MMCacheLock, LW_EXCLUSIVE);
@@ -226,8 +225,8 @@ mmunlink(RelFileNode rnode)
 		{
 			entry = (MMHashEntry *) hash_search(MMCacheHT,
 												(void *) &MMBlockTags[i],
-												HASH_REMOVE, &found);
-			if (entry == (MMHashEntry *) NULL || !found)
+												HASH_REMOVE, NULL);
+			if (entry == (MMHashEntry *) NULL)
 			{
 				LWLockRelease(MMCacheLock);
 				elog(FATAL, "mmunlink: cache hash table corrupted");
@@ -242,9 +241,9 @@ mmunlink(RelFileNode rnode)
 
 	rentry = (MMRelHashEntry *) hash_search(MMRelCacheHT,
 											(void *) &rtag,
-											HASH_REMOVE, &found);
+											HASH_REMOVE, NULL);
 
-	if (rentry == (MMRelHashEntry *) NULL || !found)
+	if (rentry == (MMRelHashEntry *) NULL)
 	{
 		LWLockRelease(MMCacheLock);
 		elog(FATAL, "mmunlink: rel cache hash table corrupted");
@@ -306,8 +305,8 @@ mmextend(Relation reln, BlockNumber blocknum, char *buffer)
 
 	rentry = (MMRelHashEntry *) hash_search(MMRelCacheHT,
 											(void *) &rtag,
-											HASH_FIND, &found);
-	if (rentry == (MMRelHashEntry *) NULL || !found)
+											HASH_FIND, NULL);
+	if (rentry == (MMRelHashEntry *) NULL)
 	{
 		LWLockRelease(MMCacheLock);
 		elog(FATAL, "mmextend: rel cache hash table corrupt");
@@ -372,7 +371,6 @@ int
 mmread(Relation reln, BlockNumber blocknum, char *buffer)
 {
 	MMHashEntry *entry;
-	bool		found;
 	int			offset;
 	MMCacheTag	tag;
 
@@ -387,15 +385,9 @@ mmread(Relation reln, BlockNumber blocknum, char *buffer)
 	LWLockAcquire(MMCacheLock, LW_EXCLUSIVE);
 	entry = (MMHashEntry *) hash_search(MMCacheHT,
 										(void *) &tag,
-										HASH_FIND, &found);
+										HASH_FIND, NULL);
 
 	if (entry == (MMHashEntry *) NULL)
-	{
-		LWLockRelease(MMCacheLock);
-		elog(FATAL, "mmread: hash table corrupt");
-	}
-
-	if (!found)
 	{
 		/* reading nonexistent pages is defined to fill them with zeroes */
 		LWLockRelease(MMCacheLock);
@@ -420,7 +412,6 @@ int
 mmwrite(Relation reln, BlockNumber blocknum, char *buffer)
 {
 	MMHashEntry *entry;
-	bool		found;
 	int			offset;
 	MMCacheTag	tag;
 
@@ -435,15 +426,9 @@ mmwrite(Relation reln, BlockNumber blocknum, char *buffer)
 	LWLockAcquire(MMCacheLock, LW_EXCLUSIVE);
 	entry = (MMHashEntry *) hash_search(MMCacheHT,
 										(void *) &tag,
-										HASH_FIND, &found);
+										HASH_FIND, NULL);
 
 	if (entry == (MMHashEntry *) NULL)
-	{
-		LWLockRelease(MMCacheLock);
-		elog(FATAL, "mmread: hash table corrupt");
-	}
-
-	if (!found)
 	{
 		LWLockRelease(MMCacheLock);
 		elog(FATAL, "mmwrite: hash table missing requested page");
@@ -496,7 +481,6 @@ mmnblocks(Relation reln)
 {
 	MMRelTag	rtag;
 	MMRelHashEntry *rentry;
-	bool		found;
 	BlockNumber	nblocks;
 
 	if (reln->rd_rel->relisshared)
@@ -510,15 +494,9 @@ mmnblocks(Relation reln)
 
 	rentry = (MMRelHashEntry *) hash_search(MMRelCacheHT,
 											(void *) &rtag,
-											HASH_FIND, &found);
-
-	if (rentry == (MMRelHashEntry *) NULL)
-	{
-		LWLockRelease(MMCacheLock);
-		elog(FATAL, "mmnblocks: rel cache hash table corrupt");
-	}
+											HASH_FIND, NULL);
 
-	if (found)
+	if (rentry)
 		nblocks = rentry->mmrhe_nblocks;
 	else
 		nblocks = InvalidBlockNumber;
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 48df3f980a3874979172b4dbace75ad6060bd127..c7a33a3bc2943711eb7ef55ca7c9164d8e63e6d9 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/tcop/pquery.c,v 1.44 2001/03/22 06:16:17 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/tcop/pquery.c,v 1.45 2001/10/05 17:28:12 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -147,7 +147,7 @@ PreparePortal(char *portalName)
 		 */
 		elog(NOTICE, "Closing pre-existing portal \"%s\"",
 			 portalName);
-		PortalDrop(&portal);
+		PortalDrop(portal);
 	}
 
 	/*
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 6ab9871648e1be6f39f86f9885dd0cd1019a2c6d..ebbb8b07ee8f14ba933aa65a5667ea9368f532a1 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -18,7 +18,7 @@
  * Portions Copyright (c) 2000-2001, PostgreSQL Global Development Group
  * Copyright 1999 Jan Wieck
  *
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/ri_triggers.c,v 1.26 2001/10/01 05:36:16 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/ri_triggers.c,v 1.27 2001/10/05 17:28:12 tgl Exp $
  *
  * ----------
  */
@@ -2990,14 +2990,14 @@ ri_InitHashTables(void)
 	ctl.keysize = sizeof(RI_QueryKey);
 	ctl.entrysize = sizeof(RI_QueryHashEntry);
 	ctl.hash = tag_hash;
-	ri_query_cache = hash_create(RI_INIT_QUERYHASHSIZE, &ctl,
-								 HASH_ELEM | HASH_FUNCTION);
+	ri_query_cache = hash_create("RI query cache", RI_INIT_QUERYHASHSIZE,
+								 &ctl, HASH_ELEM | HASH_FUNCTION);
 
 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(RI_OpreqHashEntry);
 	ctl.hash = tag_hash;
-	ri_opreq_cache = hash_create(RI_INIT_OPREQHASHSIZE, &ctl,
-								 HASH_ELEM | HASH_FUNCTION);
+	ri_opreq_cache = hash_create("RI OpReq cache", RI_INIT_OPREQHASHSIZE,
+								 &ctl, HASH_ELEM | HASH_FUNCTION);
 }
 
 
@@ -3012,7 +3012,6 @@ static void *
 ri_FetchPreparedPlan(RI_QueryKey *key)
 {
 	RI_QueryHashEntry *entry;
-	bool		found;
 
 	/*
 	 * On the first call initialize the hashtable
@@ -3025,10 +3024,8 @@ ri_FetchPreparedPlan(RI_QueryKey *key)
 	 */
 	entry = (RI_QueryHashEntry *) hash_search(ri_query_cache,
 											  (void *) key,
-											  HASH_FIND, &found);
+											  HASH_FIND, NULL);
 	if (entry == NULL)
-		elog(FATAL, "error in RI plan cache");
-	if (!found)
 		return NULL;
 	return entry->plan;
 }
@@ -3059,7 +3056,7 @@ ri_HashPreparedPlan(RI_QueryKey *key, void *plan)
 											  (void *) key,
 											  HASH_ENTER, &found);
 	if (entry == NULL)
-		elog(FATAL, "can't insert into RI plan cache");
+		elog(ERROR, "out of memory for RI plan cache");
 	entry->plan = plan;
 }
 
@@ -3235,16 +3232,14 @@ ri_AttributesEqual(Oid typeid, Datum oldvalue, Datum newvalue)
 	 */
 	entry = (RI_OpreqHashEntry *) hash_search(ri_opreq_cache,
 											  (void *) &typeid,
-											  HASH_FIND, &found);
-	if (entry == NULL)
-		elog(FATAL, "error in RI operator cache");
+											  HASH_FIND, NULL);
 
 	/*
 	 * If not found, lookup the OPERNAME system cache for it to get the
 	 * func OID, then do the function manager lookup, and remember that
 	 * info.
 	 */
-	if (!found)
+	if (!entry)
 	{
 		HeapTuple	opr_tup;
 		Oid			opr_proc;
@@ -3278,7 +3273,7 @@ ri_AttributesEqual(Oid typeid, Datum oldvalue, Datum newvalue)
 												  (void *) &typeid,
 												  HASH_ENTER, &found);
 		if (entry == NULL)
-			elog(FATAL, "can't insert into RI operator cache");
+			elog(ERROR, "out of memory for RI operator cache");
 
 		entry->typeid = typeid;
 		memcpy(&(entry->oprfmgrinfo), &finfo, sizeof(FmgrInfo));
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 628e96842d851313c01b8e0d534ac491c2464f6e..8d8548713943a43e0ed33c08bb1f9ddc266a7611 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.144 2001/10/01 05:36:16 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.145 2001/10/05 17:28:12 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -48,12 +48,12 @@
 #include "catalog/pg_rewrite.h"
 #include "catalog/pg_type.h"
 #include "commands/trigger.h"
-#include "lib/hasht.h"
 #include "miscadmin.h"
 #include "storage/smgr.h"
 #include "utils/builtins.h"
 #include "utils/catcache.h"
 #include "utils/fmgroids.h"
+#include "utils/hsearch.h"
 #include "utils/memutils.h"
 #include "utils/relcache.h"
 #include "utils/temprel.h"
@@ -144,38 +144,33 @@ do { \
 											   HASH_ENTER, \
 											   &found); \
 	if (namehentry == NULL) \
-		elog(FATAL, "can't insert into relation descriptor cache"); \
-	if (found && !IsBootstrapProcessingMode()) \
-		/* used to give notice -- now just keep quiet */ ; \
+		elog(ERROR, "out of memory for relation descriptor cache"); \
+	/* used to give notice if found -- now just keep quiet */ ; \
 	namehentry->reldesc = RELATION; \
 	idhentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
 										   (void *) &(RELATION->rd_id), \
 										   HASH_ENTER, \
 										   &found); \
 	if (idhentry == NULL) \
-		elog(FATAL, "can't insert into relation descriptor cache"); \
-	if (found && !IsBootstrapProcessingMode()) \
-		/* used to give notice -- now just keep quiet */ ; \
+		elog(ERROR, "out of memory for relation descriptor cache"); \
+	/* used to give notice if found -- now just keep quiet */ ; \
 	idhentry->reldesc = RELATION; \
 	nodentry = (RelNodeCacheEnt*)hash_search(RelationNodeCache, \
 										   (void *) &(RELATION->rd_node), \
 										   HASH_ENTER, \
 										   &found); \
 	if (nodentry == NULL) \
-		elog(FATAL, "can't insert into relation descriptor cache"); \
-	if (found && !IsBootstrapProcessingMode()) \
-		/* used to give notice -- now just keep quiet */ ; \
+		elog(ERROR, "out of memory for relation descriptor cache"); \
+	/* used to give notice if found -- now just keep quiet */ ; \
 	nodentry->reldesc = RELATION; \
 } while(0)
 
 #define RelationNameCacheLookup(NAME, RELATION) \
 do { \
-	RelNameCacheEnt *hentry; bool found; \
+	RelNameCacheEnt *hentry; \
 	hentry = (RelNameCacheEnt*)hash_search(RelationNameCache, \
-										   (void *) (NAME),HASH_FIND,&found); \
-	if (hentry == NULL) \
-		elog(FATAL, "error in CACHE"); \
-	if (found) \
+										   (void *) (NAME), HASH_FIND,NULL); \
+	if (hentry) \
 		RELATION = hentry->reldesc; \
 	else \
 		RELATION = NULL; \
@@ -184,12 +179,9 @@ do { \
 #define RelationIdCacheLookup(ID, RELATION) \
 do { \
 	RelIdCacheEnt *hentry; \
-	bool found; \
 	hentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
-										 (void *)&(ID),HASH_FIND, &found); \
-	if (hentry == NULL) \
-		elog(FATAL, "error in CACHE"); \
-	if (found) \
+										 (void *)&(ID), HASH_FIND,NULL); \
+	if (hentry) \
 		RELATION = hentry->reldesc; \
 	else \
 		RELATION = NULL; \
@@ -198,12 +190,9 @@ do { \
 #define RelationNodeCacheLookup(NODE, RELATION) \
 do { \
 	RelNodeCacheEnt *hentry; \
-	bool found; \
 	hentry = (RelNodeCacheEnt*)hash_search(RelationNodeCache, \
-									 (void *)&(NODE),HASH_FIND, &found); \
-	if (hentry == NULL) \
-		elog(FATAL, "error in CACHE"); \
-	if (found) \
+										   (void *)&(NODE), HASH_FIND,NULL); \
+	if (hentry) \
 		RELATION = hentry->reldesc; \
 	else \
 		RELATION = NULL; \
@@ -212,29 +201,22 @@ do { \
 #define RelationCacheDelete(RELATION) \
 do { \
 	RelNameCacheEnt *namehentry; RelIdCacheEnt *idhentry; \
-	char *relname; RelNodeCacheEnt *nodentry; bool found; \
+	char *relname; RelNodeCacheEnt *nodentry; \
 	relname = RelationGetPhysicalRelationName(RELATION); \
 	namehentry = (RelNameCacheEnt*)hash_search(RelationNameCache, \
 											   relname, \
-											   HASH_REMOVE, \
-											   &found); \
+											   HASH_REMOVE, NULL); \
 	if (namehentry == NULL) \
-		elog(FATAL, "can't delete from relation descriptor cache"); \
-	if (!found) \
 		elog(NOTICE, "trying to delete a reldesc that does not exist."); \
 	idhentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
 										   (void *)&(RELATION->rd_id), \
-										   HASH_REMOVE, &found); \
+										   HASH_REMOVE, NULL); \
 	if (idhentry == NULL) \
-		elog(FATAL, "can't delete from relation descriptor cache"); \
-	if (!found) \
 		elog(NOTICE, "trying to delete a reldesc that does not exist."); \
 	nodentry = (RelNodeCacheEnt*)hash_search(RelationNodeCache, \
 										   (void *)&(RELATION->rd_node), \
-										   HASH_REMOVE, &found); \
+										   HASH_REMOVE, NULL); \
 	if (nodentry == NULL) \
-		elog(FATAL, "can't delete from relation descriptor cache"); \
-	if (!found) \
 		elog(NOTICE, "trying to delete a reldesc that does not exist."); \
 } while(0)
 
@@ -248,8 +230,6 @@ static void RelationReloadClassinfo(Relation relation);
 #endif	 /* ENABLE_REINDEX_NAILED_RELATIONS */
 static void RelationFlushRelation(Relation relation);
 static Relation RelationNameCacheGetRelation(const char *relationName);
-static void RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp);
-static void RelationCacheAbortWalker(Relation *relationPtr, Datum dummy);
 static void init_irels(void);
 static void write_irels(void);
 
@@ -1842,56 +1822,54 @@ RelationFlushIndexes(Relation *r,
  *
  *	 We do this in two phases: the first pass deletes deletable items, and
  *	 the second one rebuilds the rebuildable items.  This is essential for
- *	 safety, because HashTableWalk only copes with concurrent deletion of
+ *	 safety, because hash_seq_search only copes with concurrent deletion of
  *	 the element it is currently visiting.	If a second SI overflow were to
  *	 occur while we are walking the table, resulting in recursive entry to
  *	 this routine, we could crash because the inner invocation blows away
  *	 the entry next to be visited by the outer scan.  But this way is OK,
  *	 because (a) during the first pass we won't process any more SI messages,
- *	 so HashTableWalk will complete safely; (b) during the second pass we
+ *	 so hash_seq_search will complete safely; (b) during the second pass we
  *	 only hold onto pointers to nondeletable entries.
  */
 void
 RelationCacheInvalidate(void)
 {
+	HASH_SEQ_STATUS status;
+	RelNameCacheEnt *namehentry;
+	Relation	relation;
 	List	   *rebuildList = NIL;
 	List	   *l;
 
 	/* Phase 1 */
-	HashTableWalk(RelationNameCache,
-				  (HashtFunc) RelationCacheInvalidateWalker,
-				  PointerGetDatum(&rebuildList));
+	hash_seq_init(&status, RelationNameCache);
 
-	/* Phase 2: rebuild the items found to need rebuild in phase 1 */
-	foreach(l, rebuildList)
+	while ((namehentry = (RelNameCacheEnt *) hash_seq_search(&status)) != NULL)
 	{
-		Relation	relation = (Relation) lfirst(l);
-
-		RelationClearRelation(relation, true);
-	}
-	freeList(rebuildList);
-}
+		relation = namehentry->reldesc;
 
-static void
-RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp)
-{
-	Relation	relation = *relationPtr;
-	List	  **rebuildList = (List **) DatumGetPointer(listp);
-
-	/* We can ignore xact-local relations, since they are never SI targets */
-	if (relation->rd_myxactonly)
-		return;
+		/* Ignore xact-local relations, since they are never SI targets */
+		if (relation->rd_myxactonly)
+			continue;
 
-	if (RelationHasReferenceCountZero(relation))
-	{
-		/* Delete this entry immediately */
-		RelationClearRelation(relation, false);
+		if (RelationHasReferenceCountZero(relation))
+		{
+			/* Delete this entry immediately */
+			RelationClearRelation(relation, false);
+		}
+		else
+		{
+			/* Add entry to list of stuff to rebuild in second pass */
+			rebuildList = lcons(relation, rebuildList);
+		}
 	}
-	else
+
+	/* Phase 2: rebuild the items found to need rebuild in phase 1 */
+	foreach(l, rebuildList)
 	{
-		/* Add entry to list of stuff to rebuild in second pass */
-		*rebuildList = lcons(relation, *rebuildList);
+		relation = (Relation) lfirst(l);
+		RelationClearRelation(relation, true);
 	}
+	freeList(rebuildList);
 }
 
 /*
@@ -1910,20 +1888,20 @@ RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp)
 void
 RelationCacheAbort(void)
 {
-	HashTableWalk(RelationNameCache,
-				  (HashtFunc) RelationCacheAbortWalker,
-				  0);
-}
+	HASH_SEQ_STATUS status;
+	RelNameCacheEnt *namehentry;
 
-static void
-RelationCacheAbortWalker(Relation *relationPtr, Datum dummy)
-{
-	Relation	relation = *relationPtr;
+	hash_seq_init(&status, RelationNameCache);
 
-	if (relation->rd_isnailed)
-		RelationSetReferenceCount(relation, 1);
-	else
-		RelationSetReferenceCount(relation, 0);
+	while ((namehentry = (RelNameCacheEnt *) hash_seq_search(&status)) != NULL)
+	{
+		Relation	relation = namehentry->reldesc;
+
+		if (relation->rd_isnailed)
+			RelationSetReferenceCount(relation, 1);
+		else
+			RelationSetReferenceCount(relation, 0);
+	}
 }
 
 /*
@@ -2095,19 +2073,20 @@ RelationCacheInitialize(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(NameData);
 	ctl.entrysize = sizeof(RelNameCacheEnt);
-	RelationNameCache = hash_create(INITRELCACHESIZE, &ctl, HASH_ELEM);
+	RelationNameCache = hash_create("Relcache by name", INITRELCACHESIZE,
+									&ctl, HASH_ELEM);
 
 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(RelIdCacheEnt);
 	ctl.hash = tag_hash;
-	RelationIdCache = hash_create(INITRELCACHESIZE, &ctl,
-								  HASH_ELEM | HASH_FUNCTION);
+	RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,
+								  &ctl, HASH_ELEM | HASH_FUNCTION);
 
 	ctl.keysize = sizeof(RelFileNode);
 	ctl.entrysize = sizeof(RelNodeCacheEnt);
 	ctl.hash = tag_hash;
-	RelationNodeCache = hash_create(INITRELCACHESIZE, &ctl,
-									HASH_ELEM | HASH_FUNCTION);
+	RelationNodeCache = hash_create("Relcache by rnode", INITRELCACHESIZE,
+									&ctl, HASH_ELEM | HASH_FUNCTION);
 
 	/*
 	 * initialize the cache with pre-made relation descriptors for some of
@@ -2187,19 +2166,21 @@ CreateDummyCaches(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(NameData);
 	ctl.entrysize = sizeof(RelNameCacheEnt);
-	RelationNameCache = hash_create(INITRELCACHESIZE, &ctl, HASH_ELEM);
+	RelationNameCache = hash_create("Relcache by name", INITRELCACHESIZE,
+									&ctl, HASH_ELEM);
 
 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(RelIdCacheEnt);
 	ctl.hash = tag_hash;
-	RelationIdCache = hash_create(INITRELCACHESIZE, &ctl,
-								  HASH_ELEM | HASH_FUNCTION);
+	RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,
+								  &ctl, HASH_ELEM | HASH_FUNCTION);
 
 	ctl.keysize = sizeof(RelFileNode);
 	ctl.entrysize = sizeof(RelNodeCacheEnt);
 	ctl.hash = tag_hash;
-	RelationNodeCache = hash_create(INITRELCACHESIZE, &ctl,
-									HASH_ELEM | HASH_FUNCTION);
+	RelationNodeCache = hash_create("Relcache by rnode", INITRELCACHESIZE,
+									&ctl, HASH_ELEM | HASH_FUNCTION);
+
 	MemoryContextSwitchTo(oldcxt);
 }
 
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 92e775bfe86e334e894e3d868bde088adf70d678..2d5a79551edfee4ceadeca3f6e19da03da0b9075 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.37 2001/10/01 05:36:16 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.38 2001/10/05 17:28:13 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -71,6 +71,7 @@ static bool	dir_realloc(HTAB *hashp);
 static bool	expand_table(HTAB *hashp);
 static bool	hdefault(HTAB *hashp);
 static bool	init_htab(HTAB *hashp, long nelem);
+static void hash_corrupted(HTAB *hashp);
 
 
 /*
@@ -100,7 +101,7 @@ static long hash_accesses,
 /************************** CREATE ROUTINES **********************/
 
 HTAB *
-hash_create(long nelem, HASHCTL *info, int flags)
+hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 {
 	HTAB	   *hashp;
 	HASHHDR	   *hctl;
@@ -125,6 +126,9 @@ hash_create(long nelem, HASHCTL *info, int flags)
 		return NULL;
 	MemSet(hashp, 0, sizeof(HTAB));
 
+	hashp->tabname = (char *) MEM_ALLOC(strlen(tabname) + 1);
+	strcpy(hashp->tabname, tabname);
+
 	if (flags & HASH_FUNCTION)
 		hashp->hash = info->hash;
 	else
@@ -140,6 +144,7 @@ hash_create(long nelem, HASHCTL *info, int flags)
 		hashp->dir = info->dir;
 		hashp->alloc = info->alloc;
 		hashp->hcxt = NULL;
+		hashp->isshared = true;
 
 		/* hash table already exists, we're just attaching to it */
 		if (flags & HASH_ATTACH)
@@ -152,6 +157,7 @@ hash_create(long nelem, HASHCTL *info, int flags)
 		hashp->dir = NULL;
 		hashp->alloc = MEM_ALLOC;
 		hashp->hcxt = DynaHashCxt;
+		hashp->isshared = false;
 	}
 
 	if (!hashp->hctl)
@@ -434,12 +440,13 @@ hash_destroy(HTAB *hashp)
 		 * by the caller of hash_create()).
 		 */
 		MEM_FREE(hashp->hctl);
+		MEM_FREE(hashp->tabname);
 		MEM_FREE(hashp);
 	}
 }
 
 void
-hash_stats(char *where, HTAB *hashp)
+hash_stats(const char *where, HTAB *hashp)
 {
 #if HASH_STATISTICS
 
@@ -476,24 +483,37 @@ call_hash(HTAB *hashp, void *k)
 	return (uint32) bucket;
 }
 
-/*
+/*----------
  * hash_search -- look up key in table and perform action
  *
- * action is one of HASH_FIND/HASH_ENTER/HASH_REMOVE
+ * action is one of:
+ *		HASH_FIND: look up key in table
+ *		HASH_ENTER: look up key in table, creating entry if not present
+ *		HASH_REMOVE: look up key in table, remove entry if present
+ *		HASH_FIND_SAVE: look up key in table, also save in static var
+ *		HASH_REMOVE_SAVED: remove entry saved by HASH_FIND_SAVE
+ *
+ * Return value is a pointer to the element found/entered/removed if any,
+ * or NULL if no match was found.  (NB: in the case of the REMOVE actions,
+ * the result is a dangling pointer that shouldn't be dereferenced!)
+ * A NULL result for HASH_ENTER implies we ran out of memory.
+ *
+ * If foundPtr isn't NULL, then *foundPtr is set TRUE if we found an
+ * existing entry in the table, FALSE otherwise.  This is needed in the
+ * HASH_ENTER case, but is redundant with the return value otherwise.
  *
- * RETURNS: NULL if table is corrupted, a pointer to the element
- *		found/removed/entered if applicable, TRUE otherwise.
- *		foundPtr is TRUE if we found an element in the table
- *		(FALSE if we entered one).
+ * The HASH_FIND_SAVE/HASH_REMOVE_SAVED interface is a hack to save one
+ * table lookup in a find/process/remove scenario.  Note that no other
+ * addition or removal in the table can safely happen in between.
+ *----------
  */
 void *
 hash_search(HTAB *hashp,
 			void *keyPtr,
-			HASHACTION action,	/* HASH_FIND / HASH_ENTER / HASH_REMOVE
-								 * HASH_FIND_SAVE / HASH_REMOVE_SAVED */
+			HASHACTION action,
 			bool *foundPtr)
 {
-	HASHHDR	   *hctl;
+	HASHHDR	   *hctl = hashp->hctl;
 	uint32		bucket;
 	long		segment_num;
 	long		segment_ndx;
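
For illustration, a minimal sketch of the calling conventions described in the new comment above (MyEnt, my_table, key, and demo are hypothetical names; only the action codes and the NULL-vs-pointer return semantics come from this patch):

	typedef struct MyEnt
	{
		Oid			key;		/* hash key must be the first field of the entry */
		int			count;
	} MyEnt;

	static void
	demo(HTAB *my_table, Oid key)
	{
		MyEnt	   *ent;
		bool		found;

		/* plain lookup: foundPtr may now be NULL; a NULL result means not found */
		ent = (MyEnt *) hash_search(my_table, (void *) &key, HASH_FIND, NULL);
		if (ent == NULL)
			elog(DEBUG, "key not yet present");

		/* find-or-insert: a NULL result now means out of memory, not corruption */
		ent = (MyEnt *) hash_search(my_table, (void *) &key, HASH_ENTER, &found);
		if (ent == NULL)
			elog(ERROR, "out of memory");
		if (!found)
			ent->count = 0;		/* caller fills in the non-key fields */
		ent->count++;

		/* removal: a non-NULL result proves the entry existed (pointer is dangling) */
		if (hash_search(my_table, (void *) &key, HASH_REMOVE, NULL) == NULL)
			elog(NOTICE, "key was not in the table");
	}
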
@@ -507,21 +527,14 @@ hash_search(HTAB *hashp,
 		HASHBUCKET *prevBucketPtr;
 	}			saveState;
 
-	Assert(hashp);
-	Assert(keyPtr);
-	Assert((action == HASH_FIND) ||
-		   (action == HASH_REMOVE) ||
-		   (action == HASH_ENTER) ||
-		   (action == HASH_FIND_SAVE) ||
-		   (action == HASH_REMOVE_SAVED));
-
-	hctl = hashp->hctl;
-
 #if HASH_STATISTICS
 	hash_accesses++;
-	hashp->hctl->accesses++;
+	hctl->accesses++;
 #endif
 
+	/*
+	 * Do the initial lookup (or recall result of prior lookup)
+	 */
 	if (action == HASH_REMOVE_SAVED)
 	{
 		currBucket = saveState.currBucket;
@@ -540,7 +553,8 @@ hash_search(HTAB *hashp,
 
 		segp = hashp->dir[segment_num];
 
-		Assert(segp);
+		if (segp == NULL)
+			hash_corrupted(hashp);
 
 		prevBucketPtr = &segp[segment_ndx];
 		currBucket = *prevBucketPtr;
@@ -556,23 +570,32 @@ hash_search(HTAB *hashp,
 			currBucket = *prevBucketPtr;
 #if HASH_STATISTICS
 			hash_collisions++;
-			hashp->hctl->collisions++;
+			hctl->collisions++;
 #endif
 		}
 	}
 
+	if (foundPtr)
+		*foundPtr = (bool) (currBucket != NULL);
+
 	/*
-	 * if we found an entry or if we weren't trying to insert, we're done
-	 * now.
+	 * OK, now what?
 	 */
-	*foundPtr = (bool) (currBucket != NULL);
-
 	switch (action)
 	{
-		case HASH_ENTER:
+		case HASH_FIND:
 			if (currBucket != NULL)
 				return (void *) ELEMENTKEY(currBucket);
-			break;
+			return NULL;
+
+		case HASH_FIND_SAVE:
+			if (currBucket != NULL)
+			{
+				saveState.currBucket = currBucket;
+				saveState.prevBucketPtr = prevBucketPtr;
+				return (void *) ELEMENTKEY(currBucket);
+			}
+			return NULL;
 
 		case HASH_REMOVE:
 		case HASH_REMOVE_SAVED:
@@ -595,78 +618,57 @@ hash_search(HTAB *hashp,
 				 */
 				return (void *) ELEMENTKEY(currBucket);
 			}
-			return (void *) TRUE;
+			return NULL;
 
-		case HASH_FIND:
+		case HASH_ENTER:
+			/* Return existing element if found, else create one */
 			if (currBucket != NULL)
 				return (void *) ELEMENTKEY(currBucket);
-			return (void *) TRUE;
 
-		case HASH_FIND_SAVE:
-			if (currBucket != NULL)
+			/* get the next free element */
+			currBucket = hctl->freeList;
+			if (currBucket == NULL)
 			{
-				saveState.currBucket = currBucket;
-				saveState.prevBucketPtr = prevBucketPtr;
-				return (void *) ELEMENTKEY(currBucket);
+				/* no free elements.  allocate another chunk of buckets */
+				if (!element_alloc(hashp))
+					return NULL; /* out of memory */
+				currBucket = hctl->freeList;
+				Assert(currBucket != NULL);
 			}
-			return (void *) TRUE;
-
-		default:
-			/* can't get here */
-			return NULL;
-	}
-
-	/*
-	 * If we got here, then we didn't find the element and we have to
-	 * insert it into the hash table
-	 */
-	Assert(currBucket == NULL);
 
-	/* get the next free bucket */
-	currBucket = hctl->freeList;
-	if (currBucket == NULL)
-	{
-		/* no free elements.  allocate another chunk of buckets */
-		if (!element_alloc(hashp))
-			return NULL;
-		currBucket = hctl->freeList;
-	}
-	Assert(currBucket != NULL);
-
-	hctl->freeList = currBucket->link;
+			hctl->freeList = currBucket->link;
 
-	/* link into chain */
-	*prevBucketPtr = currBucket;
-	currBucket->link = NULL;
+			/* link into hashbucket chain */
+			*prevBucketPtr = currBucket;
+			currBucket->link = NULL;
 
-	/* copy key into record */
-	memcpy(ELEMENTKEY(currBucket), keyPtr, hctl->keysize);
+			/* copy key into record */
+			memcpy(ELEMENTKEY(currBucket), keyPtr, hctl->keysize);
 
-	/*
-	 * let the caller initialize the data field after hash_search returns.
-	 */
+			/* caller is expected to fill the data field on return */
 
-	/*
-	 * Check if it is time to split the segment
-	 */
-	if (++hctl->nentries / (hctl->max_bucket + 1) > hctl->ffactor)
-	{
+			/* Check if it is time to split the segment */
+			if (++hctl->nentries / (hctl->max_bucket + 1) > hctl->ffactor)
+			{
+				/*
+				 * NOTE: failure to expand table is not a fatal error, it just
+				 * means we have to run at higher fill factor than we wanted.
+				 */
+				expand_table(hashp);
+			}
 
-		/*
-		 * NOTE: failure to expand table is not a fatal error, it just
-		 * means we have to run at higher fill factor than we wanted.
-		 */
-		expand_table(hashp);
+			return (void *) ELEMENTKEY(currBucket);
 	}
 
-	return (void *) ELEMENTKEY(currBucket);
+	elog(ERROR, "hash_search: bogus action %d", (int) action);
+
+	return NULL;				/* keep compiler quiet */
 }
 
 /*
  * hash_seq_init/_search
  *			Sequentially search through hash table and return
- *			all the elements one by one, return NULL on error and
- *			return (void *) TRUE in the end.
+ *			all the elements one by one, return NULL when no more.
  *
  * NOTE: caller may delete the returned element before continuing the scan.
  * However, deleting any other element while the scan is in progress is
@@ -717,8 +719,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
 		 */
 		segp = hashp->dir[segment_num];
 		if (segp == NULL)
-			/* this is probably an error */
-			return NULL;
+			hash_corrupted(hashp);
 
 		/*
 		 * now find the right index into the segment for the first item in
@@ -734,7 +735,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
 			++status->curBucket;
 	}
 
-	return (void *) TRUE;		/* out of buckets */
+	return NULL;				/* out of buckets */
 }
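
For illustration, a minimal sketch of the scan-and-delete pattern the converted callers (XLogCloseRelationCache, AtEOXact_portals) now rely on, reusing the hypothetical MyEnt/my_table names from the sketch above:

	static void
	drop_all_entries(HTAB *my_table)
	{
		HASH_SEQ_STATUS status;
		MyEnt	   *ent;

		hash_seq_init(&status, my_table);
		while ((ent = (MyEnt *) hash_seq_search(&status)) != NULL)
		{
			/* deleting the element just returned is allowed during the scan */
			if (hash_search(my_table, (void *) &ent->key,
							HASH_REMOVE, NULL) == NULL)
				elog(ERROR, "entry returned by scan could not be removed");
		}
	}
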
 
 
@@ -923,6 +924,20 @@ element_alloc(HTAB *hashp)
 	return true;
 }
 
+/* complain when we have detected a corrupted hashtable */
+static void
+hash_corrupted(HTAB *hashp)
+{
+	/*
+	 * If the corruption is in a shared hashtable, we'd better force a
+	 * systemwide restart.  Otherwise, just shut down this one backend.
+	 */
+	if (hashp->isshared)
+		elog(STOP, "Hash table '%s' corrupted", hashp->tabname);
+	else
+		elog(FATAL, "Hash table '%s' corrupted", hashp->tabname);
+}
+
 /* calculate ceil(log base 2) of num */
 int
 my_log2(long num)
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 7e1aac193632f3a9017a612a9794657f9610414e..bd48394b8c8feaf2a3af3274aa2f82a81f1532e9 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.42 2001/10/01 05:36:16 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.43 2001/10/05 17:28:13 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -33,7 +33,7 @@
 
 #include "postgres.h"
 
-#include "lib/hasht.h"
+#include "utils/hsearch.h"
 #include "utils/memutils.h"
 #include "utils/portal.h"
 
@@ -42,7 +42,7 @@
  * ----------------
  */
 
-#define MAX_PORTALNAME_LEN		64		/* XXX LONGALIGNable value */
+#define MAX_PORTALNAME_LEN		64
 
 typedef struct portalhashent
 {
@@ -54,15 +54,13 @@ static HTAB *PortalHashTable = NULL;
 
 #define PortalHashTableLookup(NAME, PORTAL) \
 do { \
-	PortalHashEnt *hentry; bool found; char key[MAX_PORTALNAME_LEN]; \
+	PortalHashEnt *hentry; char key[MAX_PORTALNAME_LEN]; \
 	\
 	MemSet(key, 0, MAX_PORTALNAME_LEN); \
 	snprintf(key, MAX_PORTALNAME_LEN - 1, "%s", NAME); \
 	hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
-										 key, HASH_FIND, &found); \
-	if (hentry == NULL) \
-		elog(FATAL, "error in PortalHashTable"); \
-	if (found) \
+										 key, HASH_FIND, NULL); \
+	if (hentry) \
 		PORTAL = hentry->portal; \
 	else \
 		PORTAL = NULL; \
@@ -77,7 +75,7 @@ do { \
 	hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
 										 key, HASH_ENTER, &found); \
 	if (hentry == NULL) \
-		elog(FATAL, "error in PortalHashTable"); \
+		elog(ERROR, "out of memory in PortalHashTable"); \
 	if (found) \
 		elog(NOTICE, "trying to insert a portal name that exists."); \
 	hentry->portal = PORTAL; \
@@ -85,15 +83,13 @@ do { \
 
 #define PortalHashTableDelete(PORTAL) \
 do { \
-	PortalHashEnt *hentry; bool found; char key[MAX_PORTALNAME_LEN]; \
+	PortalHashEnt *hentry; char key[MAX_PORTALNAME_LEN]; \
 	\
 	MemSet(key, 0, MAX_PORTALNAME_LEN); \
 	snprintf(key, MAX_PORTALNAME_LEN - 1, "%s", PORTAL->name); \
 	hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
-										 key, HASH_REMOVE, &found); \
+										 key, HASH_REMOVE, NULL); \
 	if (hentry == NULL) \
-		elog(FATAL, "error in PortalHashTable"); \
-	if (!found) \
 		elog(NOTICE, "trying to delete portal name that does not exist."); \
 } while(0)
 
@@ -129,7 +125,8 @@ EnablePortalManager(void)
 	 * use PORTALS_PER_USER, defined in utils/portal.h as a guess of how
 	 * many hash table entries to create, initially
 	 */
-	PortalHashTable = hash_create(PORTALS_PER_USER * 3, &ctl, HASH_ELEM);
+	PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
+								  &ctl, HASH_ELEM);
 }
 
 /*
@@ -234,15 +231,10 @@ CreatePortal(char *name)
  * Exceptions:
  *		BadState if called when disabled.
  *		BadArg if portal is invalid.
- *
- * Note peculiar calling convention: pass a pointer to a portal pointer.
- * This is mainly so that this routine can be used as a hashtable walker.
  */
 void
-PortalDrop(Portal *portalP)
+PortalDrop(Portal portal)
 {
-	Portal		portal = *portalP;
-
 	AssertArg(PortalIsValid(portal));
 
 	/* remove portal from hash table */
@@ -262,9 +254,23 @@ PortalDrop(Portal *portalP)
 
 /*
  * Destroy all portals created in the current transaction (ie, all of them).
+ *
+ * XXX This assumes that portals can be deleted in a random order, ie,
+ * no portal has a reference to any other (at least not one that will be
+ * exercised during deletion).  I think this is okay at the moment, but
+ * we've had bugs of that ilk in the past.  Keep a close eye on cursor
+ * references...
  */
 void
 AtEOXact_portals(void)
 {
-	HashTableWalk(PortalHashTable, (HashtFunc) PortalDrop, 0);
+	HASH_SEQ_STATUS status;
+	PortalHashEnt *hentry;
+
+	hash_seq_init(&status, PortalHashTable);
+
+	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	{
+		PortalDrop(hentry->portal);
+	}
 }
diff --git a/src/include/lib/hasht.h b/src/include/lib/hasht.h
deleted file mode 100644
index 34aa89c61731c883a2e48cfa944a48c154cd25d8..0000000000000000000000000000000000000000
--- a/src/include/lib/hasht.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * hasht.h
- *	  hash table related functions that are not directly supported
- *	  under utils/hash.
- *
- *
- * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * $Id: hasht.h,v 1.12 2001/01/24 19:43:24 momjian Exp $
- *
- *-------------------------------------------------------------------------
- */
-#ifndef HASHT_H
-#define HASHT_H
-
-#include "utils/hsearch.h"
-
-typedef void (*HashtFunc) (void *hashitem, Datum arg);
-
-extern void HashTableWalk(HTAB *hashtable, HashtFunc function, Datum arg);
-
-#endif	 /* HASHT_H */
diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h
index a7ca140382dfea02970500af54c411d6973e7b67..b41cc2aa0d5351626883ccd3ea2bf8026a73be08 100644
--- a/src/include/storage/shmem.h
+++ b/src/include/storage/shmem.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: shmem.h,v 1.32 2001/10/01 05:36:17 tgl Exp $
+ * $Id: shmem.h,v 1.33 2001/10/05 17:28:13 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -66,9 +66,9 @@ extern void InitShmemAllocation(void *seghdr);
 extern void *ShmemAlloc(Size size);
 extern bool ShmemIsValid(unsigned long addr);
 extern void InitShmemIndex(void);
-extern HTAB *ShmemInitHash(char *name, long init_size, long max_size,
+extern HTAB *ShmemInitHash(const char *name, long init_size, long max_size,
 			  HASHCTL *infoP, int hash_flags);
-extern void *ShmemInitStruct(char *name, Size size, bool *foundPtr);
+extern void *ShmemInitStruct(const char *name, Size size, bool *foundPtr);
 
 
 /* size constants for the shmem index table */
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index 5b65e3ee23e5eca1691369edbda5631a3600d7ab..79f3296f123d21704f801abcf1ede12bccbdf0af 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: hsearch.h,v 1.21 2001/10/01 05:36:17 tgl Exp $
+ * $Id: hsearch.h,v 1.22 2001/10/05 17:28:13 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -42,7 +42,7 @@
 /*
  * HASHELEMENT is the private part of a hashtable entry.  The caller's data
  * follows the HASHELEMENT structure (on a MAXALIGN'd boundary).  The hash key
- * is expected to be at the start of the caller's hash entry structure.
+ * is expected to be at the start of the caller's hash entry data structure.
  */
 typedef struct HASHELEMENT
 {
@@ -85,10 +85,12 @@ typedef struct HASHHDR
 typedef struct HTAB
 {
 	HASHHDR	   *hctl;			/* shared control information */
-	long		(*hash) (void *key, int keysize); /* Hash Function */
 	HASHSEGMENT *dir;			/* directory of segment starts */
+	long		(*hash) (void *key, int keysize); /* Hash Function */
 	void	   *(*alloc) (Size);/* memory allocator */
 	MemoryContext hcxt;			/* memory context if default allocator used */
+	char	   *tabname;		/* table name (for error messages) */
+	bool		isshared;		/* true if table is in shared memory */
 } HTAB;
 
 /* Parameter data structure for hash_create */
@@ -147,9 +149,10 @@ typedef struct
 /*
  * prototypes for functions in dynahash.c
  */
-extern HTAB *hash_create(long nelem, HASHCTL *info, int flags);
+extern HTAB *hash_create(const char *tabname, long nelem,
+						 HASHCTL *info, int flags);
 extern void hash_destroy(HTAB *hashp);
-extern void hash_stats(char *where, HTAB *hashp);
+extern void hash_stats(const char *where, HTAB *hashp);
 extern void *hash_search(HTAB *hashp, void *keyPtr, HASHACTION action,
 			bool *foundPtr);
 extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp);
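
For illustration, a minimal sketch of creating a backend-local table under the new prototype, following the same pattern as the call sites converted in this patch; the table name is copied by hash_create() and used only in error reports (create_my_table, MyEnt, and the sizes are hypothetical):

	static HTAB *
	create_my_table(void)
	{
		HASHCTL		ctl;
		HTAB	   *my_table;

		MemSet(&ctl, 0, sizeof(ctl));
		ctl.keysize = sizeof(Oid);
		ctl.entrysize = sizeof(MyEnt);
		ctl.hash = tag_hash;

		/* the name string is copied, so a literal or transient buffer is fine */
		my_table = hash_create("My example cache", 128,
							   &ctl, HASH_ELEM | HASH_FUNCTION);
		if (my_table == NULL)
			elog(ERROR, "create_my_table: out of memory");
		return my_table;
	}
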
diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h
index 6a17ec867044a0c4edfee5e8a10441154fa67bae..df3581c6537306d053e5d18e2f72eced092e6ae0 100644
--- a/src/include/utils/portal.h
+++ b/src/include/utils/portal.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: portal.h,v 1.28 2001/05/21 14:22:18 wieck Exp $
+ * $Id: portal.h,v 1.29 2001/10/05 17:28:13 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -61,7 +61,7 @@ typedef struct PortalData
 extern void EnablePortalManager(void);
 extern void AtEOXact_portals(void);
 extern Portal CreatePortal(char *name);
-extern void PortalDrop(Portal *portalP);
+extern void PortalDrop(Portal portal);
 extern Portal GetPortalByName(char *name);
 extern void PortalSetQuery(Portal portal, QueryDesc *queryDesc,
 			   TupleDesc attinfo, EState *state,