From ec7aa4b51545e0c1c69acc0cf135d9c229f61d11 Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Mon, 21 Jul 2003 20:29:40 +0000
Subject: [PATCH] Error message editing in backend/access.
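
For reference, the typical conversion looks like this (taken verbatim from the
heaptuple.c hunk below).  User-facing errors become ereport() calls that carry
an SQLSTATE error code plus a message following the style guidelines, while
internal "can't happen" checks keep a terse elog():

    /* before: routine name embedded in the message text */
    elog(ERROR, "heap_formtuple: numberOfAttributes %d exceeds limit %d",
         numberOfAttributes, MaxTupleAttributeNumber);

    /* after: error code from utils/errcodes.h plus a separate errmsg() */
    ereport(ERROR,
            (errcode(ERRCODE_TOO_MANY_COLUMNS),
             errmsg("number of attributes %d exceeds limit, %d",
                    numberOfAttributes, MaxTupleAttributeNumber)));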

---
 src/backend/access/common/heaptuple.c      |  36 +-
 src/backend/access/common/indextuple.c     |  15 +-
 src/backend/access/common/printtup.c       |   6 +-
 src/backend/access/common/tupdesc.c        |  22 +-
 src/backend/access/gist/gist.c             |  10 +-
 src/backend/access/hash/hash.c             |   4 +-
 src/backend/access/hash/hashinsert.c       |   6 +-
 src/backend/access/hash/hashovfl.c         |  32 +-
 src/backend/access/hash/hashpage.c         |  44 +-
 src/backend/access/hash/hashutil.c         |   6 +-
 src/backend/access/heap/heapam.c           | 142 +++---
 src/backend/access/heap/hio.c              |  17 +-
 src/backend/access/heap/tuptoaster.c       |   6 +-
 src/backend/access/index/genam.c           |   8 +-
 src/backend/access/index/indexam.c         |  26 +-
 src/backend/access/index/istrat.c          |  12 +-
 src/backend/access/nbtree/nbtinsert.c      |  47 +-
 src/backend/access/nbtree/nbtpage.c        |  58 ++-
 src/backend/access/nbtree/nbtree.c         |  13 +-
 src/backend/access/nbtree/nbtsearch.c      |  20 +-
 src/backend/access/nbtree/nbtsort.c        |  11 +-
 src/backend/access/nbtree/nbtutils.c       |  10 +-
 src/backend/access/rtree/rtree.c           |  29 +-
 src/backend/access/rtree/rtscan.c          |   4 +-
 src/backend/access/transam/xact.c          |  55 +-
 src/backend/access/transam/xlog.c          | 560 +++++++++++++--------
 src/include/utils/elog.h                   |   4 +-
 src/test/regress/expected/alter_table.out  |  12 +-
 src/test/regress/expected/arrays.out       |   2 +-
 src/test/regress/expected/create_index.out |   4 +-
 src/test/regress/expected/errors.out       |   4 +-
 src/test/regress/expected/plpgsql.out      |   4 +-
 src/test/regress/output/constraints.source |   6 +-
 33 files changed, 702 insertions(+), 533 deletions(-)

diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 470241e22ad..ae1df582b0e 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.83 2002/09/27 15:04:08 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.84 2003/07/21 20:29:37 tgl Exp $
  *
  * NOTES
  *	  The old interface functions have been converted to macros
@@ -173,13 +173,11 @@ heap_attisnull(HeapTuple tup, int attnum)
 			case MinCommandIdAttributeNumber:
 			case MaxTransactionIdAttributeNumber:
 			case MaxCommandIdAttributeNumber:
+				/* these are never null */
 				break;
 
-			case 0:
-				elog(ERROR, "heap_attisnull: zero attnum disallowed");
-
 			default:
-				elog(ERROR, "heap_attisnull: undefined negative attnum");
+				elog(ERROR, "invalid attnum: %d", attnum);
 		}
 
 	return 0;
@@ -457,7 +455,7 @@ heap_getsysattr(HeapTuple tup, int attnum, bool *isnull)
 			result = ObjectIdGetDatum(tup->t_tableOid);
 			break;
 		default:
-			elog(ERROR, "heap_getsysattr: invalid attnum %d", attnum);
+			elog(ERROR, "invalid attnum: %d", attnum);
 			result = 0;			/* keep compiler quiet */
 			break;
 	}
@@ -581,8 +579,10 @@ heap_formtuple(TupleDesc tupleDescriptor,
 	int			numberOfAttributes = tupleDescriptor->natts;
 
 	if (numberOfAttributes > MaxTupleAttributeNumber)
-		elog(ERROR, "heap_formtuple: numberOfAttributes %d exceeds limit %d",
-			 numberOfAttributes, MaxTupleAttributeNumber);
+		ereport(ERROR,
+				(errcode(ERRCODE_TOO_MANY_COLUMNS),
+				 errmsg("number of attributes %d exceeds limit, %d",
+						numberOfAttributes, MaxTupleAttributeNumber)));
 
 	for (i = 0; i < numberOfAttributes; i++)
 	{
@@ -666,14 +666,11 @@ heap_modifytuple(HeapTuple tuple,
 	 * allocate and fill *value and *nulls arrays from either the tuple or
 	 * the repl information, as appropriate.
 	 */
-	value = (Datum *) palloc(numberOfAttributes * sizeof *value);
-	nulls = (char *) palloc(numberOfAttributes * sizeof *nulls);
+	value = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
+	nulls = (char *) palloc(numberOfAttributes * sizeof(char));
 
-	for (attoff = 0;
-		 attoff < numberOfAttributes;
-		 attoff += 1)
+	for (attoff = 0; attoff < numberOfAttributes; attoff++)
 	{
-
 		if (repl[attoff] == ' ')
 		{
 			value[attoff] = heap_getattr(tuple,
@@ -683,13 +680,13 @@ heap_modifytuple(HeapTuple tuple,
 			nulls[attoff] = (isNull) ? 'n' : ' ';
 
 		}
-		else if (repl[attoff] != 'r')
-			elog(ERROR, "heap_modifytuple: repl is \\%3d", repl[attoff]);
-		else
-		{						/* == 'r' */
+		else if (repl[attoff] == 'r')
+		{
 			value[attoff] = replValue[attoff];
 			nulls[attoff] = replNull[attoff];
 		}
+		else
+			elog(ERROR, "unrecognized replace flag: %d", (int) repl[attoff]);
 	}
 
 	/*
@@ -699,6 +696,9 @@ heap_modifytuple(HeapTuple tuple,
 							  value,
 							  nulls);
 
+	pfree(value);
+	pfree(nulls);
+
 	/*
 	 * copy the identification info of the old tuple: t_ctid, t_self, and
 	 * OID (if any)
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index e6518922cb9..abf25915ab5 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.64 2003/02/23 06:17:12 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.65 2003/07/21 20:29:37 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -52,8 +52,10 @@ index_formtuple(TupleDesc tupleDescriptor,
 #endif
 
 	if (numberOfAttributes > INDEX_MAX_KEYS)
-		elog(ERROR, "index_formtuple: numberOfAttributes %d > %d",
-			 numberOfAttributes, INDEX_MAX_KEYS);
+		ereport(ERROR,
+				(errcode(ERRCODE_TOO_MANY_COLUMNS),
+				 errmsg("number of index attributes %d exceeds limit, %d",
+						numberOfAttributes, INDEX_MAX_KEYS)));
 
 #ifdef TOAST_INDEX_HACK
 	for (i = 0; i < numberOfAttributes; i++)
@@ -158,8 +160,11 @@ index_formtuple(TupleDesc tupleDescriptor,
 	 * it in t_info.
 	 */
 	if ((size & INDEX_SIZE_MASK) != size)
-		elog(ERROR, "index_formtuple: data takes %lu bytes, max is %d",
-			 (unsigned long) size, INDEX_SIZE_MASK);
+		ereport(ERROR,
+				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+				 errmsg("index tuple requires %lu bytes, maximum size is %lu",
+						(unsigned long) size,
+						(unsigned long) INDEX_SIZE_MASK)));
 
 	infomask |= size;
 
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 29ed33c9f61..61ecdcd7e50 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.74 2003/05/26 17:51:38 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.75 2003/07/21 20:29:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -273,7 +273,9 @@ printtup_prepare_info(DR_printtup *myState, TupleDesc typeinfo, int numAttrs)
 			fmgr_info(thisState->typsend, &thisState->finfo);
 		}
 		else
-			elog(ERROR, "Unsupported format code %d", format);
+			ereport(ERROR,
+					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+					 errmsg("unsupported format code: %d", format)));
 	}
 }
 
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index 3bd15bd1fb3..7ec20cc4209 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.95 2003/06/15 17:59:10 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.96 2003/07/21 20:29:38 tgl Exp $
  *
  * NOTES
  *	  some of the executor utility code such as "ExecTypeFromTL" should be
@@ -417,7 +417,7 @@ TupleDescInitEntry(TupleDesc desc,
 						   ObjectIdGetDatum(oidtypeid),
 						   0, 0, 0);
 	if (!HeapTupleIsValid(tuple))
-		elog(ERROR, "Unable to look up type id %u", oidtypeid);
+		elog(ERROR, "cache lookup failed for type %u", oidtypeid);
 
 	/*
 	 * type info exists so we initialize our attribute information from
@@ -643,7 +643,7 @@ TypeGetTupleDesc(Oid typeoid, List *colaliases)
 		int			natts;
 
 		if (!OidIsValid(relid))
-			elog(ERROR, "Invalid typrelid for complex type %u", typeoid);
+			elog(ERROR, "invalid typrelid for complex type %u", typeoid);
 
 		rel = relation_open(relid, AccessShareLock);
 		tupdesc = CreateTupleDescCopy(RelationGetDescr(rel));
@@ -657,7 +657,9 @@ TypeGetTupleDesc(Oid typeoid, List *colaliases)
 
 			/* does the list length match the number of attributes? */
 			if (length(colaliases) != natts)
-				elog(ERROR, "TypeGetTupleDesc: number of aliases does not match number of attributes");
+				ereport(ERROR,
+						(errcode(ERRCODE_DATATYPE_MISMATCH),
+						 errmsg("number of aliases does not match number of attributes")));
 
 			/* OK, use the aliases instead */
 			for (varattno = 0; varattno < natts; varattno++)
@@ -676,11 +678,15 @@ TypeGetTupleDesc(Oid typeoid, List *colaliases)
 
 		/* the alias list is required for base types */
 		if (colaliases == NIL)
-			elog(ERROR, "TypeGetTupleDesc: no column alias was provided");
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg("no column alias was provided")));
 
 		/* the alias list length must be 1 */
 		if (length(colaliases) != 1)
-			elog(ERROR, "TypeGetTupleDesc: number of aliases does not match number of attributes");
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg("number of aliases does not match number of attributes")));
 
 		/* OK, get the column alias */
 		attname = strVal(lfirst(colaliases));
@@ -695,7 +701,9 @@ TypeGetTupleDesc(Oid typeoid, List *colaliases)
 						   false);
 	}
 	else if (functyptype == 'p' && typeoid == RECORDOID)
-		elog(ERROR, "Unable to determine tuple description for function returning \"record\"");
+		ereport(ERROR,
+				(errcode(ERRCODE_DATATYPE_MISMATCH),
+				 errmsg("unable to determine tuple description for function returning record")));
 	else
 	{
 		/* crummy error message, but parser should have caught this */
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index bf23ba34287..86bdfd08aac 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.103 2003/05/27 17:49:45 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.104 2003/07/21 20:29:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -168,7 +168,7 @@ gistbuild(PG_FUNCTION_ARGS)
 	 * that's not the case, big trouble's what we have.
 	 */
 	if (RelationGetNumberOfBlocks(index) != 0)
-		elog(ERROR, "%s already contains data",
+		elog(ERROR, "index \"%s\" already contains data",
 			 RelationGetRelationName(index));
 
 	/* initialize the root page */
@@ -396,7 +396,7 @@ gistPageAddItem(GISTSTATE *giststate,
 	retval = PageAddItem(page, (Item) *newtup, IndexTupleSize(*newtup),
 						 offsetNumber, flags);
 	if (retval == InvalidOffsetNumber)
-		elog(ERROR, "gist: failed to add index item to %s",
+		elog(ERROR, "failed to add index item to \"%s\"",
 			 RelationGetRelationName(r));
 	/* be tidy */
 	if (DatumGetPointer(tmpcentry.key) != NULL &&
@@ -603,7 +603,7 @@ gistwritebuffer(Relation r, Page page, IndexTuple *itup,
 		l = PageAddItem(page, (Item) itup[i], IndexTupleSize(itup[i]),
 						off, LP_USED);
 		if (l == InvalidOffsetNumber)
-			elog(ERROR, "gist: failed to add index item to %s",
+			elog(ERROR, "failed to add index item to \"%s\"",
 				 RelationGetRelationName(r));
 #endif
 	}
@@ -1663,7 +1663,7 @@ initGISTstate(GISTSTATE *giststate, Relation index)
 	int			i;
 
 	if (index->rd_att->natts > INDEX_MAX_KEYS)
-		elog(ERROR, "initGISTstate: numberOfAttributes %d > %d",
+		elog(ERROR, "numberOfAttributes %d > %d",
 			 index->rd_att->natts, INDEX_MAX_KEYS);
 
 	giststate->tupdesc = index->rd_att;
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index f3534d2e174..6578c502950 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.63 2003/03/23 23:01:03 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.64 2003/07/21 20:29:38 tgl Exp $
  *
  * NOTES
  *	  This file contains only the public interface routines.
@@ -69,7 +69,7 @@ hashbuild(PG_FUNCTION_ARGS)
 	 * that's not the case, big trouble's what we have.
 	 */
 	if (RelationGetNumberOfBlocks(index) != 0)
-		elog(ERROR, "%s already contains data",
+		elog(ERROR, "index \"%s\" already contains data",
 			 RelationGetRelationName(index));
 
 	/* initialize the hash index metadata page */
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 191182165a1..fd27e7a17ba 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.25 2002/06/20 20:29:24 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.26 2003/07/21 20:29:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -48,7 +48,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
 	/* we need a scan key to do our search, so build one */
 	itup = &(hitem->hash_itup);
 	if ((natts = rel->rd_rel->relnatts) != 1)
-		elog(ERROR, "Hash indices valid for only one index key.");
+		elog(ERROR, "hash indexes support only one index key");
 	itup_scankey = _hash_mkscankey(rel, itup);
 
 	/*
@@ -228,7 +228,7 @@ _hash_pgaddtup(Relation rel,
 	itup_off = OffsetNumberNext(PageGetMaxOffsetNumber(page));
 	if (PageAddItem(page, (Item) hitem, itemsize, itup_off, LP_USED)
 		== InvalidOffsetNumber)
-		elog(ERROR, "_hash_pgaddtup: failed to add index item to %s",
+		elog(ERROR, "failed to add index item to \"%s\"",
 			 RelationGetRelationName(rel));
 
 	/* write the buffer, but hold our lock */
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 1e2df0aee08..ed9459feb90 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.34 2003/03/10 22:28:18 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.35 2003/07/21 20:29:38 tgl Exp $
  *
  * NOTES
  *	  Overflow pages look like ordinary relation pages.
@@ -58,7 +58,7 @@ _hash_addovflpage(Relation rel, Buffer *metabufp, Buffer buf)
 	/* allocate an empty overflow page */
 	oaddr = _hash_getovfladdr(rel, metabufp);
 	if (oaddr == InvalidOvflAddress)
-		elog(ERROR, "_hash_addovflpage: problem with _hash_getovfladdr.");
+		elog(ERROR, "_hash_getovfladdr failed");
 	ovflblkno = OADDR_TO_BLKNO(OADDR_OF(SPLITNUM(oaddr), OPAGENUM(oaddr)));
 	Assert(BlockNumberIsValid(ovflblkno));
 	ovflbuf = _hash_getbuf(rel, ovflblkno, HASH_WRITE);
@@ -158,12 +158,13 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
 	offset = metap->hashm_spares[splitnum] -
 		(splitnum ? metap->hashm_spares[splitnum - 1] : 0);
 
-#define OVMSG	"HASH: Out of overflow pages.  Out of luck.\n"
-
 	if (offset > SPLITMASK)
 	{
 		if (++splitnum >= NCACHED)
-			elog(ERROR, OVMSG);
+			ereport(ERROR,
+					(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+					 errmsg("out of overflow pages in hash index \"%s\"",
+							RelationGetRelationName(rel))));
 		metap->hashm_ovflpoint = splitnum;
 		metap->hashm_spares[splitnum] = metap->hashm_spares[splitnum - 1];
 		metap->hashm_spares[splitnum - 1]--;
@@ -179,7 +180,10 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
 
 		free_page++;
 		if (free_page >= NCACHED)
-			elog(ERROR, OVMSG);
+			ereport(ERROR,
+					(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+					 errmsg("out of overflow pages in hash index \"%s\"",
+							RelationGetRelationName(rel))));
 
 		/*
 		 * This is tricky.	The 1 indicates that you want the new page
@@ -193,13 +197,16 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
 		 */
 		if (_hash_initbitmap(rel, metap, OADDR_OF(splitnum, offset),
 							 1, free_page))
-			elog(ERROR, "overflow_page: problem with _hash_initbitmap.");
+			elog(ERROR, "_hash_initbitmap failed");
 		metap->hashm_spares[splitnum]++;
 		offset++;
 		if (offset > SPLITMASK)
 		{
 			if (++splitnum >= NCACHED)
-				elog(ERROR, OVMSG);
+				ereport(ERROR,
+						(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+						 errmsg("out of overflow pages in hash index \"%s\"",
+								RelationGetRelationName(rel))));
 			metap->hashm_ovflpoint = splitnum;
 			metap->hashm_spares[splitnum] = metap->hashm_spares[splitnum - 1];
 			metap->hashm_spares[splitnum - 1]--;
@@ -242,7 +249,10 @@ found:
 		;
 	offset = (i ? bit - metap->hashm_spares[i - 1] : bit);
 	if (offset >= SPLITMASK)
-		elog(ERROR, OVMSG);
+		ereport(ERROR,
+				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+				 errmsg("out of overflow pages in hash index \"%s\"",
+						RelationGetRelationName(rel))));
 
 	/* initialize this page */
 	oaddr = OADDR_OF(i, offset);
@@ -479,8 +489,6 @@ _hash_squeezebucket(Relation rel,
 	HashItem	hitem;
 	Size		itemsz;
 
-/*	  elog(DEBUG, "_hash_squeezebucket: squeezing bucket %d", bucket); */
-
 	/*
 	 * start squeezing into the base bucket page.
 	 */
@@ -565,7 +573,7 @@ _hash_squeezebucket(Relation rel,
 		woffnum = OffsetNumberNext(PageGetMaxOffsetNumber(wpage));
 		if (PageAddItem(wpage, (Item) hitem, itemsz, woffnum, LP_USED)
 			== InvalidOffsetNumber)
-			elog(ERROR, "_hash_squeezebucket: failed to add index item to %s",
+			elog(ERROR, "failed to add index item to \"%s\"",
 				 RelationGetRelationName(rel));
 
 		/*
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 4b3aea844c6..458542359f6 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.36 2002/06/20 20:29:24 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.37 2003/07/21 20:29:38 tgl Exp $
  *
  * NOTES
  *	  Postgres hash pages look like ordinary relation pages.  The opaque
@@ -90,7 +90,7 @@ _hash_metapinit(Relation rel)
 		LockRelation(rel, AccessExclusiveLock);
 
 	if (RelationGetNumberOfBlocks(rel) != 0)
-		elog(ERROR, "Cannot initialize non-empty hash table %s",
+		elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
 			 RelationGetRelationName(rel));
 
 	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE);
@@ -148,7 +148,7 @@ _hash_metapinit(Relation rel)
 	 * created the first two buckets above.
 	 */
 	if (_hash_initbitmap(rel, metap, OADDR_OF(lg2nelem, 1), lg2nelem + 1, 0))
-		elog(ERROR, "Problem with _hash_initbitmap.");
+		elog(ERROR, "_hash_initbitmap failed");
 
 	/* all done */
 	_hash_wrtnorelbuf(metabuf);
@@ -193,7 +193,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access)
 	Buffer		buf;
 
 	if (blkno == P_NEW)
-		elog(ERROR, "_hash_getbuf: internal error: hash AM does not use P_NEW");
+		elog(ERROR, "hash AM does not use P_NEW");
 	switch (access)
 	{
 		case HASH_WRITE:
@@ -201,8 +201,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access)
 			_hash_setpagelock(rel, blkno, access);
 			break;
 		default:
-			elog(ERROR, "_hash_getbuf: invalid access (%d) on new blk: %s",
-				 access, RelationGetRelationName(rel));
+			elog(ERROR, "unrecognized hash access code: %d", access);
 			break;
 	}
 	buf = ReadBuffer(rel, blkno);
@@ -228,8 +227,8 @@ _hash_relbuf(Relation rel, Buffer buf, int access)
 			_hash_unsetpagelock(rel, blkno, access);
 			break;
 		default:
-			elog(ERROR, "_hash_relbuf: invalid access (%d) on blk %x: %s",
-				 access, blkno, RelationGetRelationName(rel));
+			elog(ERROR, "unrecognized hash access code: %d", access);
+			break;
 	}
 
 	ReleaseBuffer(buf);
@@ -287,8 +286,7 @@ _hash_chgbufaccess(Relation rel,
 			_hash_relbuf(rel, *bufp, from_access);
 			break;
 		default:
-			elog(ERROR, "_hash_chgbufaccess: invalid access (%d) on blk %x: %s",
-				 from_access, blkno, RelationGetRelationName(rel));
+			elog(ERROR, "unrecognized hash access code: %d", from_access);
 			break;
 	}
 	*bufp = _hash_getbuf(rel, blkno, to_access);
@@ -322,8 +320,7 @@ _hash_setpagelock(Relation rel,
 				LockPage(rel, blkno, ShareLock);
 				break;
 			default:
-				elog(ERROR, "_hash_setpagelock: invalid access (%d) on blk %x: %s",
-					 access, blkno, RelationGetRelationName(rel));
+				elog(ERROR, "unrecognized hash access code: %d", access);
 				break;
 		}
 	}
@@ -346,8 +343,7 @@ _hash_unsetpagelock(Relation rel,
 				UnlockPage(rel, blkno, ShareLock);
 				break;
 			default:
-				elog(ERROR, "_hash_unsetpagelock: invalid access (%d) on blk %x: %s",
-					 access, blkno, RelationGetRelationName(rel));
+				elog(ERROR, "unrecognized hash access code: %d", access);
 				break;
 		}
 	}
@@ -409,8 +405,6 @@ _hash_expandtable(Relation rel, Buffer metabuf)
 	Bucket		new_bucket;
 	uint32		spare_ndx;
 
-/*	  elog(DEBUG, "_hash_expandtable: expanding..."); */
-
 	metap = (HashMetaPage) BufferGetPage(metabuf);
 	_hash_checkpage((Page) metap, LH_META_PAGE);
 
@@ -483,9 +477,6 @@ _hash_splitpage(Relation rel,
 	Page		npage;
 	TupleDesc	itupdesc;
 
-/*	  elog(DEBUG, "_hash_splitpage: splitting %d into %d,%d",
-		 obucket, obucket, nbucket);
-*/
 	metap = (HashMetaPage) BufferGetPage(metabuf);
 	_hash_checkpage((Page) metap, LH_META_PAGE);
 
@@ -534,7 +525,7 @@ _hash_splitpage(Relation rel,
 		opage = BufferGetPage(obuf);
 		_hash_checkpage(opage, LH_OVERFLOW_PAGE);
 		if (PageIsEmpty(opage))
-			elog(ERROR, "_hash_splitpage: empty overflow page %d", oblkno);
+			elog(ERROR, "empty hash overflow page %u", oblkno);
 		oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
 	}
 
@@ -569,13 +560,9 @@ _hash_splitpage(Relation rel,
 				opage = BufferGetPage(obuf);
 				_hash_checkpage(opage, LH_OVERFLOW_PAGE);
 				oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
-
 				/* we're guaranteed that an ovfl page has at least 1 tuple */
 				if (PageIsEmpty(opage))
-				{
-					elog(ERROR, "_hash_splitpage: empty ovfl page %d!",
-						 oblkno);
-				}
+					elog(ERROR, "empty hash overflow page %u", oblkno);
 				ooffnum = FirstOffsetNumber;
 				omaxoffnum = PageGetMaxOffsetNumber(opage);
 			}
@@ -626,7 +613,7 @@ _hash_splitpage(Relation rel,
 			noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage));
 			if (PageAddItem(npage, (Item) hitem, itemsz, noffnum, LP_USED)
 				== InvalidOffsetNumber)
-				elog(ERROR, "_hash_splitpage: failed to add index item to %s",
+				elog(ERROR, "failed to add index item to \"%s\"",
 					 RelationGetRelationName(rel));
 			_hash_wrtnorelbuf(nbuf);
 
@@ -670,10 +657,7 @@ _hash_splitpage(Relation rel,
 				oblkno = BufferGetBlockNumber(obuf);
 				oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
 				if (PageIsEmpty(opage))
-				{
-					elog(ERROR, "_hash_splitpage: empty overflow page %d",
-						 oblkno);
-				}
+					elog(ERROR, "empty hash overflow page %u", oblkno);
 				ooffnum = FirstOffsetNumber;
 				omaxoffnum = PageGetMaxOffsetNumber(opage);
 			}
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 224f28e6d55..5cb2fa1fa4d 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/hash/hashutil.c,v 1.31 2002/07/02 06:18:57 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/hash/hashutil.c,v 1.32 2003/07/21 20:29:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -78,7 +78,9 @@ _hash_formitem(IndexTuple itup)
 
 	/* disallow nulls in hash keys */
 	if (IndexTupleHasNulls(itup))
-		elog(ERROR, "hash indices cannot include null keys");
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("hash indexes cannot include null keys")));
 
 	/* make a copy of the index tuple with room for the sequence number */
 	tuplen = IndexTupleSize(itup);
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 7ef0bf2d558..a0d191f8a9d 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.151 2003/02/23 20:32:11 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.152 2003/07/21 20:29:38 tgl Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -134,19 +134,16 @@ heapgettup(Relation relation,
 	 */
 #ifdef	HEAPDEBUGALL
 	if (ItemPointerIsValid(tid))
-	{
-		elog(LOG, "heapgettup(%s, tid=0x%x[%d,%d], dir=%d, ...)",
+		elog(DEBUG2, "heapgettup(%s, tid=0x%x[%d,%d], dir=%d, ...)",
 			 RelationGetRelationName(relation), tid, tid->ip_blkid,
 			 tid->ip_posid, dir);
-	}
 	else
-	{
-		elog(LOG, "heapgettup(%s, tid=0x%x, dir=%d, ...)",
+		elog(DEBUG2, "heapgettup(%s, tid=0x%x, dir=%d, ...)",
 			 RelationGetRelationName(relation), tid, dir);
-	}
-	elog(LOG, "heapgettup(..., b=0x%x, nkeys=%d, key=0x%x", buffer, nkeys, key);
 
-	elog(LOG, "heapgettup: relation(%c)=`%s', %p",
+	elog(DEBUG2, "heapgettup(..., b=0x%x, nkeys=%d, key=0x%x", buffer, nkeys, key);
+
+	elog(DEBUG2, "heapgettup: relation(%c)=`%s', %p",
 		 relation->rd_rel->relkind, RelationGetRelationName(relation),
 		 snapshot);
 #endif   /* !defined(HEAPLOGALL) */
@@ -194,7 +191,7 @@ heapgettup(Relation relation,
 									   relation,
 									   ItemPointerGetBlockNumber(tid));
 		if (!BufferIsValid(*buffer))
-			elog(ERROR, "heapgettup: failed ReadBuffer");
+			elog(ERROR, "ReadBuffer failed");
 
 		LockBuffer(*buffer, BUFFER_LOCK_SHARE);
 
@@ -229,7 +226,7 @@ heapgettup(Relation relation,
 									   relation,
 									   page);
 		if (!BufferIsValid(*buffer))
-			elog(ERROR, "heapgettup: failed ReadBuffer");
+			elog(ERROR, "ReadBuffer failed");
 
 		LockBuffer(*buffer, BUFFER_LOCK_SHARE);
 
@@ -269,7 +266,7 @@ heapgettup(Relation relation,
 									   relation,
 									   page);
 		if (!BufferIsValid(*buffer))
-			elog(ERROR, "heapgettup: failed ReadBuffer");
+			elog(ERROR, "ReadBuffer failed");
 
 		LockBuffer(*buffer, BUFFER_LOCK_SHARE);
 
@@ -363,7 +360,7 @@ heapgettup(Relation relation,
 									   relation,
 									   page);
 		if (!BufferIsValid(*buffer))
-			elog(ERROR, "heapgettup: failed ReadBuffer");
+			elog(ERROR, "ReadBuffer failed");
 
 		LockBuffer(*buffer, BUFFER_LOCK_SHARE);
 		dp = (Page) BufferGetPage(*buffer);
@@ -459,7 +456,7 @@ relation_open(Oid relationId, LOCKMODE lockmode)
 	r = RelationIdGetRelation(relationId);
 
 	if (!RelationIsValid(r))
-		elog(ERROR, "Relation %u does not exist", relationId);
+		elog(ERROR, "could not open relation with OID %u", relationId);
 
 	if (lockmode != NoLock)
 		LockRelation(r, lockmode);
@@ -532,7 +529,7 @@ relation_openr(const char *sysRelationName, LOCKMODE lockmode)
 	r = RelationSysNameGetRelation(sysRelationName);
 
 	if (!RelationIsValid(r))
-		elog(ERROR, "Relation \"%s\" does not exist", sysRelationName);
+		elog(ERROR, "could not open relation \"%s\"", sysRelationName);
 
 	if (lockmode != NoLock)
 		LockRelation(r, lockmode);
@@ -578,14 +575,20 @@ heap_open(Oid relationId, LOCKMODE lockmode)
 	r = relation_open(relationId, lockmode);
 
 	if (r->rd_rel->relkind == RELKIND_INDEX)
-		elog(ERROR, "%s is an index relation",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is an index relation",
+						RelationGetRelationName(r))));
 	else if (r->rd_rel->relkind == RELKIND_SPECIAL)
-		elog(ERROR, "%s is a special relation",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is a special relation",
+						RelationGetRelationName(r))));
 	else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
-		elog(ERROR, "%s is a composite type",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is a composite type",
+						RelationGetRelationName(r))));
 
 	pgstat_initstats(&r->pgstat_info, r);
 
@@ -607,14 +610,20 @@ heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
 	r = relation_openrv(relation, lockmode);
 
 	if (r->rd_rel->relkind == RELKIND_INDEX)
-		elog(ERROR, "%s is an index relation",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is an index relation",
+						RelationGetRelationName(r))));
 	else if (r->rd_rel->relkind == RELKIND_SPECIAL)
-		elog(ERROR, "%s is a special relation",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is a special relation",
+						RelationGetRelationName(r))));
 	else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
-		elog(ERROR, "%s is a composite type",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is a composite type",
+						RelationGetRelationName(r))));
 
 	pgstat_initstats(&r->pgstat_info, r);
 
@@ -636,14 +645,20 @@ heap_openr(const char *sysRelationName, LOCKMODE lockmode)
 	r = relation_openr(sysRelationName, lockmode);
 
 	if (r->rd_rel->relkind == RELKIND_INDEX)
-		elog(ERROR, "%s is an index relation",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is an index relation",
+						RelationGetRelationName(r))));
 	else if (r->rd_rel->relkind == RELKIND_SPECIAL)
-		elog(ERROR, "%s is a special relation",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is a special relation",
+						RelationGetRelationName(r))));
 	else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
-		elog(ERROR, "%s is a composite type",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is a composite type",
+						RelationGetRelationName(r))));
 
 	pgstat_initstats(&r->pgstat_info, r);
 
@@ -661,12 +676,6 @@ heap_beginscan(Relation relation, Snapshot snapshot,
 {
 	HeapScanDesc scan;
 
-	/*
-	 * sanity checks
-	 */
-	if (!RelationIsValid(relation))
-		elog(ERROR, "heap_beginscan: !RelationIsValid(relation)");
-
 	/*
 	 * increment relation ref count while scanning relation
 	 *
@@ -767,14 +776,12 @@ heap_endscan(HeapScanDesc scan)
 
 #ifdef HEAPDEBUGALL
 #define HEAPDEBUG_1 \
-	elog(LOG, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
+	elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
 		 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
-
 #define HEAPDEBUG_2 \
-	 elog(LOG, "heap_getnext returning EOS")
-
+	elog(DEBUG2, "heap_getnext returning EOS")
 #define HEAPDEBUG_3 \
-	 elog(LOG, "heap_getnext returning tuple")
+	elog(DEBUG2, "heap_getnext returning tuple")
 #else
 #define HEAPDEBUG_1
 #define HEAPDEBUG_2
@@ -787,12 +794,6 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction)
 {
 	/* Note: no locking manipulations needed */
 
-	/*
-	 * argument checks
-	 */
-	if (scan == NULL)
-		elog(ERROR, "heap_getnext: NULL relscan");
-
 	HEAPDEBUG_1;				/* heap_getnext( info ) */
 
 	/*
@@ -847,7 +848,7 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction)
  * the tuple); when keep_buf = false, the pin is released and *userbuf is set
  * to InvalidBuffer.
  *
- * It is somewhat inconsistent that we elog() on invalid block number but
+ * It is somewhat inconsistent that we ereport() on invalid block number but
  * return false on invalid item number.  This is historical.  The only
  * justification I can see is that the caller can relatively easily check the
  * block number for validity, but cannot check the item number without reading
@@ -875,7 +876,7 @@ heap_fetch(Relation relation,
 	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
 
 	if (!BufferIsValid(buffer))
-		elog(ERROR, "heap_fetch: ReadBuffer(%s, %lu) failed",
+		elog(ERROR, "ReadBuffer(\"%s\", %lu) failed",
 			 RelationGetRelationName(relation),
 			 (unsigned long) ItemPointerGetBlockNumber(tid));
 
@@ -985,8 +986,9 @@ heap_get_latest_tid(Relation relation,
 	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
 
 	if (!BufferIsValid(buffer))
-		elog(ERROR, "heap_get_latest_tid: %s relation: ReadBuffer(%lx) failed",
-			 RelationGetRelationName(relation), (long) tid);
+		elog(ERROR, "ReadBuffer(\"%s\", %lu) failed",
+			 RelationGetRelationName(relation),
+			 (unsigned long) ItemPointerGetBlockNumber(tid));
 
 	LockBuffer(buffer, BUFFER_LOCK_SHARE);
 
@@ -1103,7 +1105,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
 	/* Find buffer to insert this tuple into */
 	buffer = RelationGetBufferForTuple(relation, tup->t_len, InvalidBuffer);
 
-	/* NO ELOG(ERROR) from here till changes are logged */
+	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
 
 	RelationPutHeapTuple(relation, buffer, tup);
@@ -1219,7 +1221,7 @@ heap_delete(Relation relation, ItemPointer tid,
 	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
 
 	if (!BufferIsValid(buffer))
-		elog(ERROR, "heap_delete: failed ReadBuffer");
+		elog(ERROR, "ReadBuffer failed");
 
 	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
@@ -1238,7 +1240,7 @@ l1:
 	{
 		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 		ReleaseBuffer(buffer);
-		elog(ERROR, "heap_delete: (am)invalid tid");
+		elog(ERROR, "attempted to delete invisible tuple");
 	}
 	else if (result == HeapTupleBeingUpdated)
 	{
@@ -1358,7 +1360,7 @@ l1:
  * This routine may be used to delete a tuple when concurrent updates of
  * the target tuple are not expected (for example, because we have a lock
  * on the relation associated with the tuple).	Any failure is reported
- * via elog().
+ * via ereport().
  */
 void
 simple_heap_delete(Relation relation, ItemPointer tid)
@@ -1371,7 +1373,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
 	{
 		case HeapTupleSelfUpdated:
 			/* Tuple was already updated in current command? */
-			elog(ERROR, "simple_heap_delete: tuple already updated by self");
+			elog(ERROR, "tuple already updated by self");
 			break;
 
 		case HeapTupleMayBeUpdated:
@@ -1379,11 +1381,11 @@ simple_heap_delete(Relation relation, ItemPointer tid)
 			break;
 
 		case HeapTupleUpdated:
-			elog(ERROR, "simple_heap_delete: tuple concurrently updated");
+			elog(ERROR, "tuple concurrently updated");
 			break;
 
 		default:
-			elog(ERROR, "Unknown status %u from heap_delete", result);
+			elog(ERROR, "unrecognized heap_delete status: %u", result);
 			break;
 	}
 }
@@ -1413,7 +1415,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 
 	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(otid));
 	if (!BufferIsValid(buffer))
-		elog(ERROR, "heap_update: failed ReadBuffer");
+		elog(ERROR, "ReadBuffer failed");
 	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
 	dp = (PageHeader) BufferGetPage(buffer);
@@ -1438,7 +1440,7 @@ l2:
 	{
 		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 		ReleaseBuffer(buffer);
-		elog(ERROR, "heap_update: (am)invalid tid");
+		elog(ERROR, "attempted to update invisible tuple");
 	}
 	else if (result == HeapTupleBeingUpdated)
 	{
@@ -1611,7 +1613,7 @@ l2:
 	 * buffer, only one pin is held.
 	 */
 
-	/* NO ELOG(ERROR) from here till changes are logged */
+	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
 
 	RelationPutHeapTuple(relation, newbuf, newtup);		/* insert new tuple */
@@ -1688,7 +1690,7 @@ l2:
  * This routine may be used to update a tuple when concurrent updates of
  * the target tuple are not expected (for example, because we have a lock
  * on the relation associated with the tuple).	Any failure is reported
- * via elog().
+ * via ereport().
  */
 void
 simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
@@ -1701,7 +1703,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
 	{
 		case HeapTupleSelfUpdated:
 			/* Tuple was already updated in current command? */
-			elog(ERROR, "simple_heap_update: tuple already updated by self");
+			elog(ERROR, "tuple already updated by self");
 			break;
 
 		case HeapTupleMayBeUpdated:
@@ -1709,11 +1711,11 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
 			break;
 
 		case HeapTupleUpdated:
-			elog(ERROR, "simple_heap_update: tuple concurrently updated");
+			elog(ERROR, "tuple concurrently updated");
 			break;
 
 		default:
-			elog(ERROR, "Unknown status %u from heap_update", result);
+			elog(ERROR, "unrecognized heap_update status: %u", result);
 			break;
 	}
 }
@@ -1733,7 +1735,7 @@ heap_mark4update(Relation relation, HeapTuple tuple, Buffer *buffer,
 	*buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
 
 	if (!BufferIsValid(*buffer))
-		elog(ERROR, "heap_mark4update: failed ReadBuffer");
+		elog(ERROR, "ReadBuffer failed");
 
 	LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
 
@@ -1750,7 +1752,7 @@ l3:
 	{
 		LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
 		ReleaseBuffer(*buffer);
-		elog(ERROR, "heap_mark4update: (am)invalid tid");
+		elog(ERROR, "attempted to mark4update invisible tuple");
 	}
 	else if (result == HeapTupleBeingUpdated)
 	{
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 53cf41c4449..7f575d1e894 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Id: hio.c,v 1.47 2003/02/13 05:35:11 momjian Exp $
+ *	  $Id: hio.c,v 1.48 2003/07/21 20:29:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -23,7 +23,7 @@
 /*
  * RelationPutHeapTuple - place tuple at specified page
  *
- * !!! ELOG(ERROR) IS DISALLOWED HERE !!!
+ * !!! EREPORT(ERROR) IS DISALLOWED HERE !!!  Must PANIC on failure!!!
  *
  * Note - caller must hold BUFFER_LOCK_EXCLUSIVE on the buffer.
  */
@@ -44,7 +44,7 @@ RelationPutHeapTuple(Relation relation,
 						 tuple->t_len, InvalidOffsetNumber, LP_USED);
 
 	if (offnum == InvalidOffsetNumber)
-		elog(PANIC, "RelationPutHeapTuple: failed to add tuple");
+		elog(PANIC, "failed to add tuple to page");
 
 	/* Update tuple->t_self to the actual position where it was stored */
 	ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
@@ -84,7 +84,7 @@ RelationPutHeapTuple(Relation relation,
  *	for indices only. Alternatively, we could define pseudo-table as
  *	we do for transactions with XactLockTable.
  *
- *	ELOG(ERROR) is allowed here, so this routine *must* be called
+ *	ereport(ERROR) is allowed here, so this routine *must* be called
  *	before any (unlogged) changes are made in buffer pool.
  */
 Buffer
@@ -104,8 +104,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
 	 * If we're gonna fail for oversize tuple, do it right away
 	 */
 	if (len > MaxTupleSize)
-		elog(ERROR, "Tuple is too big: size %lu, max size %ld",
-			 (unsigned long) len, MaxTupleSize);
+		ereport(ERROR,
+				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+				 errmsg("tuple is too big: size %lu, maximum size %lu",
+						(unsigned long) len,
+						(unsigned long) MaxTupleSize)));
 
 	if (otherBuffer != InvalidBuffer)
 		otherBlock = BufferGetBlockNumber(otherBuffer);
@@ -268,7 +271,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
 	if (len > PageGetFreeSpace(pageHeader))
 	{
 		/* We should not get here given the test at the top */
-		elog(PANIC, "Tuple is too big: size %lu", (unsigned long) len);
+		elog(PANIC, "tuple is too big: size %lu", (unsigned long) len);
 	}
 
 	/*
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index f8a883b150e..0262f9a146f 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.36 2002/09/04 20:31:09 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.37 2003/07/21 20:29:39 tgl Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -896,7 +896,7 @@ toast_save_datum(Relation rel, Datum value)
 		memcpy(VARATT_DATA(&chunk_data), data_p, chunk_size);
 		toasttup = heap_formtuple(toasttupDesc, t_values, t_nulls);
 		if (!HeapTupleIsValid(toasttup))
-			elog(ERROR, "Failed to build TOAST tuple");
+			elog(ERROR, "failed to build TOAST tuple");
 
 		simple_heap_insert(toastrel, toasttup);
 
@@ -912,7 +912,7 @@ toast_save_datum(Relation rel, Datum value)
 							  &(toasttup->t_self),
 							  toastrel, toastidx->rd_index->indisunique);
 		if (idxres == NULL)
-			elog(ERROR, "Failed to insert index entry for TOAST tuple");
+			elog(ERROR, "failed to insert index entry for TOAST tuple");
 
 		/*
 		 * Free memory
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index a99b02c3c60..96acee2d21f 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.38 2003/03/24 21:42:33 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.39 2003/07/21 20:29:39 tgl Exp $
  *
  * NOTES
  *	  many of the old access method routines have been turned into
@@ -70,9 +70,6 @@ RelationGetIndexScan(Relation indexRelation,
 {
 	IndexScanDesc scan;
 
-	if (!RelationIsValid(indexRelation))
-		elog(ERROR, "RelationGetIndexScan: relation invalid");
-
 	scan = (IndexScanDesc) palloc(sizeof(IndexScanDescData));
 
 	scan->heapRelation = NULL;	/* may be set later */
@@ -135,9 +132,6 @@ RelationGetIndexScan(Relation indexRelation,
 void
 IndexScanEnd(IndexScanDesc scan)
 {
-	if (!IndexScanIsValid(scan))
-		elog(ERROR, "IndexScanEnd: invalid scan");
-
 	if (scan->keyData != NULL)
 		pfree(scan->keyData);
 
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 90e9af63c28..731c34b3ab6 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.66 2003/03/24 21:42:33 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.67 2003/07/21 20:29:39 tgl Exp $
  *
  * INTERFACE ROUTINES
  *		index_open		- open an index relation by relation OID
@@ -90,7 +90,7 @@
 	procedure = indexRelation->rd_am->y, \
 	(!RegProcedureIsValid(procedure)) ? \
 		elog(ERROR, "index_%s: invalid %s regproc", \
-			CppAsString(x), CppAsString(y)) \
+			 CppAsString(x), CppAsString(y)) \
 	: (void)NULL \
 )
 
@@ -99,7 +99,7 @@
 	procedure = scan->indexRelation->rd_am->y, \
 	(!RegProcedureIsValid(procedure)) ? \
 		elog(ERROR, "index_%s: invalid %s regproc", \
-			CppAsString(x), CppAsString(y)) \
+			 CppAsString(x), CppAsString(y)) \
 	: (void)NULL \
 )
 
@@ -129,8 +129,10 @@ index_open(Oid relationId)
 	r = relation_open(relationId, NoLock);
 
 	if (r->rd_rel->relkind != RELKIND_INDEX)
-		elog(ERROR, "%s is not an index relation",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is not an index relation",
+						RelationGetRelationName(r))));
 
 	pgstat_initstats(&r->pgstat_info, r);
 
@@ -152,8 +154,10 @@ index_openrv(const RangeVar *relation)
 	r = relation_openrv(relation, NoLock);
 
 	if (r->rd_rel->relkind != RELKIND_INDEX)
-		elog(ERROR, "%s is not an index relation",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is not an index relation",
+						RelationGetRelationName(r))));
 
 	pgstat_initstats(&r->pgstat_info, r);
 
@@ -175,8 +179,10 @@ index_openr(const char *sysRelationName)
 	r = relation_openr(sysRelationName, NoLock);
 
 	if (r->rd_rel->relkind != RELKIND_INDEX)
-		elog(ERROR, "%s is not an index relation",
-			 RelationGetRelationName(r));
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("\"%s\" is not an index relation",
+						RelationGetRelationName(r))));
 
 	pgstat_initstats(&r->pgstat_info, r);
 
@@ -753,7 +759,7 @@ index_getprocinfo(Relation irel,
 		 * use index_getprocid.)
 		 */
 		if (!RegProcedureIsValid(procId))
-			elog(ERROR, "Missing support function %d for attribute %d of index %s",
+			elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
 				 procnum, attnum, RelationGetRelationName(irel));
 
 		fmgr_info_cxt(procId, locinfo, irel->rd_indexcxt);
diff --git a/src/backend/access/index/istrat.c b/src/backend/access/index/istrat.c
index 00be3955354..66eb7e4a088 100644
--- a/src/backend/access/index/istrat.c
+++ b/src/backend/access/index/istrat.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.58 2002/06/20 20:29:25 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.59 2003/07/21 20:29:39 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -235,7 +235,7 @@ StrategyTermEvaluate(StrategyTerm term,
 				break;
 
 			default:
-				elog(ERROR, "StrategyTermEvaluate: impossible case %d",
+				elog(ERROR, "impossible strategy case: %d",
 					 operator->flags ^ entry->sk_flags);
 		}
 		if (!result)
@@ -310,13 +310,14 @@ RelationGetStrategy(Relation relation,
 			break;
 
 		default:
-			elog(FATAL, "RelationGetStrategy: impossible case %d", entry->sk_flags);
+			elog(ERROR, "impossible strategy case: %d",
+				 entry->sk_flags);
 	}
 
 	if (!StrategyNumberIsInBounds(strategy, evaluation->maxStrategy))
 	{
 		if (!StrategyNumberIsValid(strategy))
-			elog(ERROR, "RelationGetStrategy: corrupted evaluation");
+			elog(ERROR, "corrupted strategy evaluation");
 	}
 
 	return strategy;
@@ -435,8 +436,7 @@ RelationInvokeStrategy(Relation relation,
 		}
 	}
 
-	elog(ERROR, "RelationInvokeStrategy: cannot evaluate strategy %d",
-		 strategy);
+	elog(ERROR, "cannot evaluate strategy %d", strategy);
 
 	/* not reached, just to make compiler happy */
 	return FALSE;
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 254b1d88e88..334c309184f 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.100 2003/05/27 17:49:45 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.101 2003/07/21 20:29:39 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -153,7 +153,7 @@ top:
  *
  * Returns InvalidTransactionId if there is no conflict, else an xact ID
  * we must wait for to see if it commits a conflicting tuple.	If an actual
- * conflict is detected, no return --- just elog().
+ * conflict is detected, no return --- just ereport().
  */
 static TransactionId
 _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
@@ -237,8 +237,10 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
 					/*
 					 * Otherwise we have a definite conflict.
 					 */
-					elog(ERROR, "Cannot insert a duplicate key into unique index %s",
-						 RelationGetRelationName(rel));
+					ereport(ERROR,
+							(errcode(ERRCODE_UNIQUE_VIOLATION),
+							 errmsg("duplicate key violates UNIQUE constraint \"%s\"",
+									RelationGetRelationName(rel))));
 				}
 				else if (htup.t_data != NULL)
 				{
@@ -291,7 +293,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
 				if (!P_IGNORE(opaque))
 					break;
 				if (P_RIGHTMOST(opaque))
-					elog(ERROR, "_bt_check_unique: fell off the end of %s",
+					elog(ERROR, "fell off the end of \"%s\"",
 						 RelationGetRelationName(rel));
 			}
 			maxoff = PageGetMaxOffsetNumber(page);
@@ -387,8 +389,11 @@ _bt_insertonpg(Relation rel,
 	 * itemsz doesn't include the ItemId.
 	 */
 	if (itemsz > BTMaxItemSize(page))
-		elog(ERROR, "btree: index item size %lu exceeds maximum %lu",
-			 (unsigned long) itemsz, BTMaxItemSize(page));
+		ereport(ERROR,
+				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+				 errmsg("index tuple size %lu exceeds btree maximum, %lu",
+						(unsigned long) itemsz,
+						(unsigned long) BTMaxItemSize(page))));
 
 	/*
 	 * Determine exactly where new item will go.
@@ -445,7 +450,7 @@ _bt_insertonpg(Relation rel,
 				if (!P_IGNORE(lpageop))
 					break;
 				if (P_RIGHTMOST(lpageop))
-					elog(ERROR, "_bt_insertonpg: fell off the end of %s",
+					elog(ERROR, "fell off the end of \"%s\"",
 						 RelationGetRelationName(rel));
 			}
 			_bt_relbuf(rel, buf);
@@ -536,7 +541,7 @@ _bt_insertonpg(Relation rel,
 			}
 		}
 
-		/* Do the actual update.  No elog(ERROR) until changes are logged */
+		/* Do the update.  No ereport(ERROR) until changes are logged */
 		START_CRIT_SECTION();
 
 		_bt_pgaddtup(rel, page, itemsz, btitem, newitemoff, "page");
@@ -705,7 +710,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
 		item = (BTItem) PageGetItem(origpage, itemid);
 		if (PageAddItem(rightpage, (Item) item, itemsz, rightoff,
 						LP_USED) == InvalidOffsetNumber)
-			elog(PANIC, "btree: failed to add hikey to the right sibling");
+			elog(PANIC, "failed to add hikey to the right sibling");
 		rightoff = OffsetNumberNext(rightoff);
 	}
 
@@ -730,7 +735,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
 	}
 	if (PageAddItem(leftpage, (Item) item, itemsz, leftoff,
 					LP_USED) == InvalidOffsetNumber)
-		elog(PANIC, "btree: failed to add hikey to the left sibling");
+		elog(PANIC, "failed to add hikey to the left sibling");
 	leftoff = OffsetNumberNext(leftoff);
 
 	/*
@@ -815,14 +820,14 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
 		spage = BufferGetPage(sbuf);
 		sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
 		if (sopaque->btpo_prev != ropaque->btpo_prev)
-			elog(PANIC, "btree: right sibling's left-link doesn't match");
+			elog(PANIC, "right sibling's left-link doesn't match");
 	}
 
 	/*
 	 * Right sibling is locked, new siblings are prepared, but original
 	 * page is not updated yet. Log changes before continuing.
 	 *
-	 * NO ELOG(ERROR) till right sibling is updated.
+	 * NO EREPORT(ERROR) till right sibling is updated.
 	 */
 	START_CRIT_SECTION();
 
@@ -1059,7 +1064,7 @@ _bt_findsplitloc(Relation rel,
 	 * just in case ...
 	 */
 	if (!state.have_split)
-		elog(FATAL, "_bt_findsplitloc: can't find a feasible split point for %s",
+		elog(ERROR, "cannot find a feasible split point for \"%s\"",
 			 RelationGetRelationName(rel));
 
 	*newitemonleft = state.newitemonleft;
@@ -1193,7 +1198,7 @@ _bt_insert_parent(Relation rel,
 			BTPageOpaque lpageop;
 
 			if (!InRecovery)
-				elog(DEBUG2, "_bt_insert_parent: concurrent ROOT page split");
+				elog(DEBUG2, "concurrent ROOT page split");
 			lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
 			/* Find the leftmost page at the next level up */
 			pbuf = _bt_get_endpoint(rel, lpageop->btpo.level + 1, false);
@@ -1232,8 +1237,8 @@ _bt_insert_parent(Relation rel,
 
 		/* Check for error only after writing children */
 		if (pbuf == InvalidBuffer)
-			elog(ERROR, "_bt_getstackbuf: my bits moved right off the end of the world!"
-				 "\n\tRecreate index %s.", RelationGetRelationName(rel));
+			elog(ERROR, "failed to re-find parent key in \"%s\"",
+				 RelationGetRelationName(rel));
 
 		/* Recursively update the parent */
 		newres = _bt_insertonpg(rel, pbuf, stack->bts_parent,
@@ -1399,7 +1404,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
 	metapg = BufferGetPage(metabuf);
 	metad = BTPageGetMeta(metapg);
 
-	/* NO ELOG(ERROR) from here till newroot op is logged */
+	/* NO EREPORT(ERROR) from here till newroot op is logged */
 	START_CRIT_SECTION();
 
 	/* set btree special data */
@@ -1431,7 +1436,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
 	 * the two items will go into positions P_HIKEY and P_FIRSTKEY.
 	 */
 	if (PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY, LP_USED) == InvalidOffsetNumber)
-		elog(PANIC, "btree: failed to add leftkey to new root page");
+		elog(PANIC, "failed to add leftkey to new root page");
 	pfree(new_item);
 
 	/*
@@ -1448,7 +1453,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
 	 * insert the right page pointer into the new root page.
 	 */
 	if (PageAddItem(rootpage, (Item) new_item, itemsz, P_FIRSTKEY, LP_USED) == InvalidOffsetNumber)
-		elog(PANIC, "btree: failed to add rightkey to new root page");
+		elog(PANIC, "failed to add rightkey to new root page");
 	pfree(new_item);
 
 	/* XLOG stuff */
@@ -1533,7 +1538,7 @@ _bt_pgaddtup(Relation rel,
 
 	if (PageAddItem(page, (Item) btitem, itemsize, itup_off,
 					LP_USED) == InvalidOffsetNumber)
-		elog(PANIC, "btree: failed to add item to the %s for %s",
+		elog(PANIC, "failed to add item to the %s for \"%s\"",
 			 where, RelationGetRelationName(rel));
 }
 
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index dcd92e2d7a7..33f85cd59a6 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.65 2003/05/27 17:49:45 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.66 2003/07/21 20:29:39 tgl Exp $
  *
  *	NOTES
  *	   Postgres btree pages look like ordinary relation pages.	The opaque
@@ -44,7 +44,7 @@ _bt_metapinit(Relation rel)
 	BTPageOpaque op;
 
 	if (RelationGetNumberOfBlocks(rel) != 0)
-		elog(ERROR, "Cannot initialize non-empty btree %s",
+		elog(ERROR, "cannot initialize non-empty btree index \"%s\"",
 			 RelationGetRelationName(rel));
 
 	buf = ReadBuffer(rel, P_NEW);
@@ -145,13 +145,17 @@ _bt_getroot(Relation rel, int access)
 	/* sanity-check the metapage */
 	if (!(metaopaque->btpo_flags & BTP_META) ||
 		metad->btm_magic != BTREE_MAGIC)
-		elog(ERROR, "Index %s is not a btree",
-			 RelationGetRelationName(rel));
+		ereport(ERROR,
+				(errcode(ERRCODE_INDEX_CORRUPTED),
+				 errmsg("index \"%s\" is not a btree",
+						RelationGetRelationName(rel))));
 
 	if (metad->btm_version != BTREE_VERSION)
-		elog(ERROR, "Version mismatch on %s: version %d file, version %d code",
-			 RelationGetRelationName(rel),
-			 metad->btm_version, BTREE_VERSION);
+		ereport(ERROR,
+				(errcode(ERRCODE_INDEX_CORRUPTED),
+				 errmsg("version mismatch in \"%s\": file version %d, code version %d",
+						RelationGetRelationName(rel),
+						metad->btm_version, BTREE_VERSION)));
 
 	/* if no root page initialized yet, do it */
 	if (metad->btm_root == P_NONE)
@@ -265,7 +269,7 @@ _bt_getroot(Relation rel, int access)
 
 			/* it's dead, Jim.  step right one page */
 			if (P_RIGHTMOST(rootopaque))
-				elog(ERROR, "No live root page found in %s",
+				elog(ERROR, "no live root page found in \"%s\"",
 					 RelationGetRelationName(rel));
 			rootblkno = rootopaque->btpo_next;
 
@@ -274,7 +278,7 @@ _bt_getroot(Relation rel, int access)
 
 		/* Note: can't check btpo.level on deleted pages */
 		if (rootopaque->btpo.level != rootlevel)
-			elog(ERROR, "Root page %u of %s has level %u, expected %u",
+			elog(ERROR, "root page %u of \"%s\" has level %u, expected %u",
 				 rootblkno, RelationGetRelationName(rel),
 				 rootopaque->btpo.level, rootlevel);
 	}
@@ -320,13 +324,17 @@ _bt_gettrueroot(Relation rel)
 
 	if (!(metaopaque->btpo_flags & BTP_META) ||
 		metad->btm_magic != BTREE_MAGIC)
-		elog(ERROR, "Index %s is not a btree",
-			 RelationGetRelationName(rel));
+		ereport(ERROR,
+				(errcode(ERRCODE_INDEX_CORRUPTED),
+				 errmsg("index \"%s\" is not a btree",
+						RelationGetRelationName(rel))));
 
 	if (metad->btm_version != BTREE_VERSION)
-		elog(ERROR, "Version mismatch on %s: version %d file, version %d code",
-			 RelationGetRelationName(rel),
-			 metad->btm_version, BTREE_VERSION);
+		ereport(ERROR,
+				(errcode(ERRCODE_INDEX_CORRUPTED),
+				 errmsg("version mismatch in \"%s\": file version %d, code version %d",
+						RelationGetRelationName(rel),
+						metad->btm_version, BTREE_VERSION)));
 
 	/* if no root page initialized yet, fail */
 	if (metad->btm_root == P_NONE)
@@ -351,7 +359,7 @@ _bt_gettrueroot(Relation rel)
 
 		/* it's dead, Jim.  step right one page */
 		if (P_RIGHTMOST(rootopaque))
-			elog(ERROR, "No live root page found in %s",
+			elog(ERROR, "no live root page found in \"%s\"",
 				 RelationGetRelationName(rel));
 		rootblkno = rootopaque->btpo_next;
 
@@ -360,7 +368,7 @@ _bt_gettrueroot(Relation rel)
 
 	/* Note: can't check btpo.level on deleted pages */
 	if (rootopaque->btpo.level != rootlevel)
-		elog(ERROR, "Root page %u of %s has level %u, expected %u",
+		elog(ERROR, "root page %u of \"%s\" has level %u, expected %u",
 			 rootblkno, RelationGetRelationName(rel),
 			 rootopaque->btpo.level, rootlevel);
 
@@ -416,7 +424,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
 				_bt_pageinit(page, BufferGetPageSize(buf));
 				return buf;
 			}
-			elog(DEBUG2, "_bt_getbuf: FSM returned nonrecyclable page");
+			elog(DEBUG2, "FSM returned nonrecyclable page");
 			_bt_relbuf(rel, buf);
 		}
 
@@ -630,7 +638,7 @@ _bt_delitems(Relation rel, Buffer buf,
 	Page		page = BufferGetPage(buf);
 	int			i;
 
-	/* No elog(ERROR) until changes are logged */
+	/* No ereport(ERROR) until changes are logged */
 	START_CRIT_SECTION();
 
 	/*
@@ -775,7 +783,7 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
 	for (;;)
 	{
 		if (stack == NULL)
-			elog(ERROR, "_bt_pagedel: not enough stack items");
+			elog(ERROR, "not enough stack items");
 		if (ilevel == targetlevel)
 			break;
 		stack = stack->bts_parent;
@@ -805,7 +813,7 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
 			_bt_relbuf(rel, lbuf);
 			if (leftsib == P_NONE)
 			{
-				elog(LOG, "_bt_pagedel: no left sibling (concurrent deletion?)");
+				elog(LOG, "no left sibling (concurrent deletion?)");
 				return 0;
 			}
 			lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
@@ -837,7 +845,7 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
 		return 0;
 	}
 	if (opaque->btpo_prev != leftsib)
-		elog(ERROR, "_bt_pagedel: left link changed unexpectedly");
+		elog(ERROR, "left link changed unexpectedly");
 	/*
 	 * And next write-lock the (current) right sibling.
 	 */
@@ -851,8 +859,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
 				   target, P_HIKEY);
 	pbuf = _bt_getstackbuf(rel, stack, BT_WRITE);
 	if (pbuf == InvalidBuffer)
-		elog(ERROR, "_bt_getstackbuf: my bits moved right off the end of the world!"
-			 "\n\tRecreate index %s.", RelationGetRelationName(rel));
+		elog(ERROR, "failed to re-find parent key in \"%s\"",
+			 RelationGetRelationName(rel));
 	parent = stack->bts_blkno;
 	poffset = stack->bts_offset;
 	/*
@@ -924,7 +932,7 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
 	 * Here we begin doing the deletion.
 	 */
 
-	/* No elog(ERROR) until changes are logged */
+	/* No ereport(ERROR) until changes are logged */
 	START_CRIT_SECTION();
 
 	/*
@@ -954,7 +962,7 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
 		itemid = PageGetItemId(page, nextoffset);
 		btitem = (BTItem) PageGetItem(page, itemid);
 		if (ItemPointerGetBlockNumber(&(btitem->bti_itup.t_tid)) != rightsib)
-			elog(PANIC, "_bt_pagedel: right sibling is not next child");
+			elog(PANIC, "right sibling is not next child");
 
 		PageIndexTupleDelete(page, nextoffset);
 	}
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index a35901b47c6..3c814725fef 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.102 2003/03/23 23:01:03 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.103 2003/07/21 20:29:39 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -108,7 +108,7 @@ btbuild(PG_FUNCTION_ARGS)
 	 * that's not the case, big trouble's what we have.
 	 */
 	if (RelationGetNumberOfBlocks(index) != 0)
-		elog(ERROR, "%s already contains data",
+		elog(ERROR, "index \"%s\" already contains data",
 			 RelationGetRelationName(index));
 
 	/* initialize the btree index metadata page */
@@ -816,8 +816,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
 			 */
 			i = FlushRelationBuffers(rel, new_pages);
 			if (i < 0)
-				elog(ERROR, "btvacuumcleanup: FlushRelationBuffers returned %d",
-					 i);
+				elog(ERROR, "FlushRelationBuffers returned %d", i);
 
 			/*
 			 * Do the physical truncation.
@@ -929,8 +928,8 @@ _bt_restscan(IndexScanDesc scan)
 		 * we can find it again.
 		 */
 		if (P_RIGHTMOST(opaque))
-			elog(ERROR, "_bt_restscan: my bits moved right off the end of the world!"
-				 "\n\tRecreate index %s.", RelationGetRelationName(rel));
+			elog(ERROR, "failed to re-find previous key in \"%s\"",
+				 RelationGetRelationName(rel));
 		/* Advance to next non-dead page --- there must be one */
 		nextbuf = InvalidBuffer;
 		for (;;)
@@ -944,7 +943,7 @@ _bt_restscan(IndexScanDesc scan)
 			if (!P_IGNORE(opaque))
 				break;
 			if (P_RIGHTMOST(opaque))
-				elog(ERROR, "_bt_restscan: fell off the end of %s",
+				elog(ERROR, "fell off the end of \"%s\"",
 					 RelationGetRelationName(rel));
 		}
 		_bt_relbuf(rel, buf);
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 91089d85454..f12c1896c07 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.74 2003/02/22 00:45:04 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.75 2003/07/21 20:29:39 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -168,7 +168,7 @@ _bt_moveright(Relation rel,
 	}
 
 	if (P_IGNORE(opaque))
-		elog(ERROR, "_bt_moveright: fell off the end of %s",
+		elog(ERROR, "fell off the end of \"%s\"",
 			 RelationGetRelationName(rel));
 
 	return buf;
@@ -552,7 +552,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 		{
 			pfree(nKeyIs);
 			pfree(scankeys);
-			elog(ERROR, "_bt_first: btree doesn't support is(not)null, yet");
+			elog(ERROR, "btree doesn't support is(not)null, yet");
 			return false;
 		}
 		procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
@@ -700,7 +700,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 					result = _bt_compare(rel, keysCount, scankeys, page, offnum);
 				} while (result == 0);
 				if (!_bt_step(scan, &buf, BackwardScanDirection))
-					elog(ERROR, "_bt_first: equal items disappeared?");
+					elog(ERROR, "equal items disappeared?");
 			}
 			break;
 
@@ -991,7 +991,7 @@ _bt_walk_left(Relation rel, Buffer buf)
 			for (;;)
 			{
 				if (P_RIGHTMOST(opaque))
-					elog(ERROR, "_bt_walk_left: fell off the end of %s",
+					elog(ERROR, "fell off the end of \"%s\"",
 						 RelationGetRelationName(rel));
 				blkno = opaque->btpo_next;
 				_bt_relbuf(rel, buf);
@@ -1015,7 +1015,7 @@ _bt_walk_left(Relation rel, Buffer buf)
 			 * if there's anything wrong.
 			 */
 			if (opaque->btpo_prev == lblkno)
-				elog(ERROR, "_bt_walk_left: can't find left sibling in %s",
+				elog(ERROR, "cannot find left sibling in \"%s\"",
 					 RelationGetRelationName(rel));
 			/* Okay to try again with new lblkno value */
 		}
@@ -1028,7 +1028,7 @@ _bt_walk_left(Relation rel, Buffer buf)
  * _bt_get_endpoint() -- Find the first or last page on a given tree level
  *
  * If the index is empty, we will return InvalidBuffer; any other failure
- * condition causes elog().  We will not return a dead page.
+ * condition causes ereport().  We will not return a dead page.
  *
  * The returned buffer is pinned and read-locked.
  */
@@ -1075,7 +1075,7 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
 		{
 			blkno = opaque->btpo_next;
 			if (blkno == P_NONE)
-				elog(ERROR, "_bt_get_endpoint: fell off the end of %s",
+				elog(ERROR, "fell off the end of \"%s\"",
 					 RelationGetRelationName(rel));
 			_bt_relbuf(rel, buf);
 			buf = _bt_getbuf(rel, blkno, BT_READ);
@@ -1087,7 +1087,7 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
 		if (opaque->btpo.level == level)
 			break;
 		if (opaque->btpo.level < level)
-			elog(ERROR, "_bt_get_endpoint: btree level %u not found", level);
+			elog(ERROR, "btree level %u not found", level);
 
 		/* Descend to leftmost or rightmost child page */
 		if (rightmost)
@@ -1176,7 +1176,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
 	}
 	else
 	{
-		elog(ERROR, "Illegal scan direction %d", dir);
+		elog(ERROR, "invalid scan direction: %d", (int) dir);
 		start = 0;				/* keep compiler quiet */
 	}
 
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 62f020086d8..92a73021f66 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -36,7 +36,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.72 2003/02/22 00:45:04 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.73 2003/07/21 20:29:39 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -332,7 +332,7 @@ _bt_sortaddtup(Page page,
 
 	if (PageAddItem(page, (Item) btitem, itemsize, itup_off,
 					LP_USED) == InvalidOffsetNumber)
-		elog(ERROR, "btree: failed to add item to the page in _bt_sort");
+		elog(ERROR, "failed to add item to the index page");
 }
 
 /*----------
@@ -397,8 +397,11 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
 	 * during creation of an index, we don't go through there.
 	 */
 	if (btisz > BTMaxItemSize(npage))
-		elog(ERROR, "btree: index item size %lu exceeds maximum %ld",
-			 (unsigned long) btisz, BTMaxItemSize(npage));
+		ereport(ERROR,
+				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+				 errmsg("index tuple size %lu exceeds btree maximum, %lu",
+						(unsigned long) btisz,
+						(unsigned long) BTMaxItemSize(npage))));
 
 	if (pgspc < btisz || pgspc < state->btps_full)
 	{
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 50fe3312cc2..d736e2f15ee 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.51 2002/09/04 20:31:12 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.52 2003/07/21 20:29:39 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -227,7 +227,7 @@ _bt_orderkeys(IndexScanDesc scan)
 	cur = &key[0];
 	/* check input keys are correctly ordered */
 	if (cur->sk_attno != 1)
-		elog(ERROR, "_bt_orderkeys: key(s) for attribute 1 missed");
+		elog(ERROR, "key(s) for attribute 1 missed");
 
 	/* We can short-circuit most of the work if there's just one key */
 	if (numberOfKeys == 1)
@@ -305,8 +305,7 @@ _bt_orderkeys(IndexScanDesc scan)
 
 			/* check input keys are correctly ordered */
 			if (i < numberOfKeys && cur->sk_attno != attno + 1)
-				elog(ERROR, "_bt_orderkeys: key(s) for attribute %d missed",
-					 attno + 1);
+				elog(ERROR, "key(s) for attribute %d missed", attno + 1);
 
 			/*
 			 * If = has been specified, no other key will be used. In case
@@ -462,8 +461,7 @@ _bt_getstrategynumber(RegProcedure sk_procedure, StrategyMap map)
 		if (sk_procedure == map->entry[j].sk_procedure)
 			return j;
 	}
-	elog(ERROR, "_bt_getstrategynumber: unable to identify operator %u",
-		 sk_procedure);
+	elog(ERROR, "unable to identify operator %u", sk_procedure);
 	return -1;					/* keep compiler quiet */
 }
 
diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c
index 96bdd5ba1ab..53e61989d37 100644
--- a/src/backend/access/rtree/rtree.c
+++ b/src/backend/access/rtree/rtree.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.77 2003/02/24 00:57:17 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.78 2003/07/21 20:29:39 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -125,7 +125,7 @@ rtbuild(PG_FUNCTION_ARGS)
 	 * that's not the case, big trouble's what we have.
 	 */
 	if (RelationGetNumberOfBlocks(index) != 0)
-		elog(ERROR, "%s already contains data",
+		elog(ERROR, "index \"%s\" already contains data",
 			 RelationGetRelationName(index));
 
 	/* initialize the root page */
@@ -328,7 +328,7 @@ rtdoinsert(Relation r, IndexTuple itup, RTSTATE *rtstate)
 						LP_USED);
 	}
 	if (l == InvalidOffsetNumber)
-		elog(ERROR, "rtdoinsert: failed to add index item to %s",
+		elog(ERROR, "failed to add index item to \"%s\"",
 			 RelationGetRelationName(r));
 
 	WriteBuffer(buffer);
@@ -520,7 +520,7 @@ rtdosplit(Relation r,
 
 		if (PageAddItem(left, (Item) item, IndexTupleSize(item),
 						leftoff, LP_USED) == InvalidOffsetNumber)
-			elog(ERROR, "rtdosplit: failed to add index item to %s",
+			elog(ERROR, "failed to add index item to \"%s\"",
 				 RelationGetRelationName(r));
 		leftoff = OffsetNumberNext(leftoff);
 
@@ -544,7 +544,7 @@ rtdosplit(Relation r,
 
 		if (PageAddItem(right, (Item) item, IndexTupleSize(item),
 						rightoff, LP_USED) == InvalidOffsetNumber)
-			elog(ERROR, "rtdosplit: failed to add index item to %s",
+			elog(ERROR, "failed to add index item to \"%s\"",
 				 RelationGetRelationName(r));
 		rightoff = OffsetNumberNext(rightoff);
 
@@ -640,7 +640,9 @@ rtintinsert(Relation r,
 	 */
 
 	if (IndexTupleSize(old) != IndexTupleSize(ltup))
-		elog(ERROR, "Variable-length rtree keys are not supported.");
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("variable-length rtree keys are not supported")));
 
 	/* install pointer to left child */
 	memmove(old, ltup, IndexTupleSize(ltup));
@@ -660,7 +662,7 @@ rtintinsert(Relation r,
 		if (PageAddItem(p, (Item) rtup, IndexTupleSize(rtup),
 						PageGetMaxOffsetNumber(p),
 						LP_USED) == InvalidOffsetNumber)
-			elog(ERROR, "rtintinsert: failed to add index item to %s",
+			elog(ERROR, "failed to add index item to \"%s\"",
 				 RelationGetRelationName(r));
 		WriteBuffer(b);
 		ldatum = IndexTupleGetDatum(ltup);
@@ -686,12 +688,12 @@ rtnewroot(Relation r, IndexTuple lt, IndexTuple rt)
 	if (PageAddItem(p, (Item) lt, IndexTupleSize(lt),
 					FirstOffsetNumber,
 					LP_USED) == InvalidOffsetNumber)
-		elog(ERROR, "rtnewroot: failed to add index item to %s",
+		elog(ERROR, "failed to add index item to \"%s\"",
 			 RelationGetRelationName(r));
 	if (PageAddItem(p, (Item) rt, IndexTupleSize(rt),
 					OffsetNumberNext(FirstOffsetNumber),
 					LP_USED) == InvalidOffsetNumber)
-		elog(ERROR, "rtnewroot: failed to add index item to %s",
+		elog(ERROR, "failed to add index item to \"%s\"",
 			 RelationGetRelationName(r));
 	WriteBuffer(b);
 }
@@ -778,8 +780,11 @@ rtpicksplit(Relation r,
 	 */
 	newitemsz = IndexTupleTotalSize(itup);
 	if (newitemsz > RTPageAvailSpace)
-		elog(ERROR, "rtree: index item size %lu exceeds maximum %lu",
-			 (unsigned long) newitemsz, (unsigned long) RTPageAvailSpace);
+		ereport(ERROR,
+				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+				 errmsg("index tuple size %lu exceeds rtree maximum, %lu",
+						(unsigned long) newitemsz,
+						(unsigned long) RTPageAvailSpace)));
 
 	maxoff = PageGetMaxOffsetNumber(page);
 	newitemoff = OffsetNumberNext(maxoff);		/* phony index for new
@@ -1065,7 +1070,7 @@ rtpicksplit(Relation r,
 			choose_left = false;
 		else
 		{
-			elog(ERROR, "rtpicksplit: failed to find a workable page split");
+			elog(ERROR, "failed to find a workable rtree page split");
 			choose_left = false;	/* keep compiler quiet */
 		}
 
diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c
index a0b9883cfe8..345e66c676c 100644
--- a/src/backend/access/rtree/rtscan.c
+++ b/src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.43 2003/03/23 23:01:03 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.44 2003/07/21 20:29:39 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -384,7 +384,7 @@ adjustiptr(IndexScanDesc s,
 					break;
 
 				default:
-					elog(ERROR, "Bad operation in rtree scan adjust: %d", op);
+					elog(ERROR, "unrecognized operation in rtree scan adjust: %d", op);
 			}
 		}
 	}
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 455d637762e..40b41519a93 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.148 2003/05/14 03:26:00 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.149 2003/07/21 20:29:39 tgl Exp $
  *
  * NOTES
  *		Transaction aborts can now occur two ways:
@@ -400,7 +400,9 @@ CommandCounterIncrement(void)
 
 	s->commandId += 1;
 	if (s->commandId == FirstCommandId) /* check for overflow */
-		elog(ERROR, "You may only have 2^32-1 commands per transaction");
+		ereport(ERROR,
+				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+				 errmsg("cannot have more than 2^32-1 commands in a transaction")));
 
 	/* Propagate new command ID into query snapshots, if set */
 	if (QuerySnapshot)
@@ -672,8 +674,7 @@ RecordTransactionAbort(void)
 		 * RecordTransactionCommit ...
 		 */
 		if (TransactionIdDidCommit(xid))
-			elog(PANIC, "RecordTransactionAbort: xact %u already committed",
-				 xid);
+			elog(PANIC, "cannot abort transaction %u, it was already committed", xid);
 
 		START_CRIT_SECTION();
 
@@ -1367,23 +1368,24 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
 	 * xact block already started?
 	 */
 	if (IsTransactionBlock())
-	{
-		/* translator: %s represents an SQL statement name */
-		elog(ERROR, "%s cannot run inside a transaction block", stmtType);
-	}
+		ereport(ERROR,
+				(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+				 /* translator: %s represents an SQL statement name */
+				 errmsg("%s cannot run inside a transaction block",
+						stmtType)));
 	/*
 	 * Are we inside a function call?  If the statement's parameter block
 	 * was allocated in QueryContext, assume it is an interactive command.
 	 * Otherwise assume it is coming from a function.
 	 */
 	if (!MemoryContextContains(QueryContext, stmtNode))
-	{
-		/* translator: %s represents an SQL statement name */
-		elog(ERROR, "%s cannot be executed from a function", stmtType);
-	}
+		ereport(ERROR,
+				(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+				 /* translator: %s represents an SQL statement name */
+				 errmsg("%s cannot be executed from a function", stmtType)));
 	/* If we got past IsTransactionBlock test, should be in default state */
 	if (CurrentTransactionState->blockState != TBLOCK_DEFAULT)
-		elog(ERROR, "PreventTransactionChain: can't prevent chain");
+		elog(ERROR, "cannot prevent transaction chain");
 	/* all okay */
 }
 
@@ -1419,9 +1421,11 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
 	 */
 	if (!MemoryContextContains(QueryContext, stmtNode))
 		return;
-	/* translator: %s represents an SQL statement name */
-	elog(ERROR, "%s may only be used in begin/end transaction blocks",
-		 stmtType);
+	ereport(ERROR,
+			(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
+			 /* translator: %s represents an SQL statement name */
+			 errmsg("%s may only be used in BEGIN/END transaction blocks",
+					stmtType)));
 }
 
 
@@ -1441,7 +1445,9 @@ BeginTransactionBlock(void)
 	 * check the current transaction state
 	 */
 	if (s->blockState != TBLOCK_DEFAULT)
-		elog(WARNING, "BEGIN: already a transaction in progress");
+		ereport(WARNING,
+				(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+				 errmsg("there is already a transaction in progress")));
 
 	/*
 	 * set the current transaction block state information appropriately
@@ -1501,7 +1507,9 @@ EndTransactionBlock(void)
 	 * CommitTransactionCommand() will then put us back into the default
 	 * state.
 	 */
-	elog(WARNING, "COMMIT: no transaction in progress");
+	ereport(WARNING,
+			(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
+			 errmsg("there is no transaction in progress")));
 	AbortTransaction();
 	s->blockState = TBLOCK_ENDABORT;
 }
@@ -1537,7 +1545,9 @@ AbortTransactionBlock(void)
 	 * CommitTransactionCommand() will then put us back into the default
 	 * state.
 	 */
-	elog(WARNING, "ROLLBACK: no transaction in progress");
+	ereport(WARNING,
+			(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
+			 errmsg("there is no transaction in progress")));
 	AbortTransaction();
 	s->blockState = TBLOCK_ENDABORT;
 }
@@ -1583,7 +1593,9 @@ UserAbortTransactionBlock(void)
 	 * CommitTransactionCommand() will then put us back into the default
 	 * state.
 	 */
-	elog(WARNING, "ROLLBACK: no transaction in progress");
+	ereport(WARNING,
+			(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
+			 errmsg("there is no transaction in progress")));
 	AbortTransaction();
 	s->blockState = TBLOCK_ENDABORT;
 }
@@ -1663,7 +1675,8 @@ TransactionBlockStatusCode(void)
 	}
 
 	/* should never get here */
-	elog(ERROR, "bogus transaction block state");
+	elog(ERROR, "invalid transaction block state: %d",
+		 (int) s->blockState);
 	return 0;					/* keep compiler quiet */
 }
 
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index bc163a70286..50fa1125bee 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.118 2003/07/17 16:45:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.119 2003/07/21 20:29:39 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -504,8 +504,7 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
 	if (info & XLR_INFO_MASK)
 	{
 		if ((info & XLR_INFO_MASK) != XLOG_NO_TRAN)
-			elog(PANIC, "XLogInsert: invalid info mask %02X",
-				 (info & XLR_INFO_MASK));
+			elog(PANIC, "invalid xlog info mask %02X", (info & XLR_INFO_MASK));
 		no_tran = true;
 		info &= ~XLR_INFO_MASK;
 	}
@@ -609,7 +608,7 @@ begin:;
 				}
 			}
 			if (i >= XLR_MAX_BKP_BLOCKS)
-				elog(PANIC, "XLogInsert: can backup %d blocks at most",
+				elog(PANIC, "can backup at most %d blocks per xlog record",
 					 XLR_MAX_BKP_BLOCKS);
 		}
 		/* Break out of loop when rdt points to last list item */
@@ -627,7 +626,7 @@ begin:;
 	 * also remove the check for xl_len == 0 in ReadRecord, below.
 	 */
 	if (len == 0 || len > MAXLOGRECSZ)
-		elog(PANIC, "XLogInsert: invalid record length %u", len);
+		elog(PANIC, "invalid xlog record length %u", len);
 
 	START_CRIT_SECTION();
 
@@ -1028,7 +1027,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
 		 * AdvanceXLInsertBuffer.
 		 */
 		if (!XLByteLT(LogwrtResult.Write, XLogCtl->xlblocks[Write->curridx]))
-			elog(PANIC, "XLogWrite: write request %X/%X is past end of log %X/%X",
+			elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
 				 LogwrtResult.Write.xlogid, LogwrtResult.Write.xrecoff,
 				 XLogCtl->xlblocks[Write->curridx].xlogid,
 				 XLogCtl->xlblocks[Write->curridx].xrecoff);
@@ -1045,8 +1044,10 @@ XLogWrite(XLogwrtRqst WriteRqst)
 			if (openLogFile >= 0)
 			{
 				if (close(openLogFile) != 0)
-					elog(PANIC, "close of log file %u, segment %u failed: %m",
-						 openLogId, openLogSeg);
+					ereport(PANIC,
+							(errcode_for_file_access(),
+							 errmsg("close of log file %u, segment %u failed: %m",
+									openLogId, openLogSeg)));
 				openLogFile = -1;
 			}
 			XLByteToPrevSeg(LogwrtResult.Write, openLogId, openLogSeg);
@@ -1080,7 +1081,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
 					 (uint32) CheckPointSegments))
 				{
 					if (XLOG_DEBUG)
-						elog(LOG, "XLogWrite: time for a checkpoint, signaling postmaster");
+						elog(LOG, "time for a checkpoint, signaling postmaster");
 					SendPostmasterSignal(PMSIGNAL_DO_CHECKPOINT);
 				}
 			}
@@ -1099,8 +1100,10 @@ XLogWrite(XLogwrtRqst WriteRqst)
 		{
 			openLogOff = (LogwrtResult.Write.xrecoff - BLCKSZ) % XLogSegSize;
 			if (lseek(openLogFile, (off_t) openLogOff, SEEK_SET) < 0)
-				elog(PANIC, "lseek of log file %u, segment %u, offset %u failed: %m",
-					 openLogId, openLogSeg, openLogOff);
+				ereport(PANIC,
+						(errcode_for_file_access(),
+						 errmsg("lseek of log file %u, segment %u, offset %u failed: %m",
+								openLogId, openLogSeg, openLogOff)));
 		}
 
 		/* OK to write the page */
@@ -1111,8 +1114,10 @@ XLogWrite(XLogwrtRqst WriteRqst)
 			/* if write didn't set errno, assume problem is no disk space */
 			if (errno == 0)
 				errno = ENOSPC;
-			elog(PANIC, "write of log file %u, segment %u, offset %u failed: %m",
-				 openLogId, openLogSeg, openLogOff);
+			ereport(PANIC,
+					(errcode_for_file_access(),
+					 errmsg("write of log file %u, segment %u, offset %u failed: %m",
+							openLogId, openLogSeg, openLogOff)));
 		}
 		openLogOff += BLCKSZ;
 
@@ -1155,8 +1160,10 @@ XLogWrite(XLogwrtRqst WriteRqst)
 			 !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
 			{
 				if (close(openLogFile) != 0)
-					elog(PANIC, "close of log file %u, segment %u failed: %m",
-						 openLogId, openLogSeg);
+					ereport(PANIC,
+							(errcode_for_file_access(),
+							 errmsg("close of log file %u, segment %u failed: %m",
+									openLogId, openLogSeg)));
 				openLogFile = -1;
 			}
 			if (openLogFile < 0)
@@ -1214,13 +1221,10 @@ XLogFlush(XLogRecPtr record)
 		return;
 
 	if (XLOG_DEBUG)
-	{
-		elog(LOG, "XLogFlush%s: request %X/%X; write %X/%X; flush %X/%X",
-			 (IsBootstrapProcessingMode()) ? "(bootstrap)" : "",
+		elog(LOG, "xlog flush request %X/%X; write %X/%X; flush %X/%X",
 			 record.xlogid, record.xrecoff,
 			 LogwrtResult.Write.xlogid, LogwrtResult.Write.xrecoff,
 			 LogwrtResult.Flush.xlogid, LogwrtResult.Flush.xrecoff);
-	}
 
 	START_CRIT_SECTION();
 
@@ -1311,7 +1315,7 @@ XLogFlush(XLogRecPtr record)
 	 */
 	if (XLByteLT(LogwrtResult.Flush, record))
 		elog(InRecovery ? WARNING : ERROR,
-			 "XLogFlush: request %X/%X is not satisfied --- flushed only to %X/%X",
+			 "xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
 			 record.xlogid, record.xrecoff,
 			 LogwrtResult.Flush.xlogid, LogwrtResult.Flush.xrecoff);
 }
@@ -1354,8 +1358,10 @@ XLogFileInit(uint32 log, uint32 seg,
 		if (fd < 0)
 		{
 			if (errno != ENOENT)
-				elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
-					 path, log, seg);
+				ereport(PANIC,
+						(errcode_for_file_access(),
+						 errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+								path, log, seg)));
 		}
 		else
 			return (fd);
@@ -1376,7 +1382,9 @@ XLogFileInit(uint32 log, uint32 seg,
 	fd = BasicOpenFile(tmppath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
 					   S_IRUSR | S_IWUSR);
 	if (fd < 0)
-		elog(PANIC, "creation of file %s failed: %m", tmppath);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("creation of file \"%s\" failed: %m", tmppath)));
 
 	/*
 	 * Zero-fill the file.	We have to do this the hard way to ensure that
@@ -1403,12 +1411,16 @@ XLogFileInit(uint32 log, uint32 seg,
 			/* if write didn't set errno, assume problem is no disk space */
 			errno = save_errno ? save_errno : ENOSPC;
 
-			elog(PANIC, "ZeroFill failed to write %s: %m", tmppath);
+			ereport(PANIC,
+					(errcode_for_file_access(),
+					 errmsg("failed to write \"%s\": %m", tmppath)));
 		}
 	}
 
 	if (pg_fsync(fd) != 0)
-		elog(PANIC, "fsync of file %s failed: %m", tmppath);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("fsync of file \"%s\" failed: %m", tmppath)));
 
 	close(fd);
 
@@ -1435,8 +1447,10 @@ XLogFileInit(uint32 log, uint32 seg,
 	fd = BasicOpenFile(path, O_RDWR | PG_BINARY | XLOG_SYNC_BIT,
 					   S_IRUSR | S_IWUSR);
 	if (fd < 0)
-		elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
-			 path, log, seg);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+						path, log, seg)));
 
 	return (fd);
 }
@@ -1464,7 +1478,7 @@ XLogFileInit(uint32 log, uint32 seg,
  * caller must *not* hold the lock at call.
  *
  * Returns TRUE if file installed, FALSE if not installed because of
- * exceeding max_advance limit.  (Any other kind of failure causes elog().)
+ * exceeding max_advance limit.  (Any other kind of failure causes ereport().)
  */
 static bool
 InstallXLogFileSegment(uint32 log, uint32 seg, char *tmppath,
@@ -1511,13 +1525,17 @@ InstallXLogFileSegment(uint32 log, uint32 seg, char *tmppath,
 	 */
 #if HAVE_WORKING_LINK
 	if (link(tmppath, path) < 0)
-		elog(PANIC, "link from %s to %s (initialization of log file %u, segment %u) failed: %m",
-			 tmppath, path, log, seg);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("link from \"%s\" to \"%s\" (initialization of log file %u, segment %u) failed: %m",
+						tmppath, path, log, seg)));
 	unlink(tmppath);
 #else
 	if (rename(tmppath, path) < 0)
-		elog(PANIC, "rename from %s to %s (initialization of log file %u, segment %u) failed: %m",
-			 tmppath, path, log, seg);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("rename from \"%s\" to \"%s\" (initialization of log file %u, segment %u) failed: %m",
+						tmppath, path, log, seg)));
 #endif
 
 	if (use_lock)
@@ -1543,12 +1561,16 @@ XLogFileOpen(uint32 log, uint32 seg, bool econt)
 	{
 		if (econt && errno == ENOENT)
 		{
-			elog(LOG, "open of %s (log file %u, segment %u) failed: %m",
-				 path, log, seg);
+			ereport(LOG,
+					(errcode_for_file_access(),
+					 errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+							path, log, seg)));
 			return (fd);
 		}
-		elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
-			 path, log, seg);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+						path, log, seg)));
 	}
 
 	return (fd);
@@ -1597,8 +1619,10 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
 
 	xldir = opendir(XLogDir);
 	if (xldir == NULL)
-		elog(PANIC, "could not open transaction log directory (%s): %m",
-			 XLogDir);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("could not open transaction log directory \"%s\": %m",
+						XLogDir)));
 
 	sprintf(lastoff, "%08X%08X", log, seg);
 
@@ -1612,9 +1636,10 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
 			snprintf(path, MAXPGPATH, "%s/%s", XLogDir, xlde->d_name);
 			if (XLOG_archive_dir[0])
 			{
-				elog(LOG, "archiving transaction log file %s",
-					 xlde->d_name);
-				elog(WARNING, "archiving log files is not implemented!");
+				ereport(LOG,
+						(errmsg("archiving transaction log file \"%s\"",
+								xlde->d_name)));
+				elog(WARNING, "archiving log files is not implemented");
 			}
 			else
 			{
@@ -1628,14 +1653,16 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
 										   true, XLOGfileslop,
 										   true))
 				{
-					elog(LOG, "recycled transaction log file %s",
-						 xlde->d_name);
+					ereport(LOG,
+							(errmsg("recycled transaction log file \"%s\"",
+									xlde->d_name)));
 				}
 				else
 				{
 					/* No need for any more future segments... */
-					elog(LOG, "removing transaction log file %s",
-						 xlde->d_name);
+					ereport(LOG,
+							(errmsg("removing transaction log file \"%s\"",
+									xlde->d_name)));
 					unlink(path);
 				}
 			}
@@ -1643,8 +1670,10 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
 		errno = 0;
 	}
 	if (errno)
-		elog(PANIC, "could not read transaction log directory (%s): %m",
-			 XLogDir);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("could not read transaction log directory \"%s\": %m",
+						XLogDir)));
 	closedir(xldir);
 }
 
@@ -1716,8 +1745,9 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
 
 	if (!EQ_CRC64(record->xl_crc, crc))
 	{
-		elog(emode, "ReadRecord: bad resource manager data checksum in record at %X/%X",
-			 recptr.xlogid, recptr.xrecoff);
+		ereport(emode,
+				(errmsg("bad resource manager data checksum in record at %X/%X",
+						recptr.xlogid, recptr.xrecoff)));
 		return (false);
 	}
 
@@ -1738,8 +1768,9 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
 
 		if (!EQ_CRC64(cbuf, crc))
 		{
-			elog(emode, "ReadRecord: bad checksum of backup block %d in record at %X/%X",
-				 i + 1, recptr.xlogid, recptr.xrecoff);
+			ereport(emode,
+					(errmsg("bad checksum of backup block %d in record at %X/%X",
+							i + 1, recptr.xlogid, recptr.xrecoff)));
 			return (false);
 		}
 		blk += sizeof(BkpBlock) + BLCKSZ;
@@ -1807,8 +1838,9 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
 		tmpRecPtr.xrecoff += SizeOfXLogPHD;
 	}
 	else if (!XRecOffIsValid(RecPtr->xrecoff))
-		elog(PANIC, "ReadRecord: invalid record offset at %X/%X",
-			 RecPtr->xlogid, RecPtr->xrecoff);
+		ereport(PANIC,
+				(errmsg("invalid record offset at %X/%X",
+						RecPtr->xlogid, RecPtr->xrecoff)));
 
 	if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
 	{
@@ -1830,14 +1862,18 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
 		readOff = targetPageOff;
 		if (lseek(readFile, (off_t) readOff, SEEK_SET) < 0)
 		{
-			elog(emode, "ReadRecord: lseek of log file %u, segment %u, offset %u failed: %m",
-				 readId, readSeg, readOff);
+			ereport(emode,
+					(errcode_for_file_access(),
+					 errmsg("lseek of log file %u, segment %u, offset %u failed: %m",
+							readId, readSeg, readOff)));
 			goto next_record_is_invalid;
 		}
 		if (read(readFile, readBuf, BLCKSZ) != BLCKSZ)
 		{
-			elog(emode, "ReadRecord: read of log file %u, segment %u, offset %u failed: %m",
-				 readId, readSeg, readOff);
+			ereport(emode,
+					(errcode_for_file_access(),
+					 errmsg("read of log file %u, segment %u, offset %u failed: %m",
+							readId, readSeg, readOff)));
 			goto next_record_is_invalid;
 		}
 		if (!ValidXLOGHeader((XLogPageHeader) readBuf, emode, nextmode))
@@ -1846,8 +1882,9 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
 	if ((((XLogPageHeader) readBuf)->xlp_info & XLP_FIRST_IS_CONTRECORD) &&
 		RecPtr->xrecoff % BLCKSZ == SizeOfXLogPHD)
 	{
-		elog(emode, "ReadRecord: contrecord is requested by %X/%X",
-			 RecPtr->xlogid, RecPtr->xrecoff);
+		ereport(emode,
+				(errmsg("contrecord is requested by %X/%X",
+						RecPtr->xlogid, RecPtr->xrecoff)));
 		goto next_record_is_invalid;
 	}
 	record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ);
@@ -1860,8 +1897,9 @@ got_record:;
 	 */
 	if (record->xl_len == 0)
 	{
-		elog(emode, "ReadRecord: record with zero length at %X/%X",
-			 RecPtr->xlogid, RecPtr->xrecoff);
+		ereport(emode,
+				(errmsg("record with zero length at %X/%X",
+						RecPtr->xlogid, RecPtr->xrecoff)));
 		goto next_record_is_invalid;
 	}
 
@@ -1884,14 +1922,16 @@ got_record:;
 	 */
 	if (total_len > _INTL_MAXLOGRECSZ)
 	{
-		elog(emode, "ReadRecord: record length %u at %X/%X too long",
-			 total_len, RecPtr->xlogid, RecPtr->xrecoff);
+		ereport(emode,
+				(errmsg("record length %u at %X/%X too long",
+						total_len, RecPtr->xlogid, RecPtr->xrecoff)));
 		goto next_record_is_invalid;
 	}
 	if (record->xl_rmid > RM_MAX_ID)
 	{
-		elog(emode, "ReadRecord: invalid resource manager id %u at %X/%X",
-			 record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff);
+		ereport(emode,
+				(errmsg("invalid resource manager id %u at %X/%X",
+						record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
 		goto next_record_is_invalid;
 	}
 	nextRecord = NULL;
@@ -1920,24 +1960,29 @@ got_record:;
 			}
 			if (read(readFile, readBuf, BLCKSZ) != BLCKSZ)
 			{
-				elog(emode, "ReadRecord: read of log file %u, segment %u, offset %u failed: %m",
-					 readId, readSeg, readOff);
+				ereport(emode,
+						(errcode_for_file_access(),
+						 errmsg("read of log file %u, segment %u, offset %u failed: %m",
+								readId, readSeg, readOff)));
 				goto next_record_is_invalid;
 			}
 			if (!ValidXLOGHeader((XLogPageHeader) readBuf, emode, true))
 				goto next_record_is_invalid;
 			if (!(((XLogPageHeader) readBuf)->xlp_info & XLP_FIRST_IS_CONTRECORD))
 			{
-				elog(emode, "ReadRecord: there is no ContRecord flag in log file %u, segment %u, offset %u",
-					 readId, readSeg, readOff);
+				ereport(emode,
+						(errmsg("there is no contrecord flag in log file %u, segment %u, offset %u",
+								readId, readSeg, readOff)));
 				goto next_record_is_invalid;
 			}
 			contrecord = (XLogContRecord *) ((char *) readBuf + SizeOfXLogPHD);
 			if (contrecord->xl_rem_len == 0 ||
 				total_len != (contrecord->xl_rem_len + gotlen))
 			{
-				elog(emode, "ReadRecord: invalid ContRecord length %u in log file %u, segment %u, offset %u",
-					 contrecord->xl_rem_len, readId, readSeg, readOff);
+				ereport(emode,
+						(errmsg("invalid contrecord length %u in log file %u, segment %u, offset %u",
+								contrecord->xl_rem_len,
+								readId, readSeg, readOff)));
 				goto next_record_is_invalid;
 			}
 			len = BLCKSZ - SizeOfXLogPHD - SizeOfXLogContRecord;
@@ -2000,23 +2045,26 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
 
 	if (hdr->xlp_magic != XLOG_PAGE_MAGIC)
 	{
-		elog(emode, "ReadRecord: invalid magic number %04X in log file %u, segment %u, offset %u",
-			 hdr->xlp_magic, readId, readSeg, readOff);
+		ereport(emode,
+				(errmsg("invalid magic number %04X in log file %u, segment %u, offset %u",
+						hdr->xlp_magic, readId, readSeg, readOff)));
 		return false;
 	}
 	if ((hdr->xlp_info & ~XLP_ALL_FLAGS) != 0)
 	{
-		elog(emode, "ReadRecord: invalid info bits %04X in log file %u, segment %u, offset %u",
-			 hdr->xlp_info, readId, readSeg, readOff);
+		ereport(emode,
+				(errmsg("invalid info bits %04X in log file %u, segment %u, offset %u",
+						hdr->xlp_info, readId, readSeg, readOff)));
 		return false;
 	}
 	recaddr.xlogid = readId;
 	recaddr.xrecoff = readSeg * XLogSegSize + readOff;
 	if (!XLByteEQ(hdr->xlp_pageaddr, recaddr))
 	{
-		elog(emode, "ReadRecord: unexpected pageaddr %X/%X in log file %u, segment %u, offset %u",
-			 hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
-			 readId, readSeg, readOff);
+		ereport(emode,
+				(errmsg("unexpected pageaddr %X/%X in log file %u, segment %u, offset %u",
+						hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
+						readId, readSeg, readOff)));
 		return false;
 	}
 
@@ -2035,9 +2083,11 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
 		if (hdr->xlp_sui < lastReadSUI ||
 			hdr->xlp_sui > lastReadSUI + 512)
 		{
-			/* translator: SUI = startup id */
-			elog(emode, "ReadRecord: out-of-sequence SUI %u (after %u) in log file %u, segment %u, offset %u",
-				 hdr->xlp_sui, lastReadSUI, readId, readSeg, readOff);
+			ereport(emode,
+					/* translator: SUI = startup id */
+					(errmsg("out-of-sequence SUI %u (after %u) in log file %u, segment %u, offset %u",
+							hdr->xlp_sui, lastReadSUI,
+							readId, readSeg, readOff)));
 			return false;
 		}
 	}
@@ -2095,11 +2145,13 @@ WriteControlFile(void)
 	ControlFile->localeBuflen = LOCALE_NAME_BUFLEN;
 	localeptr = setlocale(LC_COLLATE, NULL);
 	if (!localeptr)
-		elog(PANIC, "invalid LC_COLLATE setting");
+		ereport(PANIC,
+				(errmsg("invalid LC_COLLATE setting")));
 	StrNCpy(ControlFile->lc_collate, localeptr, LOCALE_NAME_BUFLEN);
 	localeptr = setlocale(LC_CTYPE, NULL);
 	if (!localeptr)
-		elog(PANIC, "invalid LC_CTYPE setting");
+		ereport(PANIC,
+				(errmsg("invalid LC_CTYPE setting")));
 	StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
 
 	/* Contents are protected with a CRC */
@@ -2117,7 +2169,8 @@ WriteControlFile(void)
 	 * specific error than "couldn't read pg_control".
 	 */
 	if (sizeof(ControlFileData) > BLCKSZ)
-		elog(PANIC, "sizeof(ControlFileData) is larger than BLCKSZ; fix either one");
+		ereport(PANIC,
+				(errmsg("sizeof(ControlFileData) is larger than BLCKSZ; fix either one")));
 
 	memset(buffer, 0, BLCKSZ);
 	memcpy(buffer, ControlFile, sizeof(ControlFileData));
@@ -2125,8 +2178,10 @@ WriteControlFile(void)
 	fd = BasicOpenFile(ControlFilePath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
 					   S_IRUSR | S_IWUSR);
 	if (fd < 0)
-		elog(PANIC, "WriteControlFile: could not create control file (%s): %m",
-			 ControlFilePath);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("could not create control file \"%s\": %m",
+						ControlFilePath)));
 
 	errno = 0;
 	if (write(fd, buffer, BLCKSZ) != BLCKSZ)
@@ -2134,11 +2189,15 @@ WriteControlFile(void)
 		/* if write didn't set errno, assume problem is no disk space */
 		if (errno == 0)
 			errno = ENOSPC;
-		elog(PANIC, "WriteControlFile: write to control file failed: %m");
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("write to control file failed: %m")));
 	}
 
 	if (pg_fsync(fd) != 0)
-		elog(PANIC, "WriteControlFile: fsync of control file failed: %m");
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("fsync of control file failed: %m")));
 
 	close(fd);
 }
@@ -2154,10 +2213,15 @@ ReadControlFile(void)
 	 */
 	fd = BasicOpenFile(ControlFilePath, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
 	if (fd < 0)
-		elog(PANIC, "could not open control file (%s): %m", ControlFilePath);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("could not open control file \"%s\": %m",
+						ControlFilePath)));
 
 	if (read(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData))
-		elog(PANIC, "read from control file failed: %m");
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("read from control file failed: %m")));
 
 	close(fd);
 
@@ -2168,12 +2232,12 @@ ReadControlFile(void)
 	 * more enlightening than complaining about wrong CRC.
 	 */
 	if (ControlFile->pg_control_version != PG_CONTROL_VERSION)
-		elog(PANIC,
-			 "The database cluster was initialized with PG_CONTROL_VERSION %d,\n"
-			 "\tbut the server was compiled with PG_CONTROL_VERSION %d.\n"
-			 "\tIt looks like you need to initdb.",
-			 ControlFile->pg_control_version, PG_CONTROL_VERSION);
-
+		ereport(FATAL,
+				(errmsg("database files are incompatible with server"),
+				 errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
+						   " but the server was compiled with PG_CONTROL_VERSION %d.",
+						   ControlFile->pg_control_version, PG_CONTROL_VERSION),
+				 errhint("It looks like you need to initdb.")));
 	/* Now check the CRC. */
 	INIT_CRC64(crc);
 	COMP_CRC64(crc,
@@ -2182,7 +2246,8 @@ ReadControlFile(void)
 	FIN_CRC64(crc);
 
 	if (!EQ_CRC64(crc, ControlFile->crc))
-		elog(PANIC, "invalid checksum in control file");
+		ereport(FATAL,
+				(errmsg("invalid checksum in control file")));
 
 	/*
 	 * Do compatibility checking immediately.  We do this here for 2
@@ -2197,71 +2262,78 @@ ReadControlFile(void)
 	 * compatibility items because they can affect sort order of indexes.)
 	 */
 	if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
-		elog(PANIC,
-			 "The database cluster was initialized with CATALOG_VERSION_NO %d,\n"
-		   "\tbut the backend was compiled with CATALOG_VERSION_NO %d.\n"
-			 "\tIt looks like you need to initdb.",
-			 ControlFile->catalog_version_no, CATALOG_VERSION_NO);
+		ereport(FATAL,
+				(errmsg("database files are incompatible with server"),
+				 errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d,"
+						   " but the server was compiled with CATALOG_VERSION_NO %d.",
+						   ControlFile->catalog_version_no, CATALOG_VERSION_NO),
+				 errhint("It looks like you need to initdb.")));
 	if (ControlFile->blcksz != BLCKSZ)
-		elog(PANIC,
-			 "The database cluster was initialized with BLCKSZ %d,\n"
-			 "\tbut the backend was compiled with BLCKSZ %d.\n"
-			 "\tIt looks like you need to initdb.",
-			 ControlFile->blcksz, BLCKSZ);
+		ereport(FATAL,
+				(errmsg("database files are incompatible with server"),
+				 errdetail("The database cluster was initialized with BLCKSZ %d,"
+						   " but the server was compiled with BLCKSZ %d.",
+						   ControlFile->blcksz, BLCKSZ),
+				 errhint("It looks like you need to recompile or initdb.")));
 	if (ControlFile->relseg_size != RELSEG_SIZE)
-		elog(PANIC,
-			 "The database cluster was initialized with RELSEG_SIZE %d,\n"
-			 "\tbut the backend was compiled with RELSEG_SIZE %d.\n"
-			 "\tIt looks like you need to recompile or initdb.",
-			 ControlFile->relseg_size, RELSEG_SIZE);
-
+		ereport(FATAL,
+				(errmsg("database files are incompatible with server"),
+				 errdetail("The database cluster was initialized with RELSEG_SIZE %d,"
+						   " but the server was compiled with RELSEG_SIZE %d.",
+						   ControlFile->relseg_size, RELSEG_SIZE),
+				 errhint("It looks like you need to recompile or initdb.")));
 	if (ControlFile->nameDataLen != NAMEDATALEN)
-		elog(PANIC,
-			 "The database cluster was initialized with NAMEDATALEN %d,\n"
-			 "\tbut the backend was compiled with NAMEDATALEN %d.\n"
-			 "\tIt looks like you need to recompile or initdb.",
-			 ControlFile->nameDataLen, NAMEDATALEN);
-
+		ereport(FATAL,
+				(errmsg("database files are incompatible with server"),
+				 errdetail("The database cluster was initialized with NAMEDATALEN %d,"
+						   " but the server was compiled with NAMEDATALEN %d.",
+						   ControlFile->nameDataLen, NAMEDATALEN),
+				 errhint("It looks like you need to recompile or initdb.")));
 	if (ControlFile->funcMaxArgs != FUNC_MAX_ARGS)
-		elog(PANIC,
-		  "The database cluster was initialized with FUNC_MAX_ARGS %d,\n"
-			 "\tbut the backend was compiled with FUNC_MAX_ARGS %d.\n"
-			 "\tIt looks like you need to recompile or initdb.",
-			 ControlFile->funcMaxArgs, FUNC_MAX_ARGS);
+		ereport(FATAL,
+				(errmsg("database files are incompatible with server"),
+				 errdetail("The database cluster was initialized with FUNC_MAX_ARGS %d,"
+						   " but the server was compiled with FUNC_MAX_ARGS %d.",
+						   ControlFile->funcMaxArgs, FUNC_MAX_ARGS),
+				 errhint("It looks like you need to recompile or initdb.")));
 
 #ifdef HAVE_INT64_TIMESTAMP
 	if (ControlFile->enableIntTimes != TRUE)
-		elog(PANIC,
-			 "The database cluster was initialized without HAVE_INT64_TIMESTAMP\n"
-			 "\tbut the backend was compiled with HAVE_INT64_TIMESTAMP.\n"
-			 "\tIt looks like you need to recompile or initdb.");
+		ereport(FATAL,
+				(errmsg("database files are incompatible with server"),
+				 errdetail("The database cluster was initialized without HAVE_INT64_TIMESTAMP"
+						   " but the server was compiled with HAVE_INT64_TIMESTAMP."),
+				 errhint("It looks like you need to recompile or initdb.")));
 #else
 	if (ControlFile->enableIntTimes != FALSE)
-		elog(PANIC,
-		"The database cluster was initialized with HAVE_INT64_TIMESTAMP\n"
-		 "\tbut the backend was compiled without HAVE_INT64_TIMESTAMP.\n"
-			 "\tIt looks like you need to recompile or initdb.");
+		ereport(FATAL,
+				(errmsg("database files are incompatible with server"),
+				 errdetail("The database cluster was initialized with HAVE_INT64_TIMESTAMP"
+						   " but the server was compiled without HAVE_INT64_TIMESTAMP."),
+				 errhint("It looks like you need to recompile or initdb.")));
 #endif
 
 	if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN)
-		elog(PANIC,
-			 "The database cluster was initialized with LOCALE_NAME_BUFLEN %d,\n"
-		   "\tbut the backend was compiled with LOCALE_NAME_BUFLEN %d.\n"
-			 "\tIt looks like you need to initdb.",
-			 ControlFile->localeBuflen, LOCALE_NAME_BUFLEN);
-
+		ereport(FATAL,
+				(errmsg("database files are incompatible with server"),
+				 errdetail("The database cluster was initialized with LOCALE_NAME_BUFLEN %d,"
+						   " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
+						   ControlFile->localeBuflen, LOCALE_NAME_BUFLEN),
+				 errhint("It looks like you need to recompile or initdb.")));
 	if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
-		elog(PANIC,
-		   "The database cluster was initialized with LC_COLLATE '%s',\n"
-			 "\twhich is not recognized by setlocale().\n"
-			 "\tIt looks like you need to initdb.",
-			 ControlFile->lc_collate);
+		ereport(FATAL,
+				(errmsg("database files are incompatible with operating system"),
+				 errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
+						   " which is not recognized by setlocale().",
+						   ControlFile->lc_collate),
+				 errhint("It looks like you need to initdb or install locale support.")));
 	if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL)
-		elog(PANIC,
-			 "The database cluster was initialized with LC_CTYPE '%s',\n"
-			 "\twhich is not recognized by setlocale().\n"
-			 "\tIt looks like you need to initdb.",
-			 ControlFile->lc_ctype);
+		ereport(FATAL,
+				(errmsg("database files are incompatible with operating system"),
+				 errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
+						   " which is not recognized by setlocale().",
+						   ControlFile->lc_ctype),
+				 errhint("It looks like you need to initdb or install locale support.")));
 
 	/* Make the fixed locale settings visible as GUC variables, too */
 	SetConfigOption("lc_collate", ControlFile->lc_collate,
@@ -2283,7 +2355,10 @@ UpdateControlFile(void)
 
 	fd = BasicOpenFile(ControlFilePath, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
 	if (fd < 0)
-		elog(PANIC, "could not open control file (%s): %m", ControlFilePath);
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("could not open control file \"%s\": %m",
+						ControlFilePath)));
 
 	errno = 0;
 	if (write(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData))
@@ -2291,11 +2366,15 @@ UpdateControlFile(void)
 		/* if write didn't set errno, assume problem is no disk space */
 		if (errno == 0)
 			errno = ENOSPC;
-		elog(PANIC, "write to control file failed: %m");
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("write to control file failed: %m")));
 	}
 
 	if (pg_fsync(fd) != 0)
-		elog(PANIC, "fsync of control file failed: %m");
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("fsync of control file failed: %m")));
 
 	close(fd);
 }
@@ -2435,11 +2514,15 @@ BootStrapXLOG(void)
 		/* if write didn't set errno, assume problem is no disk space */
 		if (errno == 0)
 			errno = ENOSPC;
-		elog(PANIC, "BootStrapXLOG failed to write log file: %m");
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("failed to write bootstrap xlog file: %m")));
 	}
 
 	if (pg_fsync(openLogFile) != 0)
-		elog(PANIC, "BootStrapXLOG failed to fsync log file: %m");
+		ereport(PANIC,
+				(errcode_for_file_access(),
+				 errmsg("failed to fsync bootstrap xlog file: %m")));
 
 	close(openLogFile);
 	openLogFile = -1;
@@ -2506,22 +2589,27 @@ StartupXLOG(void)
 		ControlFile->state < DB_SHUTDOWNED ||
 		ControlFile->state > DB_IN_PRODUCTION ||
 		!XRecOffIsValid(ControlFile->checkPoint.xrecoff))
-		elog(PANIC, "control file context is broken");
+		ereport(FATAL,
+				(errmsg("control file contains invalid data")));
 
 	if (ControlFile->state == DB_SHUTDOWNED)
-		elog(LOG, "database system was shut down at %s",
-			 str_time(ControlFile->time));
+		ereport(LOG,
+				(errmsg("database system was shut down at %s",
+						str_time(ControlFile->time))));
 	else if (ControlFile->state == DB_SHUTDOWNING)
-		elog(LOG, "database system shutdown was interrupted at %s",
-			 str_time(ControlFile->time));
+		ereport(LOG,
+				(errmsg("database system shutdown was interrupted at %s",
+						str_time(ControlFile->time))));
 	else if (ControlFile->state == DB_IN_RECOVERY)
-		elog(LOG, "database system was interrupted being in recovery at %s\n"
-			 "\tThis probably means that some data blocks are corrupted\n"
-			 "\tand you will have to use the last backup for recovery.",
-			 str_time(ControlFile->time));
+		ereport(LOG,
+				(errmsg("database system was interrupted while in recovery at %s",
+						str_time(ControlFile->time)),
+				 errhint("This probably means that some data is corrupted and"
+						 " you will have to use the last backup for recovery.")));
 	else if (ControlFile->state == DB_IN_PRODUCTION)
-		elog(LOG, "database system was interrupted at %s",
-			 str_time(ControlFile->time));
+		ereport(LOG,
+				(errmsg("database system was interrupted at %s",
+						str_time(ControlFile->time))));
 
 	/* This is just to allow attaching to startup process with a debugger */
 #ifdef XLOG_REPLAY_DELAY
@@ -2537,8 +2625,9 @@ StartupXLOG(void)
 	if (record != NULL)
 	{
 		checkPointLoc = ControlFile->checkPoint;
-		elog(LOG, "checkpoint record is at %X/%X",
-			 checkPointLoc.xlogid, checkPointLoc.xrecoff);
+		ereport(LOG,
+				(errmsg("checkpoint record is at %X/%X",
+						checkPointLoc.xlogid, checkPointLoc.xrecoff)));
 	}
 	else
 	{
@@ -2546,25 +2635,30 @@ StartupXLOG(void)
 		if (record != NULL)
 		{
 			checkPointLoc = ControlFile->prevCheckPoint;
-			elog(LOG, "using previous checkpoint record at %X/%X",
-				 checkPointLoc.xlogid, checkPointLoc.xrecoff);
+			ereport(LOG,
+					(errmsg("using previous checkpoint record at %X/%X",
+							checkPointLoc.xlogid, checkPointLoc.xrecoff)));
 			InRecovery = true;	/* force recovery even if SHUTDOWNED */
 		}
 		else
-			elog(PANIC, "unable to locate a valid checkpoint record");
+			ereport(PANIC,
+					(errmsg("unable to locate a valid checkpoint record")));
 	}
 	LastRec = RecPtr = checkPointLoc;
 	memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
 	wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
 
-	elog(LOG, "redo record is at %X/%X; undo record is at %X/%X; shutdown %s",
-		 checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
-		 checkPoint.undo.xlogid, checkPoint.undo.xrecoff,
-		 wasShutdown ? "TRUE" : "FALSE");
-	elog(LOG, "next transaction id: %u; next oid: %u",
-		 checkPoint.nextXid, checkPoint.nextOid);
+	ereport(LOG,
+			(errmsg("redo record is at %X/%X; undo record is at %X/%X; shutdown %s",
+					checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
+					checkPoint.undo.xlogid, checkPoint.undo.xrecoff,
+					wasShutdown ? "TRUE" : "FALSE")));
+	ereport(LOG,
+			(errmsg("next transaction id: %u; next oid: %u",
+					checkPoint.nextXid, checkPoint.nextOid)));
 	if (!TransactionIdIsNormal(checkPoint.nextXid))
-		elog(PANIC, "invalid next transaction id");
+		ereport(PANIC,
+				(errmsg("invalid next transaction id")));
 
 	ShmemVariableCache->nextXid = checkPoint.nextXid;
 	ShmemVariableCache->nextOid = checkPoint.nextOid;
@@ -2586,7 +2680,8 @@ StartupXLOG(void)
 		XLogCtl->SavedRedoRecPtr = checkPoint.redo;
 
 	if (XLByteLT(RecPtr, checkPoint.redo))
-		elog(PANIC, "invalid redo in checkpoint record");
+		ereport(PANIC,
+				(errmsg("invalid redo in checkpoint record")));
 	if (checkPoint.undo.xrecoff == 0)
 		checkPoint.undo = RecPtr;
 
@@ -2594,7 +2689,8 @@ StartupXLOG(void)
 		XLByteLT(checkPoint.redo, RecPtr))
 	{
 		if (wasShutdown)
-			elog(PANIC, "invalid redo/undo record in shutdown checkpoint");
+			ereport(PANIC,
+					(errmsg("invalid redo/undo record in shutdown checkpoint")));
 		InRecovery = true;
 	}
 	else if (ControlFile->state != DB_SHUTDOWNED)
@@ -2605,8 +2701,9 @@ StartupXLOG(void)
 	{
 		int		rmid;
 
-		elog(LOG, "database system was not properly shut down; "
-			 "automatic recovery in progress");
+		ereport(LOG,
+				(errmsg("database system was not properly shut down; "
+						"automatic recovery in progress")));
 		ControlFile->state = DB_IN_RECOVERY;
 		ControlFile->time = time(NULL);
 		UpdateControlFile();
@@ -2632,8 +2729,9 @@ StartupXLOG(void)
 		if (record != NULL)
 		{
 			InRedo = true;
-			elog(LOG, "redo starts at %X/%X",
-				 ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
+			ereport(LOG,
+					(errmsg("redo starts at %X/%X",
+							ReadRecPtr.xlogid, ReadRecPtr.xrecoff)));
 			do
 			{
 				/* nextXid must be beyond record's xid */
@@ -2663,13 +2761,15 @@ StartupXLOG(void)
 				RmgrTable[record->xl_rmid].rm_redo(EndRecPtr, record);
 				record = ReadRecord(NULL, LOG, buffer);
 			} while (record != NULL);
-			elog(LOG, "redo done at %X/%X",
-				 ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
+			ereport(LOG,
+					(errmsg("redo done at %X/%X",
+							ReadRecPtr.xlogid, ReadRecPtr.xrecoff)));
 			LastRec = ReadRecPtr;
 			InRedo = false;
 		}
 		else
-			elog(LOG, "redo is not required");
+			ereport(LOG,
+					(errmsg("redo is not required")));
 	}
 
 	/*
@@ -2736,8 +2836,9 @@ StartupXLOG(void)
 		RecPtr = ReadRecPtr;
 		if (XLByteLT(checkPoint.undo, RecPtr))
 		{
-			elog(LOG, "undo starts at %X/%X",
-				 RecPtr.xlogid, RecPtr.xrecoff);
+			ereport(LOG,
+					(errmsg("undo starts at %X/%X",
+							RecPtr.xlogid, RecPtr.xrecoff)));
 			do
 			{
 				record = ReadRecord(&RecPtr, PANIC, buffer);
@@ -2746,11 +2847,13 @@ StartupXLOG(void)
 					RmgrTable[record->xl_rmid].rm_undo(EndRecPtr, record);
 				RecPtr = record->xl_prev;
 			} while (XLByteLE(checkPoint.undo, RecPtr));
-			elog(LOG, "undo done at %X/%X",
-				 ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
+			ereport(LOG,
+					(errmsg("undo done at %X/%X",
+							ReadRecPtr.xlogid, ReadRecPtr.xrecoff)));
 		}
 		else
-			elog(LOG, "undo is not required");
+			ereport(LOG,
+					(errmsg("undo is not required")));
 	}
 #endif
 
@@ -2837,7 +2940,8 @@ StartupXLOG(void)
 	/* Start up the commit log, too */
 	StartupCLOG();
 
-	elog(LOG, "database system is ready");
+	ereport(LOG,
+			(errmsg("database system is ready")));
 	CritSectionCount--;
 
 	/* Shut down readFile facility, free space */
@@ -2868,9 +2972,10 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
 
 	if (!XRecOffIsValid(RecPtr.xrecoff))
 	{
-		elog(LOG, (whichChkpt == 1 ?
-				   "invalid primary checkpoint link in control file" :
-				   "invalid secondary checkpoint link in control file"));
+		ereport(LOG,
+				/* translator: %s is "primary" or "secondary" */
+				(errmsg("invalid %s checkpoint link in control file",
+						(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
 		return NULL;
 	}
 
@@ -2878,31 +2983,35 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
 
 	if (record == NULL)
 	{
-		elog(LOG, (whichChkpt == 1 ?
-				   "invalid primary checkpoint record" :
-				   "invalid secondary checkpoint record"));
+		ereport(LOG,
+				/* translator: %s is "primary" or "secondary" */
+				(errmsg("invalid %s checkpoint record",
+						(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
 		return NULL;
 	}
 	if (record->xl_rmid != RM_XLOG_ID)
 	{
-		elog(LOG, (whichChkpt == 1 ?
-			 "invalid resource manager id in primary checkpoint record" :
-		  "invalid resource manager id in secondary checkpoint record"));
+		ereport(LOG,
+				/* translator: %s is "primary" or "secondary" */
+				(errmsg("invalid resource manager ID in %s checkpoint record",
+						(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
 		return NULL;
 	}
 	if (record->xl_info != XLOG_CHECKPOINT_SHUTDOWN &&
 		record->xl_info != XLOG_CHECKPOINT_ONLINE)
 	{
-		elog(LOG, (whichChkpt == 1 ?
-				   "invalid xl_info in primary checkpoint record" :
-				   "invalid xl_info in secondary checkpoint record"));
+		ereport(LOG,
+				/* translator: %s is "primary" or "secondary" */
+				(errmsg("invalid xl_info in %s checkpoint record",
+						(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
 		return NULL;
 	}
 	if (record->xl_len != sizeof(CheckPoint))
 	{
-		elog(LOG, (whichChkpt == 1 ?
-				   "invalid length of primary checkpoint record" :
-				   "invalid length of secondary checkpoint record"));
+		ereport(LOG,
+				/* translator: %s is "primary" or "secondary" */
+				(errmsg("invalid length of %s checkpoint record",
+						(whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
 		return NULL;
 	}
 	return record;
@@ -2965,7 +3074,8 @@ GetRedoRecPtr(void)
 void
 ShutdownXLOG(void)
 {
-	elog(LOG, "shutting down");
+	ereport(LOG,
+			(errmsg("shutting down")));
 
 	/* suppress in-transaction check in CreateCheckPoint */
 	MyLastRecPtr.xrecoff = 0;
@@ -2978,7 +3088,8 @@ ShutdownXLOG(void)
 	ShutdownCLOG();
 	CritSectionCount--;
 
-	elog(LOG, "database system is shut down");
+	ereport(LOG,
+			(errmsg("database system is shut down")));
 }
 
 /*
@@ -2999,7 +3110,9 @@ CreateCheckPoint(bool shutdown, bool force)
 	uint32		_logSeg;
 
 	if (MyXactMadeXLogEntry)
-		elog(ERROR, "CreateCheckPoint: cannot be called inside transaction block");
+		ereport(ERROR,
+				(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+				 errmsg("checkpoint cannot be made inside a transaction block")));
 
 	/*
 	 * Acquire CheckpointLock to ensure only one checkpoint happens at a time.
@@ -3183,7 +3296,8 @@ CreateCheckPoint(bool shutdown, bool force)
 	 * recptr = end of actual checkpoint record.
 	 */
 	if (shutdown && !XLByteEQ(checkPoint.redo, ProcLastRecPtr))
-		elog(PANIC, "concurrent transaction log activity while database system is shutting down");
+		ereport(PANIC,
+				(errmsg("concurrent transaction log activity while database system is shutting down")));
 
 	/*
 	 * Select point at which we can truncate the log, which we base on the
@@ -3422,13 +3536,17 @@ assign_xlog_sync_method(const char *method, bool doit, bool interactive)
 		if (openLogFile >= 0)
 		{
 			if (pg_fsync(openLogFile) != 0)
-				elog(PANIC, "fsync of log file %u, segment %u failed: %m",
-					 openLogId, openLogSeg);
+				ereport(PANIC,
+						(errcode_for_file_access(),
+						 errmsg("fsync of log file %u, segment %u failed: %m",
+								openLogId, openLogSeg)));
 			if (open_sync_bit != new_sync_bit)
 			{
 				if (close(openLogFile) != 0)
-					elog(PANIC, "close of log file %u, segment %u failed: %m",
-						 openLogId, openLogSeg);
+					ereport(PANIC,
+							(errcode_for_file_access(),
+							 errmsg("close of log file %u, segment %u failed: %m",
+									openLogId, openLogSeg)));
 				openLogFile = -1;
 			}
 		}
@@ -3450,21 +3568,25 @@ issue_xlog_fsync(void)
 	{
 		case SYNC_METHOD_FSYNC:
 			if (pg_fsync(openLogFile) != 0)
-				elog(PANIC, "fsync of log file %u, segment %u failed: %m",
-					 openLogId, openLogSeg);
+				ereport(PANIC,
+						(errcode_for_file_access(),
+						 errmsg("fsync of log file %u, segment %u failed: %m",
+								openLogId, openLogSeg)));
 			break;
 #ifdef HAVE_FDATASYNC
 		case SYNC_METHOD_FDATASYNC:
 			if (pg_fdatasync(openLogFile) != 0)
-				elog(PANIC, "fdatasync of log file %u, segment %u failed: %m",
-					 openLogId, openLogSeg);
+				ereport(PANIC,
+						(errcode_for_file_access(),
+						 errmsg("fdatasync of log file %u, segment %u failed: %m",
+								openLogId, openLogSeg)));
 			break;
 #endif
 		case SYNC_METHOD_OPEN:
 			/* write synced it already */
 			break;
 		default:
-			elog(PANIC, "bogus wal_sync_method %d", sync_method);
+			elog(PANIC, "unrecognized wal_sync_method: %d", sync_method);
 			break;
 	}
 }
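
The xlog.c hunks above follow one pattern throughout: user-facing messages move from bare elog() to ereport(), picking up a SQLSTATE via errcode() or errcode_for_file_access() and becoming translatable via errmsg(), while internal "cannot happen" checks such as the unrecognized wal_sync_method case stay on plain elog(). A minimal sketch of that pattern follows; it is not a hunk of this patch, and the function name, its arguments, and the stand-in constant are hypothetical, assuming only the standard backend headers.

#include "postgres.h"

#include "storage/fd.h"			/* pg_fsync() */

static void
flush_log_segment(int fd, uint32 log_id, uint32 seg, int method)
{
	switch (method)
	{
		case 0:					/* stand-in for SYNC_METHOD_FSYNC */
			if (pg_fsync(fd) != 0)
				ereport(PANIC,
						(errcode_for_file_access(),		/* map errno to a SQLSTATE */
						 errmsg("fsync of log file %u, segment %u failed: %m",
								log_id, seg)));
			break;
		default:
			/* internal error: message stays untranslated, default code XX000 */
			elog(PANIC, "unrecognized wal_sync_method: %d", method);
			break;
	}
}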
diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h
index 438612db449..7b08854ebba 100644
--- a/src/include/utils/elog.h
+++ b/src/include/utils/elog.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: elog.h,v 1.52 2003/07/21 17:05:11 tgl Exp $
+ * $Id: elog.h,v 1.53 2003/07/21 20:29:39 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -177,6 +177,7 @@
 #define ERRCODE_NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION	MAKE_SQLSTATE('2','5', '0','0','5')
 #define ERRCODE_READ_ONLY_SQL_TRANSACTION	MAKE_SQLSTATE('2','5', '0','0','6')
 #define ERRCODE_SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED	MAKE_SQLSTATE('2','5', '0','0','7')
+#define ERRCODE_NO_ACTIVE_SQL_TRANSACTION	MAKE_SQLSTATE('2','5', 'P','0','1')
 
 /* Class 26 - Invalid SQL Statement Name */
 /* (we take this to mean prepared statements) */
@@ -308,6 +309,7 @@
 #define ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE	MAKE_SQLSTATE('5','5', '0','0','0')
 #define ERRCODE_OBJECT_IN_USE				MAKE_SQLSTATE('5','5', '0','0','6')
 #define ERRCODE_INDEXES_DEACTIVATED			MAKE_SQLSTATE('5','5', 'P','0','1')
+#define ERRCODE_INDEX_CORRUPTED				MAKE_SQLSTATE('5','5', 'P','0','2')
 
 /* Class 57 - Operator Intervention (class borrowed from DB2) */
 #define ERRCODE_OPERATOR_INTERVENTION		MAKE_SQLSTATE('5','7', '0','0','0')
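
The two additions above extend the existing SQLSTATE tables with the PostgreSQL-specific codes 25P01 (ERRCODE_NO_ACTIVE_SQL_TRANSACTION) and 55P02 (ERRCODE_INDEX_CORRUPTED), packed character by character with MAKE_SQLSTATE just like their neighbours. As a purely illustrative sketch of how a call site can attach one of the new codes (not a hunk of this patch; the check, message text, and names are made up):

#include "postgres.h"

static void
check_index_magic(uint32 magic, uint32 expected, const char *relname)
{
	if (magic != expected)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("index \"%s\" is corrupted", relname)));
}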
diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out
index ea4524dd545..677dce17ea5 100644
--- a/src/test/regress/expected/alter_table.out
+++ b/src/test/regress/expected/alter_table.out
@@ -475,7 +475,7 @@ NOTICE:  ALTER TABLE / ADD UNIQUE will create implicit index "atacc_test1" for t
 insert into atacc1 (test) values (2);
 -- should fail
 insert into atacc1 (test) values (2);
-ERROR:  Cannot insert a duplicate key into unique index atacc_test1
+ERROR:  duplicate key violates UNIQUE constraint "atacc_test1"
 -- should succeed
 insert into atacc1 (test) values (4);
 -- try adding a unique oid constraint
@@ -509,7 +509,7 @@ NOTICE:  ALTER TABLE / ADD UNIQUE will create implicit index "atacc_test1" for t
 insert into atacc1 (test,test2) values (4,4);
 -- should fail
 insert into atacc1 (test,test2) values (4,4);
-ERROR:  Cannot insert a duplicate key into unique index atacc_test1
+ERROR:  duplicate key violates UNIQUE constraint "atacc_test1"
 -- should all succeed
 insert into atacc1 (test,test2) values (4,5);
 insert into atacc1 (test,test2) values (5,4);
@@ -523,7 +523,7 @@ NOTICE:  ALTER TABLE / ADD UNIQUE will create implicit index "atacc1_test2_key"
 -- should fail for @@ second one @@
 insert into atacc1 (test2, test) values (3, 3);
 insert into atacc1 (test2, test) values (2, 3);
-ERROR:  Cannot insert a duplicate key into unique index atacc1_test_key
+ERROR:  duplicate key violates UNIQUE constraint "atacc1_test_key"
 drop table atacc1;
 -- test primary key constraint adding
 create table atacc1 ( test int );
@@ -534,7 +534,7 @@ NOTICE:  ALTER TABLE / ADD PRIMARY KEY will create implicit index "atacc_test1"
 insert into atacc1 (test) values (2);
 -- should fail
 insert into atacc1 (test) values (2);
-ERROR:  Cannot insert a duplicate key into unique index atacc_test1
+ERROR:  duplicate key violates UNIQUE constraint "atacc_test1"
 -- should succeed
 insert into atacc1 (test) values (4);
 -- inserting NULL should fail
@@ -589,7 +589,7 @@ ERROR:  multiple primary keys for table "atacc1" are not allowed
 insert into atacc1 (test,test2) values (4,4);
 -- should fail
 insert into atacc1 (test,test2) values (4,4);
-ERROR:  Cannot insert a duplicate key into unique index atacc_test1
+ERROR:  duplicate key violates UNIQUE constraint "atacc_test1"
 insert into atacc1 (test,test2) values (NULL,3);
 ERROR:  null value for attribute "test" violates NOT NULL constraint
 insert into atacc1 (test,test2) values (3, NULL);
@@ -607,7 +607,7 @@ NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "atacc1_pkey" for
 -- only first should succeed
 insert into atacc1 (test2, test) values (3, 3);
 insert into atacc1 (test2, test) values (2, 3);
-ERROR:  Cannot insert a duplicate key into unique index atacc1_pkey
+ERROR:  duplicate key violates UNIQUE constraint "atacc1_pkey"
 insert into atacc1 (test2, test) values (1, NULL);
 ERROR:  null value for attribute "test" violates NOT NULL constraint
 drop table atacc1;
diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out
index 84d87e701e1..ee08082d5bd 100644
--- a/src/test/regress/expected/arrays.out
+++ b/src/test/regress/expected/arrays.out
@@ -363,7 +363,7 @@ insert into arr_tbl values ('{1,2,3}');
 insert into arr_tbl values ('{1,2}');
 -- failure expected:
 insert into arr_tbl values ('{1,2,3}');
-ERROR:  Cannot insert a duplicate key into unique index arr_tbl_f1_key
+ERROR:  duplicate key violates UNIQUE constraint "arr_tbl_f1_key"
 insert into arr_tbl values ('{2,3,4}');
 insert into arr_tbl values ('{1,5,3}');
 insert into arr_tbl values ('{1,2,10}');
diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out
index 81e9672b05e..67a777e9d04 100644
--- a/src/test/regress/expected/create_index.out
+++ b/src/test/regress/expected/create_index.out
@@ -78,7 +78,7 @@ INSERT INTO func_index_heap VALUES('AB','CDEFG');
 INSERT INTO func_index_heap VALUES('QWE','RTY');
 -- this should fail because of unique index:
 INSERT INTO func_index_heap VALUES('ABCD', 'EF');
-ERROR:  Cannot insert a duplicate key into unique index func_index_index
+ERROR:  duplicate key violates UNIQUE constraint "func_index_index"
 -- but this shouldn't:
 INSERT INTO func_index_heap VALUES('QWERTY');
 --
@@ -92,7 +92,7 @@ INSERT INTO func_index_heap VALUES('AB','CDEFG');
 INSERT INTO func_index_heap VALUES('QWE','RTY');
 -- this should fail because of unique index:
 INSERT INTO func_index_heap VALUES('ABCD', 'EF');
-ERROR:  Cannot insert a duplicate key into unique index func_index_index
+ERROR:  duplicate key violates UNIQUE constraint "func_index_index"
 -- but this shouldn't:
 INSERT INTO func_index_heap VALUES('QWERTY');
 --
diff --git a/src/test/regress/expected/errors.out b/src/test/regress/expected/errors.out
index 132e2ad709d..2a6c5ff044f 100644
--- a/src/test/regress/expected/errors.out
+++ b/src/test/regress/expected/errors.out
@@ -99,10 +99,10 @@ ERROR:  attribute "oid" of relation "stud_emp" already exists
  
 -- not in a xact 
 abort;
-WARNING:  ROLLBACK: no transaction in progress
+WARNING:  there is no transaction in progress
 -- not in a xact 
 end;
-WARNING:  COMMIT: no transaction in progress
+WARNING:  there is no transaction in progress
 --
 -- CREATE AGGREGATE
 -- sfunc/finalfunc type disagreement 
diff --git a/src/test/regress/expected/plpgsql.out b/src/test/regress/expected/plpgsql.out
index 6ef512a218d..4e6e43cfa87 100644
--- a/src/test/regress/expected/plpgsql.out
+++ b/src/test/regress/expected/plpgsql.out
@@ -1515,7 +1515,7 @@ select * from PField_v1 where pfname = 'PF0_2' order by slotname;
 -- Finally we want errors
 --
 insert into PField values ('PF1_1', 'should fail due to unique index');
-ERROR:  Cannot insert a duplicate key into unique index pfield_name
+ERROR:  duplicate key violates UNIQUE constraint "pfield_name"
 update PSlot set backlink = 'WS.not.there' where slotname = 'PS.base.a1';
 ERROR:  WS.not.there         does not exist
 CONTEXT:  PL/pgSQL function tg_backlink_a line 16 at assignment
@@ -1529,7 +1529,7 @@ update PSlot set slotlink = 'XX.illegal' where slotname = 'PS.base.a1';
 ERROR:  illegal slotlink beginning with XX
 CONTEXT:  PL/pgSQL function tg_slotlink_a line 16 at assignment
 insert into HSlot values ('HS', 'base.hub1', 1, '');
-ERROR:  Cannot insert a duplicate key into unique index hslot_name
+ERROR:  duplicate key violates UNIQUE constraint "hslot_name"
 insert into HSlot values ('HS', 'base.hub1', 20, '');
 ERROR:  no manual manipulation of HSlot
 delete from HSlot;
diff --git a/src/test/regress/output/constraints.source b/src/test/regress/output/constraints.source
index c35bc8b3f25..773e2fda525 100644
--- a/src/test/regress/output/constraints.source
+++ b/src/test/regress/output/constraints.source
@@ -290,7 +290,7 @@ NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "primary_tbl_pkey
 INSERT INTO PRIMARY_TBL VALUES (1, 'one');
 INSERT INTO PRIMARY_TBL VALUES (2, 'two');
 INSERT INTO PRIMARY_TBL VALUES (1, 'three');
-ERROR:  Cannot insert a duplicate key into unique index primary_tbl_pkey
+ERROR:  duplicate key violates UNIQUE constraint "primary_tbl_pkey"
 INSERT INTO PRIMARY_TBL VALUES (4, 'three');
 INSERT INTO PRIMARY_TBL VALUES (5, 'one');
 INSERT INTO PRIMARY_TBL (t) VALUES ('six');
@@ -334,7 +334,7 @@ NOTICE:  CREATE TABLE / UNIQUE will create implicit index "unique_tbl_i_key" for
 INSERT INTO UNIQUE_TBL VALUES (1, 'one');
 INSERT INTO UNIQUE_TBL VALUES (2, 'two');
 INSERT INTO UNIQUE_TBL VALUES (1, 'three');
-ERROR:  Cannot insert a duplicate key into unique index unique_tbl_i_key
+ERROR:  duplicate key violates UNIQUE constraint "unique_tbl_i_key"
 INSERT INTO UNIQUE_TBL VALUES (4, 'four');
 INSERT INTO UNIQUE_TBL VALUES (5, 'one');
 INSERT INTO UNIQUE_TBL (t) VALUES ('six');
@@ -358,7 +358,7 @@ INSERT INTO UNIQUE_TBL VALUES (1, 'one');
 INSERT INTO UNIQUE_TBL VALUES (2, 'two');
 INSERT INTO UNIQUE_TBL VALUES (1, 'three');
 INSERT INTO UNIQUE_TBL VALUES (1, 'one');
-ERROR:  Cannot insert a duplicate key into unique index unique_tbl_i_key
+ERROR:  duplicate key violates UNIQUE constraint "unique_tbl_i_key"
 INSERT INTO UNIQUE_TBL VALUES (5, 'one');
 INSERT INTO UNIQUE_TBL (t) VALUES ('six');
 SELECT '' AS five, * FROM UNIQUE_TBL;
-- 
GitLab