diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 9f1bcf1de4ab665dd703f5dae558f1a5864d62d4..06db65d76f99b5cfad483fe03932ec17da9acfee 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2028,7 +2028,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	 * the heaptup data structure is all in local memory, not in the shared
 	 * buffer.
 	 */
-	CacheInvalidateHeapTuple(relation, heaptup);
+	CacheInvalidateHeapTuple(relation, heaptup, NULL);
 
 	pgstat_count_heap_insert(relation);
 
@@ -2354,7 +2354,7 @@ l1:
 	 * boundary. We have to do this before releasing the buffer because we
 	 * need to look at the contents of the tuple.
 	 */
-	CacheInvalidateHeapTuple(relation, &tp);
+	CacheInvalidateHeapTuple(relation, &tp, NULL);
 
 	/* Now we can release the buffer */
 	ReleaseBuffer(buffer);
@@ -2930,10 +2930,13 @@ l2:
 
 	/*
 	 * Mark old tuple for invalidation from system caches at next command
-	 * boundary. We have to do this before releasing the buffer because we
-	 * need to look at the contents of the tuple.
+	 * boundary, and mark the new tuple for invalidation in case we abort.
+	 * We have to do this before releasing the buffer because oldtup is in
+	 * the buffer.  (heaptup is all in local memory, but it's necessary to
+	 * process both tuple versions in one call to inval.c so we can avoid
+	 * redundant sinval messages.)
 	 */
-	CacheInvalidateHeapTuple(relation, &oldtup);
+	CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
 
 	/* Now we can release the buffer(s) */
 	if (newbuf != buffer)
@@ -2944,14 +2947,6 @@ l2:
 	if (BufferIsValid(vmbuffer))
 		ReleaseBuffer(vmbuffer);
 
-	/*
-	 * If new tuple is cachable, mark it for invalidation from the caches in
-	 * case we abort.  Note it is OK to do this after releasing the buffer,
-	 * because the heaptup data structure is all in local memory, not in the
-	 * shared buffer.
-	 */
-	CacheInvalidateHeapTuple(relation, heaptup);
-
 	/*
 	 * Release the lmgr tuple lock, if we had it.
 	 */
@@ -3659,9 +3654,14 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
 
 	UnlockReleaseBuffer(buffer);
 
-	/* Send out shared cache inval if necessary */
+	/*
+	 * Send out shared cache inval if necessary.  Note that because we only
+	 * pass the new version of the tuple, this mustn't be used for any
+	 * operations that could change catcache lookup keys.  But we aren't
+	 * bothering with index updates either, so that's true a fortiori.
+	 */
 	if (!IsBootstrapProcessingMode())
-		CacheInvalidateHeapTuple(relation, tuple);
+		CacheInvalidateHeapTuple(relation, tuple, NULL);
 }
 
 
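Taken together, the heapam.c hunks above establish a single call convention for CacheInvalidateHeapTuple. A minimal summary sketch, reusing the variable names from the hunks (this recap is not itself part of the patch):

	/* insert: only the new tuple exists */
	CacheInvalidateHeapTuple(relation, heaptup, NULL);

	/* delete: only the old tuple is affected */
	CacheInvalidateHeapTuple(relation, &tp, NULL);

	/* update: both versions in one call, letting inval.c emit a single
	 * sinval message when the cache lookup key (hence hash) is unchanged */
	CacheInvalidateHeapTuple(relation, &oldtup, heaptup);

	/* in-place update: new version only, valid because such updates
	 * cannot change catcache lookup keys */
	CacheInvalidateHeapTuple(relation, tuple, NULL);
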
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 2386a894356939de55064f349e93d6cb1b2bf79b..d42f944cc4e01b5e57f5ea7b5340aaac9d3ac85e 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -192,7 +192,7 @@ static void recomputeNamespacePath(void);
 static void InitTempTableNamespace(void);
 static void RemoveTempRelations(Oid tempNamespaceId);
 static void RemoveTempRelationsCallback(int code, Datum arg);
-static void NamespaceCallback(Datum arg, int cacheid, ItemPointer tuplePtr);
+static void NamespaceCallback(Datum arg, int cacheid, uint32 hashvalue);
 static bool MatchNamedCall(HeapTuple proctup, int nargs, List *argnames,
 			   int **argnumbers);
 
@@ -3750,7 +3750,7 @@ InitializeSearchPath(void)
  *		Syscache inval callback function
  */
 static void
-NamespaceCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
+NamespaceCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
 	/* Force search path to be recomputed on next use */
 	baseSearchPathValid = false;
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index d0704ed0718040390f2a0d37447519a6e5b3b333..108baa6144ff461db975d12be3c80417ab45c0b0 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -964,8 +964,7 @@ _copyPlanInvalItem(PlanInvalItem *from)
 	PlanInvalItem *newnode = makeNode(PlanInvalItem);
 
 	COPY_SCALAR_FIELD(cacheId);
-	/* tupleId isn't really a "scalar", but this works anyway */
-	COPY_SCALAR_FIELD(tupleId);
+	COPY_SCALAR_FIELD(hashValue);
 
 	return newnode;
 }
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 417aeb882212d78576149b3b38a1ef8bee490e13..627a8f51ee83b4b85e7c23f52f4d2068f2a1fbee 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -845,9 +845,7 @@ _outPlanInvalItem(StringInfo str, PlanInvalItem *node)
 	WRITE_NODE_TYPE("PLANINVALITEM");
 
 	WRITE_INT_FIELD(cacheId);
-	appendStringInfo(str, " :tupleId (%u,%u)",
-					 ItemPointerGetBlockNumber(&node->tupleId),
-					 ItemPointerGetOffsetNumber(&node->tupleId));
+	WRITE_UINT_FIELD(hashValue);
 }
 
 /*****************************************************************************
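With hashValue emitted via WRITE_UINT_FIELD, a serialized PlanInvalItem now reads along these lines (field values illustrative only):

	{PLANINVALITEM :cacheId 42 :hashValue 3735928559}

replacing the old " :tupleId (blkno,offset)" representation.
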
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 60a1484c992b7b90476d6f60904ee4108bae891c..c3a5aac2fab95423c5f83a7a908113b3e8198b88 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -15,6 +15,7 @@
  */
 #include "postgres.h"
 
+#include "access/hash.h"
 #include "access/transam.h"
 #include "catalog/pg_type.h"
 #include "nodes/makefuncs.h"
@@ -1751,25 +1752,21 @@ record_plan_function_dependency(PlannerGlobal *glob, Oid funcid)
 	 */
 	if (funcid >= (Oid) FirstBootstrapObjectId)
 	{
-		HeapTuple	func_tuple;
-		PlanInvalItem *inval_item;
-
-		func_tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
-		if (!HeapTupleIsValid(func_tuple))
-			elog(ERROR, "cache lookup failed for function %u", funcid);
-
-		inval_item = makeNode(PlanInvalItem);
+		PlanInvalItem *inval_item = makeNode(PlanInvalItem);
 
 		/*
-		 * It would work to use any syscache on pg_proc, but plancache.c
-		 * expects us to use PROCOID.
+		 * It would work to use any syscache on pg_proc, but the easiest is
+		 * PROCOID since we already have the function's OID at hand.  Note
+		 * that plancache.c knows we use PROCOID.  Also, we're perhaps
+		 * assuming more than we should about how CatalogCacheComputeHashValue
+		 * computes hash values...
 		 */
 		inval_item->cacheId = PROCOID;
-		inval_item->tupleId = func_tuple->t_self;
+		inval_item->hashValue =
+			DatumGetUInt32(DirectFunctionCall1(hashoid,
+											   ObjectIdGetDatum(funcid)));
 
 		glob->invalItems = lappend(glob->invalItems, inval_item);
-
-		ReleaseSysCache(func_tuple);
 	}
 }
 
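The caveat in the new comment is worth taking seriously: this call hard-codes the knowledge that a one-key OID syscache hashes its key with hashoid. A hedged sketch of one way to remove that coupling, by letting the syscache layer own the computation (GetSysCacheHashValue1 is a hypothetical helper here, not something this patch provides):

	/* hypothetical addition to syscache.h */
	extern uint32 GetSysCacheHashValue1(int cacheId, Datum key1);

	/* record_plan_function_dependency would then simply write: */
	inval_item->hashValue = GetSysCacheHashValue1(PROCOID,
												  ObjectIdGetDatum(funcid));
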
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index beabafb5a8660c3fa8a65d2b53e98115b7e26a74..cb10a31b077b2e616151fd5154c556e9631a382e 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -101,7 +101,7 @@ static bool list_member_strip(List *list, Expr *datum);
 static bool btree_predicate_proof(Expr *predicate, Node *clause,
 					  bool refute_it);
 static Oid	get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it);
-static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, ItemPointer tuplePtr);
+static void InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashvalue);
 
 
 /*
@@ -1738,7 +1738,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
  * Callback for pg_amop inval events
  */
 static void
-InvalidateOprProofCacheCallBack(Datum arg, int cacheid, ItemPointer tuplePtr)
+InvalidateOprProofCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
 {
 	HASH_SEQ_STATUS status;
 	OprProofCacheEntry *hentry;
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index 4a2f77771b8d8a29f6944b796a3e30a901cadaa7..d4cd1efa64ab8e9c90d9a2c1e68b5a76d1fe9008 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -79,7 +79,7 @@ static bool make_oper_cache_key(OprCacheKey *key, List *opname,
 					Oid ltypeId, Oid rtypeId);
 static Oid	find_oper_cache_entry(OprCacheKey *key);
 static void make_oper_cache_entry(OprCacheKey *key, Oid opr_oid);
-static void InvalidateOprCacheCallBack(Datum arg, int cacheid, ItemPointer tuplePtr);
+static void InvalidateOprCacheCallBack(Datum arg, int cacheid, uint32 hashvalue);
 
 
 /*
@@ -1104,7 +1104,7 @@ make_oper_cache_entry(OprCacheKey *key, Oid opr_oid)
  * Callback for pg_operator and pg_cast inval events
  */
 static void
-InvalidateOprCacheCallBack(Datum arg, int cacheid, ItemPointer tuplePtr)
+InvalidateOprCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
 {
 	HASH_SEQ_STATUS status;
 	OprCacheEntry *hentry;
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 3fa95e2fd394c240b827cfc0ad3b4bf576df158d..e79ba50ad3315ab25dd5e5030a83ad5c627be84d 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -112,7 +112,7 @@ static AclMode convert_tablespace_priv_string(text *priv_type_text);
 static AclMode convert_role_priv_string(text *priv_type_text);
 static AclResult pg_role_aclcheck(Oid role_oid, Oid roleid, AclMode mode);
 
-static void RoleMembershipCacheCallback(Datum arg, int cacheid, ItemPointer tuplePtr);
+static void RoleMembershipCacheCallback(Datum arg, int cacheid, uint32 hashvalue);
 static Oid	get_role_oid_or_public(const char *rolname);
 
 
@@ -4355,7 +4355,7 @@ initialize_acl(void)
  *		Syscache inval callback function
  */
 static void
-RoleMembershipCacheCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
+RoleMembershipCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
 	/* Force membership caches to be recomputed on next use */
 	cached_privs_role = InvalidOid;
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 7018ccfe62a6d2a71e0274c93f2f73d3654b0126..ba39aa8ee7f61636692148d2ddd6fb754b43a1fb 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -53,7 +53,7 @@ typedef struct
  * query execution), this seems OK.
  */
 static void
-InvalidateAttoptCacheCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
+InvalidateAttoptCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
 	HASH_SEQ_STATUS status;
 	AttoptCacheEntry *attopt;
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 6a0c020ff97df4e7ae24fefde4fd596ec70cb6d5..f43e4181e781190222a6e82fcec9482c9366fc64 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -435,21 +435,14 @@ CatCacheRemoveCList(CatCache *cache, CatCList *cl)
  *	target tuple that has to be invalidated has a different TID than it
  *	did when the event was created.  So now we just compare hash values and
  *	accept the small risk of unnecessary invalidations due to false matches.
- *	(The ItemPointer argument is therefore useless and should get removed.)
  *
  *	This routine is only quasi-public: it should only be used by inval.c.
  */
 void
-CatalogCacheIdInvalidate(int cacheId,
-						 uint32 hashValue,
-						 ItemPointer pointer)
+CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
 {
 	CatCache   *ccp;
 
-	/*
-	 * sanity checks
-	 */
-	Assert(ItemPointerIsValid(pointer));
 	CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");
 
 	/*
@@ -699,7 +692,7 @@ CatalogCacheFlushCatalog(Oid catId)
 			ResetCatalogCache(cache);
 
 			/* Tell inval.c to call syscache callbacks for this cache */
-			CallSyscacheCallbacks(cache->id, NULL);
+			CallSyscacheCallbacks(cache->id, 0);
 		}
 	}
 
@@ -1708,11 +1701,16 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
  *	The lists of tuples that need to be flushed are kept by inval.c.  This
  *	routine is a helper routine for inval.c.  Given a tuple belonging to
  *	the specified relation, find all catcaches it could be in, compute the
- *	correct hash value for each such catcache, and call the specified function
- *	to record the cache id, hash value, and tuple ItemPointer in inval.c's
- *	lists.	CatalogCacheIdInvalidate will be called later, if appropriate,
+ *	correct hash value for each such catcache, and call the specified
+ *	function to record the cache id and hash value in inval.c's lists.
+ *	CatalogCacheIdInvalidate will be called later, if appropriate,
  *	using the recorded information.
  *
+ *	For an insert or delete, tuple is the target tuple and newtuple is NULL.
+ *	For an update, we are called just once, with tuple being the old tuple
+ *	version and newtuple the new version.  We should make two list entries
+ *	if the tuple's hash value changed, but only one if it didn't.
+ *
  *	Note that it is irrelevant whether the given tuple is actually loaded
  *	into the catcache at the moment.  Even if it's not there now, it might
  *	be by the end of the command, or there might be a matching negative entry
@@ -1727,7 +1725,8 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
 void
 PrepareToInvalidateCacheTuple(Relation relation,
 							  HeapTuple tuple,
-							void (*function) (int, uint32, ItemPointer, Oid))
+							  HeapTuple newtuple,
+							  void (*function) (int, uint32, Oid))
 {
 	CatCache   *ccp;
 	Oid			reloid;
@@ -1747,13 +1746,16 @@ PrepareToInvalidateCacheTuple(Relation relation,
 	/* ----------------
 	 *	for each cache
 	 *	   if the cache contains tuples from the specified relation
-	 *		   compute the tuple's hash value in this cache,
+	 *		   compute the tuple's hash value(s) in this cache,
 	 *		   and call the passed function to register the information.
 	 * ----------------
 	 */
 
 	for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
 	{
+		uint32		hashvalue;
+		Oid			dbid;
+
 		if (ccp->cc_reloid != reloid)
 			continue;
 
@@ -1761,10 +1763,20 @@ PrepareToInvalidateCacheTuple(Relation relation,
 		if (ccp->cc_tupdesc == NULL)
 			CatalogCacheInitializeCache(ccp);
 
-		(*function) (ccp->id,
-					 CatalogCacheComputeTupleHashValue(ccp, tuple),
-					 &tuple->t_self,
-					 ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);
+		hashvalue = CatalogCacheComputeTupleHashValue(ccp, tuple);
+		dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
+
+		(*function) (ccp->id, hashvalue, dbid);
+
+		if (newtuple)
+		{
+			uint32		newhashvalue;
+
+			newhashvalue = CatalogCacheComputeTupleHashValue(ccp, newtuple);
+
+			if (newhashvalue != hashvalue)
+				(*function) (ccp->id, newhashvalue, dbid);
+		}
 	}
 }
 
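The one-or-two-entries rule in the new header comment is easiest to see in isolation. A standalone model in plain C (not PostgreSQL code; register_inval stands in for the function pointer passed by inval.c):

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for RegisterCatcacheInvalidation */
	static void
	register_inval(int cacheid, uint32_t hashvalue)
	{
		printf("inval: cache %d, hash %08x\n", cacheid, (unsigned) hashvalue);
	}

	/* models the newtuple branch added to PrepareToInvalidateCacheTuple */
	static void
	prepare_update(int cacheid, uint32_t oldhash, uint32_t newhash)
	{
		register_inval(cacheid, oldhash);	/* always flush the old version */
		if (newhash != oldhash)				/* key changed: flush new key too */
			register_inval(cacheid, newhash);
	}

	int
	main(void)
	{
		prepare_update(42, 0xdeadbeefU, 0xdeadbeefU);	/* one message */
		prepare_update(42, 0xdeadbeefU, 0x0badf00dU);	/* two messages */
		return 0;
	}
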
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 4249bd337654bf1134f6d971c4e267f750ae75eb..8792ec4084200a82735d8fdb6a321e7ede22ee6a 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -39,8 +39,8 @@
  *
  *	In short, we need to remember until xact end every insert or delete
  *	of a tuple that might be in the system caches.	Updates are treated as
- *	two events, delete + insert, for simplicity.  (There are cases where
- *	it'd be possible to record just one event, but we don't currently try.)
+ *	two events, delete + insert, for simplicity.  (If the update doesn't
+ *	change the tuple hash value, catcache.c optimizes this into one event.)
  *
  *	We do not need to register EVERY tuple operation in this way, just those
  *	on tuples in relations that have associated catcaches.	We do, however,
@@ -314,14 +314,12 @@ AppendInvalidationMessageList(InvalidationChunk **destHdr,
  */
 static void
 AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
-							   int id, uint32 hashValue,
-							   ItemPointer tuplePtr, Oid dbId)
+							   int id, uint32 hashValue, Oid dbId)
 {
 	SharedInvalidationMessage msg;
 
 	Assert(id < CHAR_MAX);
 	msg.cc.id = (int8) id;
-	msg.cc.tuplePtr = *tuplePtr;
 	msg.cc.dbId = dbId;
 	msg.cc.hashValue = hashValue;
 	AddInvalidationMessage(&hdr->cclist, &msg);
@@ -416,11 +414,10 @@ ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr,
 static void
 RegisterCatcacheInvalidation(int cacheId,
 							 uint32 hashValue,
-							 ItemPointer tuplePtr,
 							 Oid dbId)
 {
 	AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
-								   cacheId, hashValue, tuplePtr, dbId);
+								   cacheId, hashValue, dbId);
 }
 
 /*
@@ -476,11 +473,9 @@ LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
 	{
 		if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
 		{
-			CatalogCacheIdInvalidate(msg->cc.id,
-									 msg->cc.hashValue,
-									 &msg->cc.tuplePtr);
+			CatalogCacheIdInvalidate(msg->cc.id, msg->cc.hashValue);
 
-			CallSyscacheCallbacks(msg->cc.id, &msg->cc.tuplePtr);
+			CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
 		}
 	}
 	else if (msg->id == SHAREDINVALCATALOG_ID)
@@ -555,7 +550,7 @@ InvalidateSystemCaches(void)
 	{
 		struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
 
-		(*ccitem->function) (ccitem->arg, ccitem->id, NULL);
+		(*ccitem->function) (ccitem->arg, ccitem->id, 0);
 	}
 
 	for (i = 0; i < relcache_callback_count; i++)
@@ -566,98 +561,6 @@ InvalidateSystemCaches(void)
 	}
 }
 
-/*
- * PrepareForTupleInvalidation
- *		Detect whether invalidation of this tuple implies invalidation
- *		of catalog/relation cache entries; if so, register inval events.
- */
-static void
-PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
-{
-	Oid			tupleRelId;
-	Oid			databaseId;
-	Oid			relationId;
-
-	/* Do nothing during bootstrap */
-	if (IsBootstrapProcessingMode())
-		return;
-
-	/*
-	 * We only need to worry about invalidation for tuples that are in system
-	 * relations; user-relation tuples are never in catcaches and can't affect
-	 * the relcache either.
-	 */
-	if (!IsSystemRelation(relation))
-		return;
-
-	/*
-	 * TOAST tuples can likewise be ignored here. Note that TOAST tables are
-	 * considered system relations so they are not filtered by the above test.
-	 */
-	if (IsToastRelation(relation))
-		return;
-
-	/*
-	 * First let the catcache do its thing
-	 */
-	PrepareToInvalidateCacheTuple(relation, tuple,
-								  RegisterCatcacheInvalidation);
-
-	/*
-	 * Now, is this tuple one of the primary definers of a relcache entry?
-	 */
-	tupleRelId = RelationGetRelid(relation);
-
-	if (tupleRelId == RelationRelationId)
-	{
-		Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
-
-		relationId = HeapTupleGetOid(tuple);
-		if (classtup->relisshared)
-			databaseId = InvalidOid;
-		else
-			databaseId = MyDatabaseId;
-	}
-	else if (tupleRelId == AttributeRelationId)
-	{
-		Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
-
-		relationId = atttup->attrelid;
-
-		/*
-		 * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
-		 * even if the rel in question is shared (which we can't easily tell).
-		 * This essentially means that only backends in this same database
-		 * will react to the relcache flush request.  This is in fact
-		 * appropriate, since only those backends could see our pg_attribute
-		 * change anyway.  It looks a bit ugly though.	(In practice, shared
-		 * relations can't have schema changes after bootstrap, so we should
-		 * never come here for a shared rel anyway.)
-		 */
-		databaseId = MyDatabaseId;
-	}
-	else if (tupleRelId == IndexRelationId)
-	{
-		Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
-
-		/*
-		 * When a pg_index row is updated, we should send out a relcache inval
-		 * for the index relation.	As above, we don't know the shared status
-		 * of the index, but in practice it doesn't matter since indexes of
-		 * shared catalogs can't have such updates.
-		 */
-		relationId = indextup->indexrelid;
-		databaseId = MyDatabaseId;
-	}
-	else
-		return;
-
-	/*
-	 * Yes.  We need to register a relcache invalidation event.
-	 */
-	RegisterRelcacheInvalidation(databaseId, relationId);
-}
-
 
 /* ----------------------------------------------------------------
  *					  public functions
@@ -1056,11 +959,103 @@ CommandEndInvalidationMessages(void)
  * CacheInvalidateHeapTuple
  *		Register the given tuple for invalidation at end of command
  *		(ie, current command is creating or outdating this tuple).
+ *		Also, detect whether a relcache invalidation is implied.
+ *
+ * For an insert or delete, tuple is the target tuple and newtuple is NULL.
+ * For an update, we are called just once, with tuple being the old tuple
+ * version and newtuple the new version.  This allows avoidance of duplicate
+ * effort during an update.
  */
 void
-CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple)
+CacheInvalidateHeapTuple(Relation relation,
+						 HeapTuple tuple,
+						 HeapTuple newtuple)
 {
-	PrepareForTupleInvalidation(relation, tuple);
+	Oid			tupleRelId;
+	Oid			databaseId;
+	Oid			relationId;
+
+	/* Do nothing during bootstrap */
+	if (IsBootstrapProcessingMode())
+		return;
+
+	/*
+	 * We only need to worry about invalidation for tuples that are in system
+	 * relations; user-relation tuples are never in catcaches and can't affect
+	 * the relcache either.
+	 */
+	if (!IsSystemRelation(relation))
+		return;
+
+	/*
+	 * TOAST tuples can likewise be ignored here. Note that TOAST tables are
+	 * considered system relations so they are not filtered by the above test.
+	 */
+	if (IsToastRelation(relation))
+		return;
+
+	/*
+	 * First let the catcache do its thing
+	 */
+	PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
+								  RegisterCatcacheInvalidation);
+
+	/*
+	 * Now, is this tuple one of the primary definers of a relcache entry?
+	 *
+	 * Note we ignore newtuple here; we assume an update cannot move a tuple
+	 * from being part of one relcache entry to being part of another.
+	 */
+	tupleRelId = RelationGetRelid(relation);
+
+	if (tupleRelId == RelationRelationId)
+	{
+		Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);
+
+		relationId = HeapTupleGetOid(tuple);
+		if (classtup->relisshared)
+			databaseId = InvalidOid;
+		else
+			databaseId = MyDatabaseId;
+	}
+	else if (tupleRelId == AttributeRelationId)
+	{
+		Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
+
+		relationId = atttup->attrelid;
+
+		/*
+		 * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
+		 * even if the rel in question is shared (which we can't easily tell).
+		 * This essentially means that only backends in this same database
+		 * will react to the relcache flush request.  This is in fact
+		 * appropriate, since only those backends could see our pg_attribute
+		 * change anyway.  It looks a bit ugly though.	(In practice, shared
+		 * relations can't have schema changes after bootstrap, so we should
+		 * never come here for a shared rel anyway.)
+		 */
+		databaseId = MyDatabaseId;
+	}
+	else if (tupleRelId == IndexRelationId)
+	{
+		Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);
+
+		/*
+		 * When a pg_index row is updated, we should send out a relcache inval
+		 * for the index relation.	As above, we don't know the shared status
+		 * of the index, but in practice it doesn't matter since indexes of
+		 * shared catalogs can't have such updates.
+		 */
+		relationId = indextup->indexrelid;
+		databaseId = MyDatabaseId;
+	}
+	else
+		return;
+
+	/*
+	 * Yes.  We need to register a relcache invalidation event.
+	 */
+	RegisterRelcacheInvalidation(databaseId, relationId);
 }
 
 /*
@@ -1094,7 +1089,7 @@ CacheInvalidateCatalog(Oid catalogId)
  *
  * This is used in places that need to force relcache rebuild but aren't
  * changing any of the tuples recognized as contributors to the relcache
- * entry by PrepareForTupleInvalidation.  (An example is dropping an index.)
+ * entry by CacheInvalidateHeapTuple.  (An example is dropping an index.)
  */
 void
 CacheInvalidateRelcache(Relation relation)
@@ -1216,10 +1211,14 @@ CacheInvalidateRelmap(Oid databaseId)
  * CacheRegisterSyscacheCallback
  *		Register the specified function to be called for all future
  *		invalidation events in the specified cache.  The cache ID and the
- *		TID of the tuple being invalidated will be passed to the function.
+ *		hash value of the tuple being invalidated will be passed to the
+ *		function.
  *
- * NOTE: NULL will be passed for the TID if a cache reset request is received.
+ * NOTE: Hash value zero will be passed if a cache reset request is received.
  * In this case the called routines should flush all cached state.
+ * Yes, there's a possibility of a false match to zero, but it doesn't seem
+ * worth troubling over, especially since most of the current callees just
+ * flush all cached state anyway.
  */
 void
 CacheRegisterSyscacheCallback(int cacheid,
@@ -1265,7 +1264,7 @@ CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
  * this module from knowing which catcache IDs correspond to which catalogs.
  */
 void
-CallSyscacheCallbacks(int cacheid, ItemPointer tuplePtr)
+CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
 {
 	int			i;
 
@@ -1274,6 +1273,6 @@ CallSyscacheCallbacks(int cacheid, ItemPointer tuplePtr)
 		struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;
 
 		if (ccitem->id == cacheid)
-			(*ccitem->function) (ccitem->arg, cacheid, tuplePtr);
+			(*ccitem->function) (ccitem->arg, cacheid, hashvalue);
 	}
 }
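For callback authors, the upshot of the new signature is that hashvalue identifies the affected entry, with 0 meaning "reset, flush everything". That permits targeted invalidation rather than the wholesale resets most callbacks in this patch perform. A hedged sketch, assuming a dynahash table whose entries remember the syscache hash they were built from (MyCacheEntry and my_cache_htab are hypothetical names):

	static void
	MyCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
	{
		HASH_SEQ_STATUS status;
		MyCacheEntry *entry;

		hash_seq_init(&status, my_cache_htab);
		while ((entry = (MyCacheEntry *) hash_seq_search(&status)) != NULL)
		{
			/* hashvalue == 0 is a reset request: invalidate everything */
			if (hashvalue == 0 || entry->syscache_hash == hashvalue)
				entry->valid = false;	/* rebuild on next lookup */
		}
	}
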
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 08ddfa9bcbad73fa37d02168b0e9dadb71e15430..1410dec1e90d7ef92bd07f5529c55597e54be499 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -71,8 +71,8 @@ static void ScanQueryForLocks(Query *parsetree, bool acquire);
 static bool ScanQueryWalker(Node *node, bool *acquire);
 static bool plan_list_is_transient(List *stmt_list);
 static void PlanCacheRelCallback(Datum arg, Oid relid);
-static void PlanCacheFuncCallback(Datum arg, int cacheid, ItemPointer tuplePtr);
-static void PlanCacheSysCallback(Datum arg, int cacheid, ItemPointer tuplePtr);
+static void PlanCacheFuncCallback(Datum arg, int cacheid, uint32 hashvalue);
+static void PlanCacheSysCallback(Datum arg, int cacheid, uint32 hashvalue);
 
 
 /*
@@ -1029,14 +1029,14 @@ PlanCacheRelCallback(Datum arg, Oid relid)
  * PlanCacheFuncCallback
  *		Syscache inval callback function for PROCOID cache
  *
- * Invalidate all plans mentioning the given catalog entry, or all plans
- * mentioning any member of this cache if tuplePtr == NULL.
+ * Invalidate all plans mentioning the object with the specified hash value,
+ * or all plans mentioning any member of this cache if hashvalue == 0.
  *
  * Note that the coding would support use for multiple caches, but right
  * now only user-defined functions are tracked this way.
  */
 static void
-PlanCacheFuncCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
+PlanCacheFuncCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
 	ListCell   *lc1;
 
@@ -1060,8 +1060,8 @@ PlanCacheFuncCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
 
 			if (item->cacheId != cacheid)
 				continue;
-			if (tuplePtr == NULL ||
-				ItemPointerEquals(tuplePtr, &item->tupleId))
+			if (hashvalue == 0 ||
+				item->hashValue == hashvalue)
 			{
 				/* Invalidate the plan! */
 				plan->dead = true;
@@ -1086,8 +1086,8 @@ PlanCacheFuncCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
 
 					if (item->cacheId != cacheid)
 						continue;
-					if (tuplePtr == NULL ||
-						ItemPointerEquals(tuplePtr, &item->tupleId))
+					if (hashvalue == 0 ||
+						item->hashValue == hashvalue)
 					{
 						/* Invalidate the plan! */
 						plan->dead = true;
@@ -1108,7 +1108,7 @@ PlanCacheFuncCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
  * Just invalidate everything...
  */
 static void
-PlanCacheSysCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
+PlanCacheSysCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
 	ResetPlanCache();
 }
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index 57e5d0342a662a7544a6866880f3cbc1020abc61..b505f219243fa7a4ccd034188e0d0d20d888ecd2 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -50,7 +50,7 @@ typedef struct
  * tablespaces, nor do we expect them to be frequently modified.
  */
 static void
-InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
+InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
 	HASH_SEQ_STATUS status;
 	TableSpaceCacheEntry *spc;
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index a8c4d76565a375d888dc0cc28cefac23969d7daa..cffa2384385299ecfe488b0d1af452d063a0e099 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -90,7 +90,7 @@ static Oid	TSCurrentConfigCache = InvalidOid;
  * table address as the "arg".
  */
 static void
-InvalidateTSCacheCallBack(Datum arg, int cacheid, ItemPointer tuplePtr)
+InvalidateTSCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
 {
 	HTAB	   *hash = (HTAB *) DatumGetPointer(arg);
 	HASH_SEQ_STATUS status;
diff --git a/src/backend/utils/misc/superuser.c b/src/backend/utils/misc/superuser.c
index e70b1f5ccfb34489e27b437987730d58599e9405..bbc160110db9879a2b0362ecf8fa9ddcaae21b61 100644
--- a/src/backend/utils/misc/superuser.c
+++ b/src/backend/utils/misc/superuser.c
@@ -36,7 +36,7 @@ static Oid	last_roleid = InvalidOid;	/* InvalidOid == cache not valid */
 static bool last_roleid_is_super = false;
 static bool roleid_callback_registered = false;
 
-static void RoleidCallback(Datum arg, int cacheid, ItemPointer tuplePtr);
+static void RoleidCallback(Datum arg, int cacheid, uint32 hashvalue);
 
 
 /*
@@ -96,11 +96,11 @@ superuser_arg(Oid roleid)
 }
 
 /*
- * UseridCallback
+ * RoleidCallback
  *		Syscache inval callback function
  */
 static void
-RoleidCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
+RoleidCallback(Datum arg, int cacheid, uint32 hashvalue)
 {
 	/* Invalidate our local cache in case role's superuserness changed */
 	last_roleid = InvalidOid;
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 7c085b3f4f66610cf11b0e582f5395e31fb207c8..852ef775309937e82dfe5b53263772eb7387a4a5 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -17,7 +17,6 @@
 #include "access/sdir.h"
 #include "nodes/bitmapset.h"
 #include "nodes/primnodes.h"
-#include "storage/itemptr.h"
 
 
 /* ----------------------------------------------------------------
@@ -793,13 +792,13 @@ typedef struct PlanRowMark
  * relations are recorded as a simple list of OIDs, and everything else
  * is represented as a list of PlanInvalItems.	A PlanInvalItem is designed
  * to be used with the syscache invalidation mechanism, so it identifies a
- * system catalog entry by cache ID and tuple TID.
+ * system catalog entry by cache ID and hash value.
  */
 typedef struct PlanInvalItem
 {
 	NodeTag		type;
 	int			cacheId;		/* a syscache ID, see utils/syscache.h */
-	ItemPointerData tupleId;	/* TID of the object's catalog tuple */
+	uint32		hashValue;		/* hash value of object's cache lookup key */
 } PlanInvalItem;
 
 #endif   /* PLANNODES_H */
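Concretely, a cached plan calling one user-defined function would carry a dependency like this (illustrative values; C99 designated initializers used for brevity):

	PlanInvalItem item = {
		.type = T_PlanInvalItem,
		.cacheId = PROCOID,			/* syscache the function lives in */
		.hashValue = 0x5a3c91d2		/* hash of the function's OID */
	};
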
diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h
index aba474d23714a6d8f3a283f808c0b20d5094e443..b468468fa480f005e244b646cadd187895080b68 100644
--- a/src/include/storage/sinval.h
+++ b/src/include/storage/sinval.h
@@ -14,7 +14,6 @@
 #ifndef SINVAL_H
 #define SINVAL_H
 
-#include "storage/itemptr.h"
 #include "storage/relfilenode.h"
 
 
@@ -32,22 +31,17 @@
  *
  * Catcache inval events are initially driven by detecting tuple inserts,
  * updates and deletions in system catalogs (see CacheInvalidateHeapTuple).
- * An update generates two inval events, one for the old tuple and one for
- * the new --- this is needed to get rid of both positive entries for the
- * old tuple, and negative cache entries associated with the new tuple's
- * cache key.  (This could perhaps be optimized down to one event when the
- * cache key is not changing, but for now we don't bother to try.)  Note that
- * the inval events themselves don't actually say whether the tuple is being
- * inserted or deleted.
+ * An update can generate two inval events, one for the old tuple and one for
+ * the new, but this is reduced to one event if the tuple's hash key doesn't
+ * change.  Note that the inval events themselves don't actually say whether
+ * the tuple is being inserted or deleted.  Also, since we transmit only a
+ * hash key, there is a small risk of unnecessary invalidations due to chance
+ * matches of hash keys.
  *
  * Note that some system catalogs have multiple caches on them (with different
  * indexes).  On detecting a tuple invalidation in such a catalog, separate
- * catcache inval messages must be generated for each of its caches.  The
- * catcache inval messages carry the hash value for the target tuple, so
- * that the catcache only needs to search one hash chain not all its chains,
- * and so that negative cache entries can be recognized with good accuracy.
- * (Of course this assumes that all the backends are using identical hashing
- * code, but that should be OK.)
+ * catcache inval messages must be generated for each of its caches, since
+ * the hash keys will generally be different.
  *
  * Catcache and relcache invalidations are transactional, and so are sent
  * to other backends upon commit.  Internally to the generating backend,
@@ -62,9 +56,7 @@
 
 typedef struct
 {
-	/* note: field layout chosen with an eye to alignment concerns */
 	int8		id;				/* cache ID --- must be first */
-	ItemPointerData tuplePtr;	/* tuple identifier in cached relation */
 	Oid			dbId;			/* database ID, or 0 if a shared relation */
 	uint32		hashValue;		/* hash value of key for this catcache */
 } SharedInvalCatcacheMsg;
@@ -91,6 +83,7 @@ typedef struct
 
 typedef struct
 {
+	/* note: field layout chosen to pack into 16 bytes */
 	int8		id;				/* type field --- must be first */
 	int8		backend_hi;		/* high bits of backend ID, if temprel */
 	uint16		backend_lo;		/* low bits of backend ID, if temprel */
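The 16-byte packing note above can be verified mechanically. A standalone sketch (not part of the patch), assuming the field following backend_lo is the usual three-OID RelFileNode:

	#include <stdint.h>

	typedef struct { uint32_t spcNode, dbNode, relNode; } RelFileNodeModel;

	typedef struct
	{
		int8_t	 id;			/* type field */
		int8_t	 backend_hi;	/* high bits of backend ID */
		uint16_t backend_lo;	/* low bits of backend ID */
		RelFileNodeModel rnode;	/* 12 bytes, naturally aligned */
	} SmgrMsgModel;

	/* C11: with 4-byte fields this packs with zero padding */
	_Static_assert(sizeof(SmgrMsgModel) == 16, "expected 16-byte layout");
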
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index 7a990528e754e2013964c07a64c2dff753914818..84163d0b7fb0aaeffc1580ef149aa84ba66e4b44 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -181,11 +181,11 @@ extern void ReleaseCatCacheList(CatCList *list);
 
 extern void ResetCatalogCaches(void);
 extern void CatalogCacheFlushCatalog(Oid catId);
-extern void CatalogCacheIdInvalidate(int cacheId, uint32 hashValue,
-						 ItemPointer pointer);
+extern void CatalogCacheIdInvalidate(int cacheId, uint32 hashValue);
 extern void PrepareToInvalidateCacheTuple(Relation relation,
 							  HeapTuple tuple,
-						   void (*function) (int, uint32, ItemPointer, Oid));
+							  HeapTuple newtuple,
+							  void (*function) (int, uint32, Oid));
 
 extern void PrintCatCacheLeakWarning(HeapTuple tuple);
 extern void PrintCatCacheListLeakWarning(CatCList *list);
diff --git a/src/include/utils/inval.h b/src/include/utils/inval.h
index dda2a63d6e10fe5938cb6a0aab580ace7b146dbf..606b778f10e29f193a71ac877340e2cc597463aa 100644
--- a/src/include/utils/inval.h
+++ b/src/include/utils/inval.h
@@ -19,7 +19,7 @@
 #include "utils/relcache.h"
 
 
-typedef void (*SyscacheCallbackFunction) (Datum arg, int cacheid, ItemPointer tuplePtr);
+typedef void (*SyscacheCallbackFunction) (Datum arg, int cacheid, uint32 hashvalue);
 typedef void (*RelcacheCallbackFunction) (Datum arg, Oid relid);
 
 
@@ -39,7 +39,9 @@ extern void PostPrepare_Inval(void);
 
 extern void CommandEndInvalidationMessages(void);
 
-extern void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple);
+extern void CacheInvalidateHeapTuple(Relation relation,
+						 HeapTuple tuple,
+						 HeapTuple newtuple);
 
 extern void CacheInvalidateCatalog(Oid catalogId);
 
@@ -60,7 +62,7 @@ extern void CacheRegisterSyscacheCallback(int cacheid,
 extern void CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
 							  Datum arg);
 
-extern void CallSyscacheCallbacks(int cacheid, ItemPointer tuplePtr);
+extern void CallSyscacheCallbacks(int cacheid, uint32 hashvalue);
 
 extern void inval_twophase_postcommit(TransactionId xid, uint16 info,
 						  void *recdata, uint32 len);