diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index d5f36e4e4d9468db715b0231209f82b50eb71473..4bb638d6c9374a05dd0fd0162fb372013bb17721 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.84 2000/08/04 04:16:06 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.85 2000/09/07 09:58:34 vadim Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -24,7 +24,7 @@
  *		heap_fetch		- retrive tuple with tid
  *		heap_insert		- insert tuple into a relation
  *		heap_delete		- delete a tuple from a relation
- *		heap_update - replace a tuple in a relation with another tuple
+ *		heap_update		- replace a tuple in a relation with another tuple
  *		heap_markpos	- mark scan position
  *		heap_restrpos	- restore position to marked location
  *
@@ -86,6 +86,10 @@
 #include "utils/inval.h"
 #include "utils/relcache.h"
 
+#ifdef XLOG	/* comments are in heap_update */
+static ItemPointerData	_locked_tuple;
+#endif
+
 
 /* ----------------------------------------------------------------
  *						 heap support routines
@@ -1367,7 +1371,7 @@ heap_insert(Relation relation, HeapTuple tup)
 #endif
 
 	/* Find buffer for this tuple */
-	buffer = RelationGetBufferForTuple(relation, tup->t_len, InvalidBuffer);
+	buffer = RelationGetBufferForTuple(relation, tup->t_len);
 
 	/* NO ELOG(ERROR) from here till changes are logged */
 	RelationPutHeapTuple(relation, buffer, tup);
@@ -1376,10 +1380,9 @@ heap_insert(Relation relation, HeapTuple tup)
 	/* XLOG stuff */
 	{
 		xl_heap_insert	xlrec;
-		xlrec.itid.dbId = relation->rd_lockInfo.lockRelId.dbId;
-		xlrec.itid.relId = relation->rd_lockInfo.lockRelId.relId;
-		xlrec.itid.cid = GetCurrentCommandId();
-		xlrec.itid.tid = tup->t_self;
+		xlrec.target.node = relation->rd_node;
+		xlrec.target.cid = GetCurrentCommandId();
+		xlrec.target.tid = tup->t_self;
 		xlrec.t_natts = tup->t_data->t_natts;
 		xlrec.t_oid = tup->t_data->t_oid;
 		xlrec.t_hoff = tup->t_data->t_hoff;
@@ -1390,8 +1393,8 @@ heap_insert(Relation relation, HeapTuple tup)
 			(char*) tup->t_data + offsetof(HeapTupleHeaderData, t_bits), 
 			tup->t_len - offsetof(HeapTupleHeaderData, t_bits));
 
-		((PageHeader) BufferGetPage(buffer))->pd_lsn = recptr;
-		((PageHeader) BufferGetPage(buffer))->pd_sui = ThisStartUpID;
+		PageSetLSN(BufferGetPage(buffer), recptr);
+		PageSetSUI(BufferGetPage(buffer), ThisStartUpID);
 	}
 #endif
 
@@ -1490,15 +1493,14 @@ l1:
 	/* XLOG stuff */
 	{
 		xl_heap_delete	xlrec;
-		xlrec.dtid.dbId = relation->rd_lockInfo.lockRelId.dbId;
-		xlrec.dtid.relId = relation->rd_lockInfo.lockRelId.relId;
-		xlrec.dtid.cid = GetCurrentCommandId();
-		xlrec.dtid.tid = tp.t_self;
+		xlrec.target.node = relation->rd_node;
+		xlrec.target.cid = GetCurrentCommandId();
+		xlrec.target.tid = tp.t_self;
 		XLogRecPtr recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE,
-			(char*) xlrec, SizeOfHeapDelete, NULL, 0);
+			(char*) &xlrec, SizeOfHeapDelete, NULL, 0);
 
-		dp->pd_lsn = recptr;
-		dp->pd_sui = ThisStartUpID;
+		PageSetLSN(dp, recptr);
+		PageSetSUI(dp, ThisStartUpID);
 	}
 #endif
 
@@ -1638,18 +1640,49 @@ l2:
 	if ((unsigned) MAXALIGN(newtup->t_len) <= PageGetFreeSpace((Page) dp))
 		newbuf = buffer;
 	else
-		newbuf = RelationGetBufferForTuple(relation, newtup->t_len, buffer);
+	{
+#ifdef XLOG
+		/* 
+		 * We have to unlock the old tuple's buffer before extending the
+		 * table file, but must keep the lock on the old tuple. To avoid a
+		 * second XLOG record we use a xact manager hook to unlock the old
+		 * tuple without reading the log if the xact aborts before the
+		 * update is logged. In the event of a crash prior to logging,
+		 * TQUAL routines will see the HEAP_XMAX_UNLOGGED flag...
+		 */
+		_locked_tuple = *otid;
+		XactPushRollback(_heap_unlock_tuple, (void*) &_locked_tuple);
+#endif
+		TransactionIdStore(GetCurrentTransactionId(), &(oldtup.t_data->t_xmax));
+		oldtup.t_data->t_cmax = GetCurrentCommandId();
+		oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
+								 HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+		oldtup.t_data->t_infomask |= HEAP_XMAX_UNLOGGED;
+		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+		newbuf = RelationGetBufferForTuple(relation, newtup->t_len);
+		/* this seems to be deadlock free... */
+		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+	}
 
 	/* NO ELOG(ERROR) from here till changes are logged */
 
 	/* insert new tuple */
 	RelationPutHeapTuple(relation, newbuf, newtup);
 
-	/* logically delete old tuple */
-	TransactionIdStore(GetCurrentTransactionId(), &(oldtup.t_data->t_xmax));
-	oldtup.t_data->t_cmax = GetCurrentCommandId();
-	oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
-							 HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+	if (buffer == newbuf)
+	{
+		TransactionIdStore(GetCurrentTransactionId(), &(oldtup.t_data->t_xmax));
+		oldtup.t_data->t_cmax = GetCurrentCommandId();
+		oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
+								 HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+	}
+	else
+	{
+		oldtup.t_data->t_infomask &= ~HEAP_XMAX_UNLOGGED;
+#ifdef XLOG
+		XactPopRollback();
+#endif
+	}
 
 	/* record address of new tuple in t_ctid of old one */
 	oldtup.t_data->t_ctid = newtup->t_self;
@@ -1658,10 +1691,10 @@ l2:
 	/* XLOG stuff */
 	{
 		xl_heap_update	xlrec;
-		xlrec.dtid.dbId = relation->rd_lockInfo.lockRelId.dbId;
-		xlrec.dtid.relId = relation->rd_lockInfo.lockRelId.relId;
-		xlrec.dtid.cid = GetCurrentCommandId();
-		xlrec.itid.tid = newtup->t_self;
+		xlrec.target.node = relation->rd_node;
+		xlrec.target.cid = GetCurrentCommandId();
+		xlrec.target.tid = oldtup.t_self;
+		xlrec.newtid.tid = newtup->t_self;
 		xlrec.t_natts = newtup->t_data->t_natts;
 		xlrec.t_hoff = newtup->t_data->t_hoff;
 		xlrec.mask = newtup->t_data->t_infomask;
@@ -1673,11 +1706,11 @@ l2:
 
 		if (newbuf != buffer)
 		{
-			((PageHeader) BufferGetPage(newbuf))->pd_lsn = recptr;
-			((PageHeader) BufferGetPage(newbuf))->pd_sui = ThisStartUpID;
+			PageSetLSN(BufferGetPage(newbuf), recptr);
+			PageSetSUI(BufferGetPage(newbuf), ThisStartUpID);
 		}
-		((PageHeader) BufferGetPage(buffer))->pd_lsn = recptr;
-		((PageHeader) BufferGetPage(buffer))->pd_sui = ThisStartUpID;
+		PageSetLSN(BufferGetPage(buffer), recptr);
+		PageSetSUI(BufferGetPage(buffer), ThisStartUpID);
 	}
 #endif
 
@@ -1969,7 +2002,7 @@ heap_restrpos(HeapScanDesc scan)
 void heap_redo(XLogRecPtr lsn, XLogRecord *record)
 {
 	uint8	info = record->xl_info & ~XLR_INFO_MASK;
-	
+
 	if (info == XLOG_HEAP_INSERT)
 		heap_xlog_insert(true, lsn, record);
 	else if (info == XLOG_HEAP_DELETE)
@@ -1982,25 +2015,389 @@ void heap_redo(XLogRecPtr lsn, XLogRecord *record)
 		elog(STOP, "heap_redo: unknown op code %u", info);
 }
 
-void heap_undo(XLogRecPtr lsn, XLogRecord *record)
+void heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
 {
-	uint8	info = record->xl_info & ~XLR_INFO_MASK;
-	
-	if (info == XLOG_HEAP_INSERT)
-		heap_xlog_insert(false, lsn, record);
-	else if (info == XLOG_HEAP_DELETE)
-		heap_xlog_delete(false, lsn, record);
-	else if (info == XLOG_HEAP_UPDATE)
-		heap_xlog_update(false, lsn, record);
-	else if (info == XLOG_HEAP_MOVE)
-		heap_xlog_move(false, lsn, record);
-	else
-		elog(STOP, "heap_undo: unknown op code %u", info);
+	xl_heap_delete *xlrec = (xl_heap_delete*) XLogRecGetData(record);
+	Relation		reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+
+	if (!RelationIsValid(reln))
+		return;
+	Buffer buffer = XLogReadBuffer(false, reln, 
+						ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+	if (!BufferIsValid(buffer))
+		return;
+
+	Page page = (Page) BufferGetPage(buffer);
+	if (PageIsNew((PageHeader) page))
+	{
+		PageInit(page, BufferGetPageSize(buffer), 0);
+		PageSetLSN(page, lsn);
+		PageSetSUI(page, ThisStartUpID);
+		UnlockAndWriteBuffer(buffer);
+		return;
+	}
+
+	if (redo)
+	{
+		if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
+		{
+			UnlockAndReleaseBuffer(buffer);
+			return;
+		}
+	}
+	else if (XLByteLT(PageGetLSN(page), lsn))	/* changes are not applied ?! */
+		elog(STOP, "heap_delete_undo: bad page LSN");
+
+	OffsetNumber	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+	ItemId			lp = PageGetItemId(page, offnum);
+
+	if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
+	{
+		if (redo)
+			elog(STOP, "heap_delete_redo: unused/deleted target tuple");
+		if (!InRecovery)
+			elog(STOP, "heap_delete_undo: unused/deleted target tuple in rollback");
+		if (ItemIdDeleted(lp))
+		{
+			lp->lp_flags &= ~LP_USED;
+			PageRepairFragmentation(page);
+			UnlockAndWriteBuffer(buffer);
+		}
+		else
+			UnlockAndReleaseBuffer(buffer);
+		return;
+	}
+	HeapTupleHeader	htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+	if (redo)
+	{
+		htup->t_xmax = record->xl_xid;
+		htup->t_cmax = xlrec->target.cid;
+		htup->t_infomask &= ~(HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+		htup->t_infomask |= HEAP_XMAX_COMMITTED;
+		PageSetLSN(page, lsn);
+		PageSetSUI(page, ThisStartUpID);
+		UnlockAndWriteBuffer(buffer);
+		return;
+	}
+
+	/* undo... is it our tuple ? */
+	if (htup->t_xmax != record->xl_xid || htup->t_cmax != xlrec->target.cid)
+	{
+		if (!InRecovery)
+			elog(STOP, "heap_delete_undo: invalid target tuple in rollback");
+		UnlockAndReleaseBuffer(buffer);
+		return;
+	}
+	else	/* undo DELETE */
+	{
+		htup->t_infomask |= HEAP_XMAX_INVALID;
+		UnlockAndWriteBuffer(buffer);
+		return;
+	}
+
 }
 
 void heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
 {
-	xl_heap_insert  xlrec = XLogRecGetData(record);
+	xl_heap_insert *xlrec = (xl_heap_insert*) XLogRecGetData(record);
+	Relation		reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+
+	if (!RelationIsValid(reln))
+		return;
+	Buffer buffer = XLogReadBuffer((redo) ? true : false, reln, 
+						ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+	if (!BufferIsValid(buffer))
+		return;
+
+	Page page = (Page) BufferGetPage(buffer);
+	if (PageIsNew((PageHeader) page))
+	{
+		PageInit(page, BufferGetPageSize(buffer), 0);
+		if (!redo)
+		{
+			PageSetLSN(page, lsn);
+			PageSetSUI(page, ThisStartUpID);
+			UnlockAndWriteBuffer(buffer);
+			return;
+		}
+	}
+
+	if (redo)
+	{
+		if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
+		{
+			UnlockAndReleaseBuffer(buffer);
+			return;
+		}
+
+		char			tbuf[MaxTupleSize];
+		HeapTupleHeader	htup = (HeapTupleHeader) tbuf;
+		uint32			newlen = record->xl_len - SizeOfHeapInsert;
+
+		memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits), 
+			(char*)xlrec + SizeOfHeapInsert, newlen);
+		newlen += offsetof(HeapTupleHeaderData, t_bits);
+		htup->t_oid = xlrec->t_oid;
+		htup->t_natts = xlrec->t_natts;
+		htup->t_hoff = xlrec->t_hoff;
+		htup->t_xmin = record->xl_xid;
+		htup->t_cmin = xlrec->target.cid;
+		htup->t_infomask = HEAP_XMAX_INVALID | HEAP_XMIN_COMMITTED | xlrec->mask;
+		
+		PageManagerModeSet(OverwritePageManagerMode);
+		OffsetNumber offnum = PageAddItem(page, htup, newlen, 
+			ItemPointerGetOffsetNumber(&(xlrec->target.tid)), LP_USED);
+		PageManagerModeSet(ShufflePageManagerMode);
+		if (offnum == InvalidOffsetNumber)
+			elog(STOP, "heap_insert_redo: failed to add tuple");
+		PageSetLSN(page, lsn);
+		PageSetSUI(page, ThisStartUpID);	/* prev sui */
+		UnlockAndWriteBuffer(buffer);
+		return;
+	}
+
+	/* undo insert */
+	if (XLByteLT(PageGetLSN(page), lsn))	/* changes are not applied ?! */
+		elog(STOP, "heap_insert_undo: bad page LSN");
+
+	OffsetNumber	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+	ItemId			lp = PageGetItemId(page, offnum);
+
+	if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
+	{
+		if (!InRecovery)
+			elog(STOP, "heap_insert_undo: unused/deleted target tuple in rollback");
+		if (ItemIdDeleted(lp))
+		{
+			lp->lp_flags &= ~LP_USED;
+			PageRepairFragmentation(page);
+			UnlockAndWriteBuffer(buffer);
+		}
+		else
+			UnlockAndReleaseBuffer(buffer);
+		return;
+	}
+	HeapTupleHeader	htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+	/* is it our tuple ? */
+	if (htup->t_xmin != record->xl_xid || htup->t_cmin != xlrec->target.cid)
+	{
+		if (!InRecovery)
+			elog(STOP, "heap_insert_undo: invalid target tuple in rollback");
+		UnlockAndReleaseBuffer(buffer);
+		return;
+	}
+
+	if (InRecovery || BufferIsUpdatable(buffer))
+	{
+		lp->lp_flags &= ~LP_USED;
+		PageRepairFragmentation(page);
+		UnlockAndWriteBuffer(buffer);
+	}
+	else	/* we can't delete tuple right now */
+	{
+		lp->lp_flags |= LP_DELETE;	/* mark for deletion */
+		MarkBufferForCleanup(buffer, PageCleanup);
+	}
+
+}
+
+void heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record)
+{
+	xl_heap_update *xlrec = (xl_heap_update*) XLogRecGetData(record);
+	Relation		reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+
+	if (!RelationIsValid(reln))
+		return;
+	Buffer			buffer;
+	Page			page;
+	OffsetNumber	offnum;
+	ItemId			lp;
+	HeapTupleHeader	htup;
+
+	/*
+	 * Currently UPDATE is DELETE + INSERT, so the code below is nearly the
+	 * exact sum of the code in heap_xlog_delete & heap_xlog_insert. We
+	 * could restructure it better, but with the upcoming overwriting smgr
+	 * in mind, keeping heap_xlog_update's code separate seems a good thing.
+	 */
+
+	/* Deal with old tuple version */
+
+	buffer = XLogReadBuffer(false, reln, 
+					ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+	if (!BufferIsValid(buffer))
+		goto newt;
+
+	page = (Page) BufferGetPage(buffer);
+	if (PageIsNew((PageHeader) page))
+	{
+		PageInit(page, BufferGetPageSize(buffer), 0);
+		PageSetLSN(page, lsn);
+		PageSetSUI(page, ThisStartUpID);
+		UnlockAndWriteBuffer(buffer);
+		goto newt;
+	}
+
+	if (redo)
+	{
+		if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
+		{
+			UnlockAndReleaseBuffer(buffer);
+			goto newt;
+		}
+	}
+	else if (XLByteLT(PageGetLSN(page), lsn))	/* changes are not applied ?! */
+		elog(STOP, "heap_update_undo: bad old tuple page LSN");
+
+	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
+	lp = PageGetItemId(page, offnum);
+
+	if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
+	{
+		if (redo)
+			elog(STOP, "heap_update_redo: unused/deleted old tuple");
+		if (!InRecovery)
+			elog(STOP, "heap_update_undo: unused/deleted old tuple in rollback");
+		if (ItemIdDeleted(lp))
+		{
+			lp->lp_flags &= ~LP_USED;
+			PageRepairFragmentation(page);
+			UnlockAndWriteBuffer(buffer);
+		}
+		else
+			UnlockAndReleaseBuffer(buffer);
+		goto newt;
+	}
+	htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+	if (redo)
+	{
+		htup->t_xmax = record->xl_xid;
+		htup->t_cmax = xlrec->target.cid;
+		htup->t_infomask &= ~(HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+		htup->t_infomask |= HEAP_XMAX_COMMITTED;
+		PageSetLSN(page, lsn);
+		PageSetSUI(page, ThisStartUpID);
+		UnlockAndWriteBuffer(buffer);
+		goto newt;
+	}
+
+	/* undo... is it our tuple ? */
+	if (htup->t_xmax != record->xl_xid || htup->t_cmax != xlrec->target.cid)
+	{
+		if (!InRecovery)
+			elog(STOP, "heap_update_undo: invalid old tuple in rollback");
+		UnlockAndReleaseBuffer(buffer);
+	}
+	else	/* undo */
+	{
+		htup->t_infomask |= HEAP_XMAX_INVALID;
+		UnlockAndWriteBuffer(buffer);
+	}
+
+	/* Deal with new tuple */
+
+newt:;
+
+	buffer = XLogReadBuffer((redo) ? true : false, reln, 
+					ItemPointerGetBlockNumber(&(xlrec->newtid)));
+	if (!BufferIsValid(buffer))
+		return;
+
+	page = (Page) BufferGetPage(buffer);
+	if (PageIsNew((PageHeader) page))
+	{
+		PageInit(page, BufferGetPageSize(buffer), 0);
+		if (!redo)
+		{
+			PageSetLSN(page, lsn);
+			PageSetSUI(page, ThisStartUpID);
+			UnlockAndWriteBuffer(buffer);
+			return;
+		}
+	}
+
+	if (redo)
+	{
+		if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
+		{
+			UnlockAndReleaseBuffer(buffer);
+			return;
+		}
+
+		char			tbuf[MaxTupleSize];
+		uint32			newlen = record->xl_len - SizeOfHeapUpdate;
+
+		htup = (HeapTupleHeader) tbuf;
+		memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits), 
+			(char*)xlrec + SizeOfHeapUpdate, newlen);
+		newlen += offsetof(HeapTupleHeaderData, t_bits);
+		htup->t_oid = xlrec->t_oid;
+		htup->t_natts = xlrec->t_natts;
+		htup->t_hoff = xlrec->t_hoff;
+		htup->t_xmin = record->xl_xid;
+		htup->t_cmin = xlrec->target.cid;
+		htup->t_infomask = HEAP_XMAX_INVALID | HEAP_XMIN_COMMITTED | xlrec->mask;
+		
+		PageManagerModeSet(OverwritePageManagerMode);
+		OffsetNumber offnum = PageAddItem(page, htup, newlen, 
+			ItemPointerGetOffsetNumber(&(xlrec->newtid)), LP_USED);
+		PageManagerModeSet(ShufflePageManagerMode);
+		if (offnum == InvalidOffsetNumber)
+			elog(STOP, "heap_update_redo: failed to add tuple");
+		PageSetLSN(page, lsn);
+		PageSetSUI(page, ThisStartUpID);	/* prev sui */
+		UnlockAndWriteBuffer(buffer);
+		return;
+	}
+
+	/* undo */
+	if (XLByteLT(PageGetLSN(page), lsn))	/* changes are not applied ?! */
+		elog(STOP, "heap_update_undo: bad new tuple page LSN");
+
+	offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
+	lp = PageGetItemId(page, offnum);
+
+	if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
+	{
+		if (!InRecovery)
+			elog(STOP, "heap_update_undo: unused/deleted new tuple in rollback");
+		if (ItemIdDeleted(lp))
+		{
+			lp->lp_flags &= ~LP_USED;
+			PageRepairFragmentation(page);
+			UnlockAndWriteBuffer(buffer);
+		}
+		else
+			UnlockAndReleaseBuffer(buffer);
+		return;
+	}
+	htup = (HeapTupleHeader) PageGetItem(page, lp);
+
+	/* is it our tuple ? */
+	if (htup->t_xmin != record->xl_xid || htup->t_cmin != xlrec->target.cid)
+	{
+		if (!InRecovery)
+			elog(STOP, "heap_update_undo: invalid new tuple in rollback");
+		UnlockAndReleaseBuffer(buffer);
+		return;
+	}
+
+	if (InRecovery || BufferIsUpdatable(buffer))
+	{
+		lp->lp_flags &= ~LP_USED;
+		PageRepairFragmentation(page);
+		UnlockAndWriteBuffer(buffer);
+	}
+	else	/* we can't delete tuple right now */
+	{
+		lp->lp_flags |= LP_DELETE;	/* mark for deletion */
+		MarkBufferForCleanup(buffer, PageCleanup);
+	}
+
 }
 
+
 #endif	/* XLOG */
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 9181a7984d7826be9b764dc70294449e7ea8f6e8..04725b4a7b471c4f140ac7d9de0a3aa60537af0d 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Id: hio.c,v 1.32 2000/07/03 02:54:15 vadim Exp $
+ *	  $Id: hio.c,v 1.33 2000/09/07 09:58:35 vadim Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -67,16 +67,19 @@ RelationPutHeapTuple(Relation relation,
 /*
  * RelationGetBufferForTuple
  *
- * Returns (locked) buffer to add tuple with given len.
- * If Ubuf is valid then no attempt to lock it should be made -
- * this is for heap_update...
+ * Returns (locked) buffer with free space >= given len.
+ *
+ * Note that we use LockPage to lock relation for extension. We can 
+ * do this as long as in all other places we use page-level locking
+ * for indices only. Alternatively, we could define pseudo-table as
+ * we do for transactions with XactLockTable.
  *
  * ELOG(ERROR) is allowed here, so this routine *must* be called
  * before any (unlogged) changes are made in buffer pool.
  *
  */
 Buffer
-RelationGetBufferForTuple(Relation relation, Size len, Buffer Ubuf)
+RelationGetBufferForTuple(Relation relation, Size len)
 {
 	Buffer		buffer;
 	Page		pageHeader;
@@ -91,12 +94,6 @@ RelationGetBufferForTuple(Relation relation, Size len, Buffer Ubuf)
 		elog(ERROR, "Tuple is too big: size %u, max size %ld",
 			 len, MaxTupleSize);
 
-	/*
-	 * Lock relation for extension. We can use LockPage here as long as in
-	 * all other places we use page-level locking for indices only.
-	 * Alternatively, we could define pseudo-table as we do for
-	 * transactions with XactLockTable.
-	 */
 	if (!relation->rd_myxactonly)
 		LockPage(relation, 0, ExclusiveLock);
 
@@ -114,31 +111,29 @@ RelationGetBufferForTuple(Relation relation, Size len, Buffer Ubuf)
 	 */
 	if (lastblock == 0)
 	{
-		/* what exactly is this all about??? */
-		buffer = ReadBuffer(relation, lastblock);
+		buffer = ReadBuffer(relation, P_NEW);
+		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 		pageHeader = (Page) BufferGetPage(buffer);
 		Assert(PageIsNew((PageHeader) pageHeader));
-		buffer = ReleaseAndReadBuffer(buffer, relation, P_NEW);
-		pageHeader = (Page) BufferGetPage(buffer);
 		PageInit(pageHeader, BufferGetPageSize(buffer), 0);
 	}
 	else
+	{
 		buffer = ReadBuffer(relation, lastblock - 1);
-
-	if (buffer != Ubuf)
 		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
-	pageHeader = (Page) BufferGetPage(buffer);
+		pageHeader = (Page) BufferGetPage(buffer);
+	}
 
 	/*
 	 * Is there room on the last existing page?
 	 */
 	if (len > PageGetFreeSpace(pageHeader))
 	{
-		if (buffer != Ubuf)
-			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 		buffer = ReleaseAndReadBuffer(buffer, relation, P_NEW);
 		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 		pageHeader = (Page) BufferGetPage(buffer);
+		Assert(PageIsNew((PageHeader) pageHeader));
 		PageInit(pageHeader, BufferGetPageSize(buffer), 0);
 
 		if (len > PageGetFreeSpace(pageHeader))
@@ -147,14 +142,6 @@ RelationGetBufferForTuple(Relation relation, Size len, Buffer Ubuf)
 			elog(STOP, "Tuple is too big: size %u", len);
 		}
 	}
-	/*
-	 * Caller should check space in Ubuf but...
-	 */
-	else if (buffer == Ubuf)
-	{
-		ReleaseBuffer(buffer);
-		buffer = Ubuf;
-	}
 
 	if (!relation->rd_myxactonly)
 		UnlockPage(relation, 0, ExclusiveLock);
diff --git a/src/include/access/hio.h b/src/include/access/hio.h
index c0636a4ff34f280dc3758a73ed525d6e2d945dd0..df39aed6cba25f16b6b85ad6f5ca6b8637653ae4 100644
--- a/src/include/access/hio.h
+++ b/src/include/access/hio.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: hio.h,v 1.15 2000/07/03 02:54:17 vadim Exp $
+ * $Id: hio.h,v 1.16 2000/09/07 09:58:35 vadim Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -18,6 +18,6 @@
 
 extern void RelationPutHeapTuple(Relation relation, Buffer buffer,
 					 HeapTuple tuple);
-extern Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer Ubuf);
+extern Buffer RelationGetBufferForTuple(Relation relation, Size len);
 
 #endif	 /* HIO_H */
diff --git a/src/include/access/htup.h b/src/include/access/htup.h
index 73e6655634c2d61e5f6de33626f63e75ffbf20c8..f105dafee27f530f5a3318879bb88e1dd5610c51 100644
--- a/src/include/access/htup.h
+++ b/src/include/access/htup.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: htup.h,v 1.34 2000/08/07 20:15:40 tgl Exp $
+ * $Id: htup.h,v 1.35 2000/09/07 09:58:35 vadim Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -15,6 +15,7 @@
 #define HTUP_H
 
 #include "storage/bufpage.h"
+#include "storage/relfilenode.h"
 
 #define MinHeapTupleBitmapSize	32		/* 8 * 4 */
 
@@ -81,8 +82,7 @@ typedef HeapTupleHeaderData *HeapTupleHeader;
  */
 typedef struct xl_heaptid
 {
-	Oid					dbId;		/* database */
-	Oid					relId;		/* relation */
+	RelFileNode			node;
 	CommandId			cid;		/* this is for "better" tuple' */
 									/* identification - it allows to avoid */
 									/* "compensation" records for undo */
@@ -92,7 +92,7 @@ typedef struct xl_heaptid
 /* This is what we need to know about delete - ALIGN(18) = 24 bytes */
 typedef struct xl_heap_delete
 {
-	xl_heaptid			dtid;		/* deleted tuple id */
+	xl_heaptid			target;		/* deleted tuple id */
 } xl_heap_delete;
 
-#define	SizeOfHeapDelete	(offsetof(xl_heaptid, tid) + SizeOfIptrData))
+#define	SizeOfHeapDelete	(offsetof(xl_heaptid, tid) + SizeOfIptrData)
@@ -100,7 +100,7 @@ typedef struct xl_heap_delete
 /* This is what we need to know about insert - 26 + data */
 typedef struct xl_heap_insert
 {
-	xl_heaptid			itid;		/* inserted tuple id */
+	xl_heaptid			target;		/* inserted tuple id */
 	/* something from tuple header */
 	int16				t_natts;
 	Oid					t_oid;
@@ -114,8 +114,8 @@ typedef struct xl_heap_insert
 /* This is what we need to know about update - 28 + data */
 typedef struct xl_heap_update
 {
-	xl_heaptid			dtid;		/* deleted tuple id */
-	ItemPointerData		itid;		/* new inserted tuple id */
+	xl_heaptid			target;		/* deleted tuple id */
+	ItemPointerData		newtid;		/* new inserted tuple id */
 	/* something from header of new tuple version */
 	int16				t_natts;
 	uint8				t_hoff;
@@ -128,8 +128,8 @@
 /* This is what we need to know about tuple move - 24 bytes */
 typedef struct xl_heap_move
 {
-	xl_heaptid			ftid;		/* moved from */
-	ItemPointerData		ttid;		/* moved to */
+	xl_heaptid			target;		/* moved from */
+	ItemPointerData		newtid;		/* moved to */
 } xl_heap_move;
 
-#define	SizeOfHeapMove	(offsetof(xl_heap_move, ttid) + SizeOfIptrData))
+#define	SizeOfHeapMove	(offsetof(xl_heap_move, newtid) + SizeOfIptrData)
@@ -238,6 +238,9 @@ typedef HeapTupleData *HeapTuple;
 #define HEAP_HASCOMPRESSED		0x0008	/* has compressed stored */
  /* attribute(s) */
 #define HEAP_HASEXTENDED		0x000C	/* the two above combined */
+
+#define HEAP_XMAX_UNLOGGED		0x0080	/* to lock tuple for update */
+										/* without logging */
 #define HEAP_XMIN_COMMITTED		0x0100	/* t_xmin committed */
 #define HEAP_XMIN_INVALID		0x0200	/* t_xmin invalid/aborted */
 #define HEAP_XMAX_COMMITTED		0x0400	/* t_xmax committed */
@@ -249,7 +252,7 @@ typedef HeapTupleData *HeapTuple;
 #define HEAP_MOVED_IN			0x8000	/* moved from another place by
 										 * vacuum */
 
-#define HEAP_XACT_MASK			0xFF00	/* */
+#define HEAP_XACT_MASK			0xFFF0	/* */
 
 #define HeapTupleNoNulls(tuple) \
 		(!(((HeapTuple) (tuple))->t_data->t_infomask & HEAP_HASNULL))
diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h
index 8498c783a1168c3978f974b82a349b445ad639d6..58ba61f68d86c276aeedae78257ed52dfdaef864 100644
--- a/src/include/storage/bufpage.h
+++ b/src/include/storage/bufpage.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: bufpage.h,v 1.31 2000/07/21 06:42:39 tgl Exp $
+ * $Id: bufpage.h,v 1.32 2000/09/07 09:58:36 vadim Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -296,6 +296,19 @@ typedef enum
 			 (sizeof(PageHeaderData) - sizeof(ItemIdData)))) \
 	 / ((int) sizeof(ItemIdData)))
 
+#ifdef XLOG
+
+#define PageGetLSN(page) \
+	(((PageHeader) (page))->pd_lsn)
+#define PageSetLSN(page, lsn) \
+	(((PageHeader) (page))->pd_lsn = (XLogRecPtr) (lsn))
+
+#define PageGetSUI(page) \
+	(((PageHeader) (page))->pd_sui)
+#define PageSetSUI(page, sui) \
+	(((PageHeader) (page))->pd_sui = (StartUpID) (sui))
+
+#endif
 
 /* ----------------------------------------------------------------
  *		extern declarations
diff --git a/src/include/storage/itemid.h b/src/include/storage/itemid.h
index 87aa82ac3c62fbf3cf30c6ec863c067a997819c0..0b330ce56fec14896af346020b0b46593d205e30 100644
--- a/src/include/storage/itemid.h
+++ b/src/include/storage/itemid.h
@@ -7,14 +7,13 @@
  * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: itemid.h,v 1.11 2000/08/07 20:15:50 tgl Exp $
+ * $Id: itemid.h,v 1.12 2000/09/07 09:58:36 vadim Exp $
  *
  *-------------------------------------------------------------------------
  */
 #ifndef ITEMID_H
 #define ITEMID_H
 
-
 /*
  * An item pointer (also called line pointer) on a buffer page
  */
@@ -31,8 +30,15 @@ typedef ItemIdData *ItemId;
  * lp_flags contains these flags:
  */
 #define LP_USED			0x01	/* this line pointer is being used */
-/* currently, there is one unused flag bit ... */
 
+#ifdef XLOG
+
+#define LP_DELETE		0x02    /* item is to be deleted */
+
+#define ItemIdDeleted(itemId) \
+	(((itemId)->lp_flags & LP_DELETE) != 0)
+
+#endif
 
 /*
  * Item offsets, lengths, and flags are represented by these types when
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index c90ba3c5ee0900e0d2ceb860dac9176bfe685211..4deec0618a81ea98a19577e6cd72ffe22a35f7e8 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2000, PostgreSQL, Inc
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $Id: rel.h,v 1.40 2000/07/14 22:18:02 tgl Exp $
+ * $Id: rel.h,v 1.41 2000/09/07 09:58:38 vadim Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -19,6 +19,7 @@
 #include "catalog/pg_am.h"
 #include "catalog/pg_class.h"
 #include "rewrite/prs2lock.h"
+#include "storage/relfilenode.h"
 #include "storage/fd.h"
 
 /* added to prevent circular dependency.  bjm 1999/11/15 */
@@ -86,6 +87,7 @@ typedef struct TriggerDesc
 typedef struct RelationData
 {
 	File		rd_fd;			/* open file descriptor, or -1 if none */
+	RelFileNode	rd_node;		/* relation file node */
 	int			rd_nblocks;		/* number of blocks in rel */
 	uint16		rd_refcnt;		/* reference count */
 	bool		rd_myxactonly;	/* rel uses the local buffer mgr */