Skip to content
Snippets Groups Projects
Commit 33d3ad46 authored by Tom Lane's avatar Tom Lane
Browse files

Code review for patch to avoid second scan when vacuuming index-less

table: avoid invoking LockBufferForCleanup without need, put out the
same log message we would have before, minor code beautification.
parent ea21eea6
No related branches found
No related tags found
No related merge requests found
...@@ -16,10 +16,6 @@ ...@@ -16,10 +16,6 @@
* perform a pass of index cleanup and page compaction, then resume the heap * perform a pass of index cleanup and page compaction, then resume the heap
* scan with an empty TID array. * scan with an empty TID array.
* *
* As a special exception if we're processing a table with no indexes we can
* vacuum each page as we go so we don't need to allocate more space than
* enough to hold as many heap tuples fit on one page.
*
* We can limit the storage for page free space to MaxFSMPages entries, * We can limit the storage for page free space to MaxFSMPages entries,
* since that's the most the free space map will be willing to remember * since that's the most the free space map will be willing to remember
* anyway. If the relation has fewer than that many pages with free space, * anyway. If the relation has fewer than that many pages with free space,
...@@ -29,13 +25,18 @@ ...@@ -29,13 +25,18 @@
* have more than MaxFSMPages entries in all. The surviving page entries * have more than MaxFSMPages entries in all. The surviving page entries
* are passed to the free space map at conclusion of the scan. * are passed to the free space map at conclusion of the scan.
* *
* If we're processing a table with no indexes, we can just vacuum each page
* as we go; there's no need to save up multiple tuples to minimize the number
* of index scans performed. So we don't use maintenance_work_mem memory for
* the TID array, just enough to hold as many heap tuples as fit on one page.
*
* *
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.77 2006/09/04 21:40:23 momjian Exp $ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.78 2006/09/13 17:47:08 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -68,6 +69,8 @@ ...@@ -68,6 +69,8 @@
typedef struct LVRelStats typedef struct LVRelStats
{ {
/* hasindex = true means two-pass strategy; false means one-pass */
bool hasindex;
/* Overall statistics about rel */ /* Overall statistics about rel */
BlockNumber rel_pages; BlockNumber rel_pages;
double rel_tuples; double rel_tuples;
...@@ -110,7 +113,7 @@ static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats, ...@@ -110,7 +113,7 @@ static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats,
TransactionId OldestXmin); TransactionId OldestXmin);
static BlockNumber count_nondeletable_pages(Relation onerel, static BlockNumber count_nondeletable_pages(Relation onerel,
LVRelStats *vacrelstats, TransactionId OldestXmin); LVRelStats *vacrelstats, TransactionId OldestXmin);
static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks, unsigned nindexes); static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
static void lazy_record_dead_tuple(LVRelStats *vacrelstats, static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
ItemPointer itemptr); ItemPointer itemptr);
static void lazy_record_free_space(LVRelStats *vacrelstats, static void lazy_record_free_space(LVRelStats *vacrelstats,
...@@ -137,7 +140,6 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt) ...@@ -137,7 +140,6 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
LVRelStats *vacrelstats; LVRelStats *vacrelstats;
Relation *Irel; Relation *Irel;
int nindexes; int nindexes;
bool hasindex;
BlockNumber possibly_freeable; BlockNumber possibly_freeable;
TransactionId OldestXmin, TransactionId OldestXmin,
FreezeLimit; FreezeLimit;
...@@ -169,7 +171,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt) ...@@ -169,7 +171,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
/* Open all indexes of the relation */ /* Open all indexes of the relation */
vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel); vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
hasindex = (nindexes > 0); vacrelstats->hasindex = (nindexes > 0);
/* Do the vacuuming */ /* Do the vacuuming */
lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, FreezeLimit, OldestXmin); lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, FreezeLimit, OldestXmin);
...@@ -195,7 +197,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt) ...@@ -195,7 +197,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
vac_update_relstats(RelationGetRelid(onerel), vac_update_relstats(RelationGetRelid(onerel),
vacrelstats->rel_pages, vacrelstats->rel_pages,
vacrelstats->rel_tuples, vacrelstats->rel_tuples,
hasindex, vacrelstats->hasindex,
vacrelstats->minxid, OldestXmin); vacrelstats->minxid, OldestXmin);
/* report results to the stats collector, too */ /* report results to the stats collector, too */
...@@ -210,11 +212,13 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt) ...@@ -210,11 +212,13 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
* This routine sets commit status bits, builds lists of dead tuples * This routine sets commit status bits, builds lists of dead tuples
* and pages with free space, and calculates statistics on the number * and pages with free space, and calculates statistics on the number
* of live tuples in the heap. When done, or when we run low on space * of live tuples in the heap. When done, or when we run low on space
* for dead-tuple TIDs, or after every page if the table has no indexes * for dead-tuple TIDs, invoke vacuuming of indexes and heap.
* invoke vacuuming of indexes and heap.
* *
* It also updates the minimum Xid found anywhere on the table in * It also updates the minimum Xid found anywhere on the table in
* vacrelstats->minxid, for later storing it in pg_class.relminxid. * vacrelstats->minxid, for later storing it in pg_class.relminxid.
*
* If there are no indexes then we just vacuum each dirty page as we
* process it, since there's no point in gathering many tuples.
*/ */
static void static void
lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
...@@ -225,7 +229,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, ...@@ -225,7 +229,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
blkno; blkno;
HeapTupleData tuple; HeapTupleData tuple;
char *relname; char *relname;
BlockNumber empty_pages; BlockNumber empty_pages,
vacuumed_pages;
double num_tuples, double num_tuples,
tups_vacuumed, tups_vacuumed,
nkeep, nkeep,
...@@ -242,7 +247,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, ...@@ -242,7 +247,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
get_namespace_name(RelationGetNamespace(onerel)), get_namespace_name(RelationGetNamespace(onerel)),
relname))); relname)));
empty_pages = 0; empty_pages = vacuumed_pages = 0;
num_tuples = tups_vacuumed = nkeep = nunused = 0; num_tuples = tups_vacuumed = nkeep = nunused = 0;
indstats = (IndexBulkDeleteResult **) indstats = (IndexBulkDeleteResult **)
...@@ -252,7 +257,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, ...@@ -252,7 +257,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
vacrelstats->rel_pages = nblocks; vacrelstats->rel_pages = nblocks;
vacrelstats->nonempty_pages = 0; vacrelstats->nonempty_pages = 0;
lazy_space_alloc(vacrelstats, nblocks, nindexes); lazy_space_alloc(vacrelstats, nblocks);
for (blkno = 0; blkno < nblocks; blkno++) for (blkno = 0; blkno < nblocks; blkno++)
{ {
...@@ -287,14 +292,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, ...@@ -287,14 +292,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
buf = ReadBuffer(onerel, blkno); buf = ReadBuffer(onerel, blkno);
/* In this phase we only need shared access to the buffer unless we're /* In this phase we only need shared access to the buffer */
* going to do the vacuuming now which we do if there are no indexes LockBuffer(buf, BUFFER_LOCK_SHARE);
*/
if (nindexes)
LockBuffer(buf, BUFFER_LOCK_SHARE);
else
LockBufferForCleanup(buf);
page = BufferGetPage(buf); page = BufferGetPage(buf);
...@@ -451,22 +450,34 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, ...@@ -451,22 +450,34 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
} }
} /* scan along page */ } /* scan along page */
/*
* If there are no indexes then we can vacuum the page right now
* instead of doing a second scan.
*/
if (nindexes == 0 &&
vacrelstats->num_dead_tuples > 0)
{
/* Trade in buffer share lock for super-exclusive lock */
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBufferForCleanup(buf);
/* Remove tuples from heap */
lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
/* Forget the now-vacuumed tuples, and press on */
vacrelstats->num_dead_tuples = 0;
vacuumed_pages++;
}
/* /*
* If we remembered any tuples for deletion, then the page will be * If we remembered any tuples for deletion, then the page will be
* visited again by lazy_vacuum_heap, which will compute and record * visited again by lazy_vacuum_heap, which will compute and record
* its post-compaction free space. If not, then we're done with this * its post-compaction free space. If not, then we're done with this
* page, so remember its free space as-is. * page, so remember its free space as-is. (This path will always
* be taken if there are no indexes.)
*/ */
if (vacrelstats->num_dead_tuples == prev_dead_count) if (vacrelstats->num_dead_tuples == prev_dead_count)
{ {
lazy_record_free_space(vacrelstats, blkno, lazy_record_free_space(vacrelstats, blkno,
PageGetFreeSpace(page)); PageGetFreeSpace(page));
} else if (!nindexes) {
/* If there are no indexes we can vacuum the page right now instead
* of doing a second scan */
lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
lazy_record_free_space(vacrelstats, blkno, PageGetFreeSpace(BufferGetPage(buf)));
vacrelstats->num_dead_tuples = 0;
} }
/* Remember the location of the last page with nonremovable tuples */ /* Remember the location of the last page with nonremovable tuples */
...@@ -499,6 +510,13 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, ...@@ -499,6 +510,13 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
for (i = 0; i < nindexes; i++) for (i = 0; i < nindexes; i++)
lazy_cleanup_index(Irel[i], indstats[i], vacrelstats); lazy_cleanup_index(Irel[i], indstats[i], vacrelstats);
/* If no indexes, make log report that lazy_vacuum_heap would've made */
if (vacuumed_pages)
ereport(elevel,
(errmsg("\"%s\": removed %.0f row versions in %u pages",
RelationGetRelationName(onerel),
tups_vacuumed, vacuumed_pages)));
ereport(elevel, ereport(elevel,
(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages", (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u pages",
RelationGetRelationName(onerel), RelationGetRelationName(onerel),
...@@ -908,18 +926,21 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats, ...@@ -908,18 +926,21 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats,
* See the comments at the head of this file for rationale. * See the comments at the head of this file for rationale.
*/ */
static void static void
lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks, unsigned nindexes) lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
{ {
long maxtuples; long maxtuples;
int maxpages; int maxpages;
if (nindexes) { if (vacrelstats->hasindex)
{
maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData); maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
maxtuples = Min(maxtuples, INT_MAX); maxtuples = Min(maxtuples, INT_MAX);
maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData)); maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));
/* stay sane if small maintenance_work_mem */ /* stay sane if small maintenance_work_mem */
maxtuples = Max(maxtuples, MaxHeapTuplesPerPage); maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
} else { }
else
{
maxtuples = MaxHeapTuplesPerPage; maxtuples = MaxHeapTuplesPerPage;
} }
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment