diff --git a/doc/src/sgml/indexam.sgml b/doc/src/sgml/indexam.sgml index 65da721de47d3a7998e1e15db7635abe263c4265..1b42ff0231aa265a2d07d7e8422236cf6c54f567 100644 --- a/doc/src/sgml/indexam.sgml +++ b/doc/src/sgml/indexam.sgml @@ -1,4 +1,4 @@ -<!-- $PostgreSQL: pgsql/doc/src/sgml/indexam.sgml,v 2.24 2008/04/10 22:25:25 tgl Exp $ --> +<!-- $PostgreSQL: pgsql/doc/src/sgml/indexam.sgml,v 2.25 2008/04/13 19:18:13 tgl Exp $ --> <chapter id="indexam"> <title>Index Access Method Interface Definition</title> @@ -315,7 +315,15 @@ amgettuple (IndexScanDesc scan, TID is stored into the <literal>scan</> structure. Note that <quote>success</> means only that the index contains an entry that matches the scan keys, not that the tuple necessarily still exists in the heap or - will pass the caller's snapshot test. + will pass the caller's snapshot test. On success, <function>amgettuple</> + must also set <literal>scan->xs_recheck</> to TRUE or FALSE. + FALSE means it is certain that the index entry matches the scan keys. + TRUE means this is not certain, and the conditions represented by the + scan keys must be rechecked against the heap tuple after fetching it. + This provision supports <quote>lossy</> index operators. + Note that rechecking will extend only to the scan conditions; a partial + index predicate (if any) is never rechecked by <function>amgettuple</> + callers. </para> <para> @@ -327,6 +335,14 @@ amgetbitmap (IndexScanDesc scan, Fetch all tuples in the given scan and add them to the caller-supplied TIDBitmap (that is, OR the set of tuple IDs into whatever set is already in the bitmap). The number of tuples fetched is returned. + While inserting tuple IDs into the bitmap, <function>amgetbitmap</> can + indicate that rechecking of the scan conditions is required for specific + tuple IDs. This is analogous to the <literal>xs_recheck</> output parameter + of <function>amgettuple</>. Note: in the current implementation, support + for this feature is conflated with support for lossy storage of the bitmap + itself, and therefore callers recheck both the scan conditions and the + partial index predicate (if any) for recheckable tuples. That might not + always be true, however. 
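[Illustrative aside, not part of this patch: a minimal sketch of the amgettuple contract documented above. A hypothetical access method returns the matching TID in xs_ctup.t_self and must also set xs_recheck; myamgettuple and myam_fetch_next are assumed names, patterned on the GIN and GiST changes below, which currently treat all their operators as lossy.]

#include "postgres.h"
#include "access/relscan.h"
#include "access/sdir.h"
#include "fmgr.h"

Datum
myamgettuple(PG_FUNCTION_ARGS)
{
	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
	ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
	bool		res;

	/* hypothetical helper: advance the scan, store the next matching TID */
	res = myam_fetch_next(scan, dir, &scan->xs_ctup.t_self);

	/*
	 * This sketch treats every operator as lossy, so each hit must be
	 * rechecked against the heap tuple by the caller; an AM whose operators
	 * are exact would set xs_recheck = false instead (compare btgettuple
	 * and hashgettuple below).
	 */
	scan->xs_recheck = true;

	PG_RETURN_BOOL(res);
}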
<function>amgetbitmap</> and <function>amgettuple</> cannot be used in the same index scan; there are other restrictions too when using <function>amgetbitmap</>, as explained diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c index 4fb5ee556c5dc45fa956d1787bf2219cbe558d25..af38717340ad821c2cbbca4f5c28ccd3d7d96a84 100644 --- a/src/backend/access/gin/ginget.c +++ b/src/backend/access/gin/ginget.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.11 2008/04/10 22:25:25 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.12 2008/04/13 19:18:13 tgl Exp $ *------------------------------------------------------------------------- */ @@ -428,11 +428,14 @@ keyGetItem(Relation index, GinState *ginstate, MemoryContext tempCtx, GinScanKey * returns true if found */ static bool -scanGetItem(IndexScanDesc scan, ItemPointerData *item) +scanGetItem(IndexScanDesc scan, ItemPointerData *item, bool *recheck) { uint32 i; GinScanOpaque so = (GinScanOpaque) scan->opaque; + /* XXX for the moment, treat all GIN operators as lossy */ + *recheck = true; + ItemPointerSetMin(item); for (i = 0; i < so->nkeys; i++) { @@ -496,13 +499,14 @@ gingetbitmap(PG_FUNCTION_ARGS) for (;;) { ItemPointerData iptr; + bool recheck; CHECK_FOR_INTERRUPTS(); - if (!scanGetItem(scan, &iptr)) + if (!scanGetItem(scan, &iptr, &recheck)) break; - tbm_add_tuples(tbm, &iptr, 1, false); + tbm_add_tuples(tbm, &iptr, 1, recheck); ntids++; } @@ -528,7 +532,7 @@ gingettuple(PG_FUNCTION_ARGS) PG_RETURN_BOOL(false); startScan(scan); - res = scanGetItem(scan, &scan->xs_ctup.t_self); + res = scanGetItem(scan, &scan->xs_ctup.t_self, &scan->xs_recheck); stopScan(scan); PG_RETURN_BOOL(res); diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index 4533ff8c85f9997232afac5369aad109b6825589..8e92139582599480298676e7a8b868edda139c22 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.70 2008/04/10 22:25:25 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.71 2008/04/13 19:18:14 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -23,9 +23,7 @@ static OffsetNumber gistfindnext(IndexScanDesc scan, OffsetNumber n, ScanDirection dir); -static int64 gistnext(IndexScanDesc scan, ScanDirection dir, - ItemPointer tid, TIDBitmap *tbm, - bool ignore_killed_tuples); +static int64 gistnext(IndexScanDesc scan, ScanDirection dir, TIDBitmap *tbm); static bool gistindex_keytest(IndexTuple tuple, IndexScanDesc scan, OffsetNumber offset); @@ -100,7 +98,6 @@ gistgettuple(PG_FUNCTION_ARGS) IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1); GISTScanOpaque so; - ItemPointerData tid; bool res; so = (GISTScanOpaque) scan->opaque; @@ -113,11 +110,9 @@ gistgettuple(PG_FUNCTION_ARGS) killtuple(scan->indexRelation, so, &(so->curpos)); /* - * Get the next tuple that matches the search key. If asked to skip killed - * tuples, continue looping until we find a non-killed tuple that matches - * the search key. + * Get the next tuple that matches the search key. */ - res = (gistnext(scan, dir, &tid, NULL, scan->ignore_killed_tuples) > 0) ? 
true : false; + res = (gistnext(scan, dir, NULL) > 0); PG_RETURN_BOOL(res); } @@ -129,7 +124,7 @@ gistgetbitmap(PG_FUNCTION_ARGS) TIDBitmap *tbm = (TIDBitmap *) PG_GETARG_POINTER(1); int64 ntids; - ntids = gistnext(scan, ForwardScanDirection, NULL, tbm, false); + ntids = gistnext(scan, ForwardScanDirection, tbm); PG_RETURN_INT64(ntids); } @@ -140,20 +135,23 @@ gistgetbitmap(PG_FUNCTION_ARGS) * * This function is used by both gistgettuple and gistgetbitmap. When * invoked from gistgettuple, tbm is null and the next matching tuple - * is returned in *tid. When invoked from getbitmap, tid is null and - * all matching tuples are added to tbm. In both cases, the function - * result is the number of returned tuples. + * is returned in scan->xs_ctup.t_self. When invoked from getbitmap, + * tbm is non-null and all matching tuples are added to tbm before + * returning. In both cases, the function result is the number of + * returned tuples. + * + * If scan specifies to skip killed tuples, continue looping until we find a + * non-killed tuple that matches the search key. */ static int64 -gistnext(IndexScanDesc scan, ScanDirection dir, - ItemPointer tid, TIDBitmap *tbm, - bool ignore_killed_tuples) +gistnext(IndexScanDesc scan, ScanDirection dir, TIDBitmap *tbm) { Page p; OffsetNumber n; GISTScanOpaque so; GISTSearchStack *stk; IndexTuple it; + bool recheck; GISTPageOpaque opaque; bool resetoffset = false; int64 ntids = 0; @@ -194,7 +192,7 @@ gistnext(IndexScanDesc scan, ScanDirection dir, if (XLogRecPtrIsInvalid(so->stack->lsn) || !XLByteEQ(so->stack->lsn, PageGetLSN(p))) { - /* page changed from last visit or visit first time , reset offset */ + /* first visit or page changed from last visit, reset offset */ so->stack->lsn = PageGetLSN(p); resetoffset = true; @@ -259,6 +257,8 @@ gistnext(IndexScanDesc scan, ScanDirection dir, for (;;) { n = gistfindnext(scan, n, dir); + /* XXX for the moment, treat all GIST operators as lossy */ + recheck = true; if (!OffsetNumberIsValid(n)) { @@ -298,15 +298,17 @@ gistnext(IndexScanDesc scan, ScanDirection dir, ItemPointerSet(&(so->curpos), BufferGetBlockNumber(so->curbuf), n); - if (!(ignore_killed_tuples && ItemIdIsDead(PageGetItemId(p, n)))) + if (!(scan->ignore_killed_tuples && + ItemIdIsDead(PageGetItemId(p, n)))) { it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n)); ntids++; if (tbm != NULL) - tbm_add_tuples(tbm, &it->t_tid, 1, false); + tbm_add_tuples(tbm, &it->t_tid, 1, recheck); else { - *tid = scan->xs_ctup.t_self = it->t_tid; + scan->xs_ctup.t_self = it->t_tid; + scan->xs_recheck = recheck; LockBuffer(so->curbuf, GIST_UNLOCK); return ntids; /* always 1 */ diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index c090cfef8bb2042c7e7f278ae539508fe24cf4f1..997eff31058a59ef6cba508f5f4f11ca9d108494 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.101 2008/04/10 22:25:25 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.102 2008/04/13 19:18:14 tgl Exp $ * * NOTES * This file contains only the public interface routines. @@ -210,6 +210,9 @@ hashgettuple(PG_FUNCTION_ARGS) OffsetNumber offnum; bool res; + /* Hash indexes are never lossy (at the moment anyway) */ + scan->xs_recheck = false; + /* * We hold pin but not lock on current buffer while outside the hash AM. * Reacquire the read lock here. 
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index cafb1e6867aa1bcade2849c801c65370cc7dbf0c..e8958abca2b4538052d2ed19be3f7e3e1bdcaf01 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.67 2008/04/12 23:14:21 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.68 2008/04/13 19:18:14 tgl Exp $ * * NOTES * many of the old access method routines have been turned into @@ -96,9 +96,9 @@ RelationGetIndexScan(Relation indexRelation, ItemPointerSetInvalid(&scan->xs_ctup.t_self); scan->xs_ctup.t_data = NULL; scan->xs_cbuf = InvalidBuffer; - scan->xs_prev_xmax = InvalidTransactionId; - scan->xs_next_hot = InvalidOffsetNumber; scan->xs_hot_dead = false; + scan->xs_next_hot = InvalidOffsetNumber; + scan->xs_prev_xmax = InvalidTransactionId; /* * Let the AM fill in the key and any opaque data it wants. @@ -233,7 +233,18 @@ systable_getnext(SysScanDesc sysscan) HeapTuple htup; if (sysscan->irel) + { htup = index_getnext(sysscan->iscan, ForwardScanDirection); + /* + * We currently don't need to support lossy index operators for + * any system catalog scan. It could be done here, using the + * scan keys to drive the operator calls, if we arranged to save + * the heap attnums during systable_beginscan(); this is practical + * because we still wouldn't need to support indexes on expressions. + */ + if (htup && sysscan->iscan->xs_recheck) + elog(ERROR, "system catalog scans with lossy index conditions are not implemented"); + } else htup = heap_getnext(sysscan->scan, ForwardScanDirection); @@ -328,6 +339,9 @@ systable_getnext_ordered(SysScanDesc sysscan, ScanDirection direction) Assert(sysscan->irel); htup = index_getnext(sysscan->iscan, direction); + /* See notes in systable_getnext */ + if (htup && sysscan->iscan->xs_recheck) + elog(ERROR, "system catalog scans with lossy index conditions are not implemented"); return htup; } diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index 7a06d0074e8516b89e8a03ef8df675be18c02f99..ece3ae8ea40b0ba850b8abd90d9a2e110306c409 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.106 2008/04/12 23:14:21 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.107 2008/04/13 19:18:14 tgl Exp $ * * INTERFACE ROUTINES * index_open - open an index relation by relation OID @@ -402,6 +402,10 @@ index_restrpos(IndexScanDesc scan) * snapshot, or NULL if no more matching tuples exist. On success, * the buffer containing the heap tuple is pinned (the pin will be dropped * at the next index_getnext or index_endscan). + * + * Note: caller must check scan->xs_recheck, and perform rechecking of the + * scan keys if required. We do not do that here because we don't have + * enough information to do it efficiently in the general case. * ---------------- */ HeapTuple @@ -455,6 +459,8 @@ index_getnext(IndexScanDesc scan, ScanDirection direction) /* * The AM's gettuple proc finds the next index entry matching the * scan keys, and puts the TID in xs_ctup.t_self (ie, *tid). + * It should also set scan->xs_recheck, though we pay no + * attention to that here. 
*/ found = DatumGetBool(FunctionCall2(procedure, PointerGetDatum(scan), diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index d96348ace46eec87ebf1226d0bdccc14baa98809..24f9b4c77b21ca1d2cbff8c492862c8ed61fde06 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -12,7 +12,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.157 2008/04/10 22:25:25 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.158 2008/04/13 19:18:14 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -240,6 +240,9 @@ btgettuple(PG_FUNCTION_ARGS) BTScanOpaque so = (BTScanOpaque) scan->opaque; bool res; + /* btree indexes are never lossy */ + scan->xs_recheck = false; + /* * If we've already initialized this scan, we can just advance it in the * appropriate direction. If we haven't done so yet, we call a routine to diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index b83a0999254e3a30fd48f47da6252b9f8eb5f18a..0c83e237fe81677c5eb235e64567a4bc371a4756 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -11,7 +11,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.172 2008/03/26 21:10:37 alvherre Exp $ + * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.173 2008/04/13 19:18:14 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -776,6 +776,10 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex) CHECK_FOR_INTERRUPTS(); + /* Since we used no scan keys, should never need to recheck */ + if (scan->xs_recheck) + elog(ERROR, "CLUSTER does not support lossy index conditions"); + LockBuffer(scan->xs_cbuf, BUFFER_LOCK_SHARE); switch (HeapTupleSatisfiesVacuum(tuple->t_data, OldestXmin, diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 59f54ee5cb42c75cbc1143a4f3016a9806c5b826..7b6e3df65067717f3f0f5c6bbf3ec66181a18118 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.126 2008/03/18 03:54:52 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.127 2008/04/13 19:18:14 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -106,7 +106,7 @@ IndexNext(IndexScanState *node) /* * ok, now that we have what we need, fetch the next tuple. */ - if ((tuple = index_getnext(scandesc, direction)) != NULL) + while ((tuple = index_getnext(scandesc, direction)) != NULL) { /* * Store the scanned tuple in the scan tuple slot of the scan state. @@ -118,6 +118,18 @@ IndexNext(IndexScanState *node) scandesc->xs_cbuf, /* buffer containing tuple */ false); /* don't pfree */ + /* + * If the index was lossy, we have to recheck the index quals using + * the real tuple. 
+ */ + if (scandesc->xs_recheck) + { + econtext->ecxt_scantuple = slot; + ResetExprContext(econtext); + if (!ExecQual(node->indexqualorig, econtext, false)) + continue; /* nope, so ask index for another one */ + } + return slot; } diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index aafa7b539ffc193b2ef3af0be590b677adfe1715..286ded8ef38a0624dcab5b0fbc9c476740c26615 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -10,7 +10,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.237 2008/01/01 19:45:50 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.238 2008/04/13 19:18:14 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -47,13 +47,12 @@ static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path); static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path, List *tlist, List *scan_clauses); static IndexScan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path, - List *tlist, List *scan_clauses, - List **nonlossy_clauses); + List *tlist, List *scan_clauses); static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root, BitmapHeapPath *best_path, List *tlist, List *scan_clauses); static Plan *create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, - List **qual, List **indexqual); + List **qual); static TidScan *create_tidscan_plan(PlannerInfo *root, TidPath *best_path, List *tlist, List *scan_clauses); static SubqueryScan *create_subqueryscan_plan(PlannerInfo *root, Path *best_path, @@ -70,7 +69,6 @@ static HashJoin *create_hashjoin_plan(PlannerInfo *root, HashPath *best_path, Plan *outer_plan, Plan *inner_plan); static void fix_indexqual_references(List *indexquals, IndexPath *index_path, List **fixed_indexquals, - List **nonlossy_indexquals, List **indexstrategy, List **indexsubtype); static Node *fix_indexqual_operand(Node *node, IndexOptInfo *index, @@ -239,8 +237,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path) plan = (Plan *) create_indexscan_plan(root, (IndexPath *) best_path, tlist, - scan_clauses, - NULL); + scan_clauses); break; case T_BitmapHeapScan: @@ -840,16 +837,12 @@ create_seqscan_plan(PlannerInfo *root, Path *best_path, * The indexquals list of the path contains implicitly-ANDed qual conditions. * The list can be empty --- then no index restrictions will be applied during * the scan. - * - * If nonlossy_clauses isn't NULL, *nonlossy_clauses receives a list of the - * nonlossy indexquals. */ static IndexScan * create_indexscan_plan(PlannerInfo *root, IndexPath *best_path, List *tlist, - List *scan_clauses, - List **nonlossy_clauses) + List *scan_clauses) { List *indexquals = best_path->indexquals; Index baserelid = best_path->path.parent->relid; @@ -857,7 +850,6 @@ create_indexscan_plan(PlannerInfo *root, List *qpqual; List *stripped_indexquals; List *fixed_indexquals; - List *nonlossy_indexquals; List *indexstrategy; List *indexsubtype; ListCell *l; @@ -876,18 +868,13 @@ create_indexscan_plan(PlannerInfo *root, /* * The executor needs a copy with the indexkey on the left of each clause * and with index attr numbers substituted for table ones. This pass also - * gets strategy info and looks for "lossy" operators. + * gets strategy info. 
*/ fix_indexqual_references(indexquals, best_path, &fixed_indexquals, - &nonlossy_indexquals, &indexstrategy, &indexsubtype); - /* pass back nonlossy quals if caller wants 'em */ - if (nonlossy_clauses) - *nonlossy_clauses = nonlossy_indexquals; - /* * If this is an innerjoin scan, the indexclauses will contain join * clauses that are not present in scan_clauses (since the passed-in value @@ -907,9 +894,8 @@ create_indexscan_plan(PlannerInfo *root, * by the index. All the predicates in the indexquals will be checked * (either by the index itself, or by nodeIndexscan.c), but if there are * any "special" operators involved then they must be included in qpqual. - * Also, any lossy index operators must be rechecked in the qpqual. The - * upshot is that qpqual must contain scan_clauses minus whatever appears - * in nonlossy_indexquals. + * The upshot is that qpqual must contain scan_clauses minus whatever + * appears in indexquals. * * In normal cases simple pointer equality checks will be enough to spot * duplicate RestrictInfos, so we try that first. In some situations @@ -932,13 +918,13 @@ create_indexscan_plan(PlannerInfo *root, Assert(IsA(rinfo, RestrictInfo)); if (rinfo->pseudoconstant) continue; /* we may drop pseudoconstants here */ - if (list_member_ptr(nonlossy_indexquals, rinfo)) + if (list_member_ptr(indexquals, rinfo)) continue; if (!contain_mutable_functions((Node *) rinfo->clause)) { List *clausel = list_make1(rinfo->clause); - if (predicate_implied_by(clausel, nonlossy_indexquals)) + if (predicate_implied_by(clausel, indexquals)) continue; if (best_path->indexinfo->indpred) { @@ -990,7 +976,6 @@ create_bitmap_scan_plan(PlannerInfo *root, Index baserelid = best_path->path.parent->relid; Plan *bitmapqualplan; List *bitmapqualorig; - List *indexquals; List *qpqual; ListCell *l; BitmapHeapScan *scan_plan; @@ -999,9 +984,9 @@ create_bitmap_scan_plan(PlannerInfo *root, Assert(baserelid > 0); Assert(best_path->path.parent->rtekind == RTE_RELATION); - /* Process the bitmapqual tree into a Plan tree and qual lists */ + /* Process the bitmapqual tree into a Plan tree and qual list */ bitmapqualplan = create_bitmap_subplan(root, best_path->bitmapqual, - &bitmapqualorig, &indexquals); + &bitmapqualorig); /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */ scan_clauses = extract_actual_clauses(scan_clauses, false); @@ -1023,9 +1008,9 @@ create_bitmap_scan_plan(PlannerInfo *root, * The qpqual list must contain all restrictions not automatically handled * by the index. All the predicates in the indexquals will be checked * (either by the index itself, or by nodeBitmapHeapscan.c), but if there - * are any "special" or lossy operators involved then they must be added - * to qpqual. The upshot is that qpqual must contain scan_clauses minus - * whatever appears in indexquals. + * are any "special" operators involved then they must be added to qpqual. + * The upshot is that qpqual must contain scan_clauses minus whatever + * appears in bitmapqualorig. * * In normal cases simple equal() checks will be enough to spot duplicate * clauses, so we try that first. In some situations (particularly with @@ -1037,22 +1022,22 @@ create_bitmap_scan_plan(PlannerInfo *root, * * Unlike create_indexscan_plan(), we need take no special thought here * for partial index predicates; this is because the predicate conditions - * are already listed in bitmapqualorig and indexquals. 
Bitmap scans have - * to do it that way because predicate conditions need to be rechecked if - * the scan becomes lossy. + * are already listed in bitmapqualorig. Bitmap scans have to do it that + * way because predicate conditions need to be rechecked if the scan's + * bitmap becomes lossy. */ qpqual = NIL; foreach(l, scan_clauses) { Node *clause = (Node *) lfirst(l); - if (list_member(indexquals, clause)) + if (list_member(bitmapqualorig, clause)) continue; if (!contain_mutable_functions(clause)) { List *clausel = list_make1(clause); - if (predicate_implied_by(clausel, indexquals)) + if (predicate_implied_by(clausel, bitmapqualorig)) continue; } qpqual = lappend(qpqual, clause); @@ -1062,7 +1047,7 @@ create_bitmap_scan_plan(PlannerInfo *root, qpqual = order_qual_clauses(root, qpqual); /* - * When dealing with special or lossy operators, we will at this point + * When dealing with special operators, we will at this point * have duplicate clauses in qpqual and bitmapqualorig. We may as well * drop 'em from bitmapqualorig, since there's no point in making the * tests twice. @@ -1086,19 +1071,19 @@ create_bitmap_scan_plan(PlannerInfo *root, /* * Given a bitmapqual tree, generate the Plan tree that implements it * - * As byproducts, we also return in *qual and *indexqual the qual lists - * (in implicit-AND form, without RestrictInfos) describing the original index - * conditions and the generated indexqual conditions. The latter is made to - * exclude lossy index operators. Both lists include partial-index predicates, - * because we have to recheck predicates as well as index conditions if the - * bitmap scan becomes lossy. + * As a byproduct, we also return in *qual a qual list (in implicit-AND + * form, without RestrictInfos) describing the generated indexqual + * conditions, as needed for rechecking heap tuples in lossy cases. + * This list also includes partial-index predicates, because we have to + * recheck predicates as well as index conditions if the scan's bitmap + * becomes lossy. * * Note: if you find yourself changing this, you probably need to change * make_restrictinfo_from_bitmapqual too. 
*/ static Plan * create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, - List **qual, List **indexqual) + List **qual) { Plan *plan; @@ -1107,7 +1092,6 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, BitmapAndPath *apath = (BitmapAndPath *) bitmapqual; List *subplans = NIL; List *subquals = NIL; - List *subindexquals = NIL; ListCell *l; /* @@ -1121,13 +1105,11 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, { Plan *subplan; List *subqual; - List *subindexqual; subplan = create_bitmap_subplan(root, (Path *) lfirst(l), - &subqual, &subindexqual); + &subqual); subplans = lappend(subplans, subplan); subquals = list_concat_unique(subquals, subqual); - subindexquals = list_concat_unique(subindexquals, subindexqual); } plan = (Plan *) make_bitmap_and(subplans); plan->startup_cost = apath->path.startup_cost; @@ -1136,16 +1118,13 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, clamp_row_est(apath->bitmapselectivity * apath->path.parent->tuples); plan->plan_width = 0; /* meaningless */ *qual = subquals; - *indexqual = subindexquals; } else if (IsA(bitmapqual, BitmapOrPath)) { BitmapOrPath *opath = (BitmapOrPath *) bitmapqual; List *subplans = NIL; List *subquals = NIL; - List *subindexquals = NIL; bool const_true_subqual = false; - bool const_true_subindexqual = false; ListCell *l; /* @@ -1161,21 +1140,15 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, { Plan *subplan; List *subqual; - List *subindexqual; subplan = create_bitmap_subplan(root, (Path *) lfirst(l), - &subqual, &subindexqual); + &subqual); subplans = lappend(subplans, subplan); if (subqual == NIL) const_true_subqual = true; else if (!const_true_subqual) subquals = lappend(subquals, make_ands_explicit(subqual)); - if (subindexqual == NIL) - const_true_subindexqual = true; - else if (!const_true_subindexqual) - subindexquals = lappend(subindexquals, - make_ands_explicit(subindexqual)); } /* @@ -1207,23 +1180,15 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, *qual = subquals; else *qual = list_make1(make_orclause(subquals)); - if (const_true_subindexqual) - *indexqual = NIL; - else if (list_length(subindexquals) <= 1) - *indexqual = subindexquals; - else - *indexqual = list_make1(make_orclause(subindexquals)); } else if (IsA(bitmapqual, IndexPath)) { IndexPath *ipath = (IndexPath *) bitmapqual; IndexScan *iscan; - List *nonlossy_clauses; ListCell *l; /* Use the regular indexscan plan build machinery... */ - iscan = create_indexscan_plan(root, ipath, NIL, NIL, - &nonlossy_clauses); + iscan = create_indexscan_plan(root, ipath, NIL, NIL); /* then convert to a bitmap indexscan */ plan = (Plan *) make_bitmap_indexscan(iscan->scan.scanrelid, iscan->indexid, @@ -1237,7 +1202,6 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, clamp_row_est(ipath->indexselectivity * ipath->path.parent->tuples); plan->plan_width = 0; /* meaningless */ *qual = get_actual_clauses(ipath->indexclauses); - *indexqual = get_actual_clauses(nonlossy_clauses); foreach(l, ipath->indexinfo->indpred) { Expr *pred = (Expr *) lfirst(l); @@ -1249,10 +1213,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, * generating redundant conditions. 
*/ if (!predicate_implied_by(list_make1(pred), ipath->indexclauses)) - { *qual = lappend(*qual, pred); - *indexqual = lappend(*indexqual, pred); - } } } else @@ -1794,9 +1755,9 @@ create_hashjoin_plan(PlannerInfo *root, /* * fix_indexqual_references * Adjust indexqual clauses to the form the executor's indexqual - * machinery needs, and check for recheckable (lossy) index conditions. + * machinery needs. * - * We have five tasks here: + * We have four tasks here: * * Remove RestrictInfo nodes from the input clauses. * * Index keys must be represented by Var nodes with varattno set to the * index's attribute number, not the attribute number in the original rel. @@ -1804,24 +1765,18 @@ create_hashjoin_plan(PlannerInfo *root, * left. * * We must construct lists of operator strategy numbers and subtypes * for the top-level operators of each index clause. - * * We must detect any lossy index operators. The API is that we return - * a list of the input clauses whose operators are NOT lossy. * * fixed_indexquals receives a modified copy of the indexquals list --- the * original is not changed. Note also that the copy shares no substructure * with the original; this is needed in case there is a subplan in it (we need * two separate copies of the subplan tree, or things will go awry). * - * nonlossy_indexquals receives a list of the original input clauses (with - * RestrictInfos) that contain non-lossy operators. - * * indexstrategy receives an integer list of strategy numbers. * indexsubtype receives an OID list of strategy subtypes. */ static void fix_indexqual_references(List *indexquals, IndexPath *index_path, List **fixed_indexquals, - List **nonlossy_indexquals, List **indexstrategy, List **indexsubtype) { @@ -1829,7 +1784,6 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path, ListCell *l; *fixed_indexquals = NIL; - *nonlossy_indexquals = NIL; *indexstrategy = NIL; *indexsubtype = NIL; @@ -1837,7 +1791,7 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path, * For each qual clause, commute if needed to put the indexkey operand on * the left, and then fix its varattno. (We do not need to change the * other side of the clause.) Then determine the operator's strategy - * number and subtype number, and check for lossy index behavior. + * number and subtype number. */ foreach(l, indexquals) { @@ -1848,7 +1802,6 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path, int stratno; Oid stratlefttype; Oid stratrighttype; - bool recheck; bool is_null_op = false; Assert(IsA(rinfo, RestrictInfo)); @@ -1961,8 +1914,6 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path, /* IS NULL doesn't have a clause_op */ stratno = InvalidStrategy; stratrighttype = InvalidOid; - /* We assume it's non-lossy ... might need more work someday */ - recheck = false; } else { @@ -1971,6 +1922,8 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path, * to get its strategy number and the recheck indicator. This also * double-checks that we found an operator matching the index. 
*/ + bool recheck; + get_op_opfamily_properties(clause_op, opfamily, &stratno, &stratlefttype, @@ -1980,10 +1933,6 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path, *indexstrategy = lappend_int(*indexstrategy, stratno); *indexsubtype = lappend_oid(*indexsubtype, stratrighttype); - - /* If it's not lossy, add to nonlossy_indexquals */ - if (!recheck) - *nonlossy_indexquals = lappend(*nonlossy_indexquals, rinfo); } } diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h index 637b363f52855beb3750224a28a1d331237fef18..cf8f8a0c4dfd857f17021c977719d4e92b04c9fd 100644 --- a/src/include/access/relscan.h +++ b/src/include/access/relscan.h @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/include/access/relscan.h,v 1.63 2008/04/12 23:14:21 tgl Exp $ + * $PostgreSQL: pgsql/src/include/access/relscan.h,v 1.64 2008/04/13 19:18:14 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -76,15 +76,16 @@ typedef struct IndexScanDescData /* index access method's private state */ void *opaque; /* access-method-specific info */ - /* xs_ctup/xs_cbuf are valid after a successful index_getnext */ + /* xs_ctup/xs_cbuf/xs_recheck are valid after a successful index_getnext */ HeapTupleData xs_ctup; /* current heap tuple, if any */ Buffer xs_cbuf; /* current heap buffer in scan, if any */ /* NB: if xs_cbuf is not InvalidBuffer, we hold a pin on that buffer */ + bool xs_recheck; /* T means scan keys must be rechecked */ /* state data for traversing HOT chains in index_getnext */ - TransactionId xs_prev_xmax; /* previous HOT chain member's XMAX, if any */ - OffsetNumber xs_next_hot; /* next member of HOT chain, if any */ bool xs_hot_dead; /* T if all members of HOT chain are dead */ + OffsetNumber xs_next_hot; /* next member of HOT chain, if any */ + TransactionId xs_prev_xmax; /* previous HOT chain member's XMAX, if any */ } IndexScanDescData; typedef IndexScanDescData *IndexScanDesc;