diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 60926af2db8cfad747757d8b2456a9ed16c9dccc..2b811d9606e4afc19e35ef2eba3366e49460b239 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -27,7 +27,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.167 2002/06/25 17:58:10 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.168 2002/06/26 21:58:56 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -62,14 +62,14 @@ static TupleTableSlot *ExecutePlan(EState *estate, Plan *plan,
 			long numberTuples,
 			ScanDirection direction,
 			DestReceiver *destfunc);
-static void ExecRetrieve(TupleTableSlot *slot,
+static void ExecSelect(TupleTableSlot *slot,
 			DestReceiver *destfunc,
 			EState *estate);
-static void ExecAppend(TupleTableSlot *slot, ItemPointer tupleid,
+static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
 			EState *estate);
 static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid,
 			EState *estate);
-static void ExecReplace(TupleTableSlot *slot, ItemPointer tupleid,
+static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
 			EState *estate);
 static TupleTableSlot *EvalPlanQualNext(EState *estate);
 static void EndEvalPlanQual(EState *estate);
@@ -583,7 +583,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
 
 	/*
 	 * Get the tuple descriptor describing the type of tuples to return.
	 * (this is especially important if we are creating a relation with
-	 * "retrieve into")
+	 * "SELECT INTO")
 	 */
 	tupType = ExecGetTupType(plan);		/* tuple descriptor */
@@ -892,7 +892,7 @@ EndPlan(Plan *plan, EState *estate)
  *		Retrieves all tuples if numberTuples is 0
  *
  *		result is either a slot containing the last tuple in the case
- *		of a RETRIEVE or NULL otherwise.
+ *		of a SELECT or NULL otherwise.
  *
  *		Note: the ctid attribute is a 'junk' attribute that is removed before the
  *		user can see it
@@ -1068,29 +1068,26 @@ lnext:	;
 			slot = ExecStoreTuple(newTuple,		/* tuple to store */
 						junkfilter->jf_resultSlot,	/* dest slot */
-						InvalidBuffer,	/* this tuple has no
-						 * buffer */
+						InvalidBuffer,	/* this tuple has no buffer */
 						true);	/* tuple should be pfreed */
-		}						/* if (junkfilter... */
+		}
 
 		/*
 		 * now that we have a tuple, do the appropriate thing with it..
 		 * either return it to the user, add it to a relation someplace,
 		 * delete it from a relation, or modify some of its attributes.
 		 */
-
 		switch (operation)
 		{
 			case CMD_SELECT:
-				ExecRetrieve(slot,	/* slot containing tuple */
-					destfunc,	/* destination's tuple-receiver
-					 * obj */
-					estate);	/* */
+				ExecSelect(slot,	/* slot containing tuple */
+					destfunc,	/* destination's tuple-receiver obj */
+					estate);
 				result = slot;
 				break;
 
 			case CMD_INSERT:
-				ExecAppend(slot, tupleid, estate);
+				ExecInsert(slot, tupleid, estate);
 				result = NULL;
 				break;
@@ -1100,7 +1097,7 @@ lnext:	;
 				break;
 
 			case CMD_UPDATE:
-				ExecReplace(slot, tupleid, estate);
+				ExecUpdate(slot, tupleid, estate);
 				result = NULL;
 				break;
@@ -1121,25 +1118,25 @@ lnext:	;
 
 	/*
 	 * here, result is either a slot containing a tuple in the case of a
-	 * RETRIEVE or NULL otherwise.
+	 * SELECT or NULL otherwise.
 	 */
 	return result;
 }
 
 /* ----------------------------------------------------------------
- *		ExecRetrieve
+ *		ExecSelect
  *
- *		RETRIEVEs are easy.. we just pass the tuple to the appropriate
+ *		SELECTs are easy.. we just pass the tuple to the appropriate
 *		print function.  The only complexity is when we do a
- *		"retrieve into", in which case we insert the tuple into
+ *		"SELECT INTO", in which case we insert the tuple into
 *		the appropriate relation (note: this is a newly created relation
 *		so we don't need to worry about indices or locks.)
 * ----------------------------------------------------------------
 */
 static void
-ExecRetrieve(TupleTableSlot *slot,
-			 DestReceiver *destfunc,
-			 EState *estate)
+ExecSelect(TupleTableSlot *slot,
+		   DestReceiver *destfunc,
+		   EState *estate)
 {
 	HeapTuple	tuple;
 	TupleDesc	attrtype;
@@ -1169,16 +1166,15 @@ ExecRetrieve(TupleTableSlot *slot,
 }
 
 /* ----------------------------------------------------------------
- *		ExecAppend
+ *		ExecInsert
  *
- *		APPENDs are trickier.. we have to insert the tuple into
+ *		INSERTs are trickier.. we have to insert the tuple into
 *		the base relation and insert appropriate tuples into the
 *		index relations.
 * ----------------------------------------------------------------
 */
-
 static void
-ExecAppend(TupleTableSlot *slot,
+ExecInsert(TupleTableSlot *slot,
 		   ItemPointer tupleid,
 		   EState *estate)
 {
@@ -1227,7 +1223,7 @@ ExecAppend(TupleTableSlot *slot,
 	 * Check the constraints of the tuple
 	 */
 	if (resultRelationDesc->rd_att->constr)
-		ExecConstraints("ExecAppend", resultRelInfo, slot, estate);
+		ExecConstraints("ExecInsert", resultRelInfo, slot, estate);
 
 	/*
 	 * insert the tuple
@@ -1259,7 +1255,7 @@ ExecAppend(TupleTableSlot *slot,
 /* ----------------------------------------------------------------
 *		ExecDelete
 *
- *		DELETE is like append, we delete the tuple and its
+ *		DELETE is like UPDATE, we delete the tuple and its
 *		index tuples.
 * ----------------------------------------------------------------
 */
@@ -1346,18 +1342,18 @@ ldelete:;
 }
 
 /* ----------------------------------------------------------------
- *		ExecReplace
+ *		ExecUpdate
  *
- *		note: we can't run replace queries with transactions
- *		off because replaces are actually appends and our
- *		scan will mistakenly loop forever, replacing the tuple
- *		it just appended.. This should be fixed but until it
+ *		note: we can't run UPDATE queries with transactions
+ *		off because UPDATEs are actually INSERTs and our
+ *		scan will mistakenly loop forever, updating the tuple
+ *		it just inserted.. This should be fixed but until it
 *		is, we don't want to get stuck in an infinite loop
 *		which corrupts your database..
 * ----------------------------------------------------------------
 */
 static void
-ExecReplace(TupleTableSlot *slot,
+ExecUpdate(TupleTableSlot *slot,
 		   ItemPointer tupleid,
 		   EState *estate)
 {
@@ -1472,7 +1468,7 @@ lreplace:;
 	/*
 	 * Note: instead of having to update the old index tuples associated
 	 * with the heap tuple, all we do is form and insert new index tuples.
-	 * This is because replaces are actually deletes and inserts and index
+	 * This is because UPDATEs are actually DELETEs and INSERTs and index
 	 * tuple deletion is done automagically by the vacuum daemon. All we
 	 * do is insert new index tuples. -cim 9/27/89
 	 */
@@ -1481,7 +1477,7 @@ lreplace:;
 	 * process indices
 	 *
 	 * heap_update updates a tuple in the base relation by invalidating it
-	 * and then appending a new tuple to the relation. As a side effect,
+	 * and then inserting a new tuple to the relation. As a side effect,
 	 * the tupleid of the new tuple is placed in the new tuple's t_ctid
 	 * field. So we now insert index tuples using the new tupleid stored
 	 * there.
@@ -1554,7 +1550,7 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
 }
 
 void
-ExecConstraints(char *caller, ResultRelInfo *resultRelInfo,
+ExecConstraints(const char *caller, ResultRelInfo *resultRelInfo,
 				TupleTableSlot *slot, EState *estate)
 {
 	Relation	rel = resultRelInfo->ri_RelationDesc;
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index f337ef9d14510af861e90562481a87312c3be1f0..7799ed4d9353122c7b5643e8a9869120ab427a52 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.85 2002/06/25 17:58:10 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.86 2002/06/26 21:58:56 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -18,7 +18,7 @@
  *
  *		ExecOpenIndices			\
  *		ExecCloseIndices		 | referenced by InitPlan, EndPlan,
- *		ExecInsertIndexTuples	/  ExecAppend, ExecReplace
+ *		ExecInsertIndexTuples	/  ExecInsert, ExecUpdate
  *
  *		RegisterExprContextCallback    Register function shutdown callback
  *		UnregisterExprContextCallback  Deregister function shutdown callback
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index ecf22eb118933ea49a158beda8c3c1885e88ce52..4a75d13761e5252e25c301784d7ad88463a6e8df 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -42,7 +42,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.87 2002/06/25 17:58:10 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.88 2002/06/26 21:58:56 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -154,11 +154,11 @@ cost_seqscan(Path *path, Query *root,
 *
 * Given a guesstimated cache size, we estimate the actual I/O cost per page
 * with the entirely ad-hoc equations:
- *	for rel_size <= effective_cache_size:
- *		1 + (random_page_cost/2-1) * (rel_size/effective_cache_size) ** 2
- *	for rel_size >= effective_cache_size:
- *		random_page_cost * (1 - (effective_cache_size/rel_size)/2)
- * These give the right asymptotic behavior (=> 1.0 as rel_size becomes
+ *	if relpages >= effective_cache_size:
+ *		random_page_cost * (1 - (effective_cache_size/relpages)/2)
+ *	if relpages < effective_cache_size:
+ *		1 + (random_page_cost/2-1) * (relpages/effective_cache_size) ** 2
+ * These give the right asymptotic behavior (=> 1.0 as relpages becomes
 * small, => random_page_cost as it becomes large) and meet in the middle
 * with the estimate that the cache is about 50% effective for a relation
 * of the same size as effective_cache_size.  (XXX this is probably all
diff --git a/src/backend/optimizer/prep/_deadcode/prepkeyset.c b/src/backend/optimizer/prep/_deadcode/prepkeyset.c
index b67431716fb3a5da2617b4b0e08f7e064fe09180..9de8ebb6c7fbc6f85e520c147d43d862a66e86a3 100644
--- a/src/backend/optimizer/prep/_deadcode/prepkeyset.c
+++ b/src/backend/optimizer/prep/_deadcode/prepkeyset.c
@@ -1,7 +1,7 @@
 /*-------------------------------------------------------------------------
 *
 * prepkeyset.c
- *	  Special preperation for keyset queries.
+ *	  Special preparation for keyset queries (KSQO).
 *
 * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
@@ -14,12 +14,6 @@
 #include "postgres.h"
 #include "optimizer/planmain.h"
 
-/*
- * Node_Copy
- *	  a macro to simplify calling of copyObject on the specified field
- */
-#define Node_Copy(from, newnode, field) newnode->field = copyObject(from->field)
-
 bool		_use_keyset_query_optimizer = FALSE;
 
 #ifdef ENABLE_KEY_SET_QUERY
@@ -55,13 +49,20 @@ static int	TotalExpr;
 * a HAVING, or a GROUP BY.  It must be a single table and have KSQO
 * set to 'on'.
 *
- * The primary use of this transformation is to avoid the exponrntial
+ * The primary use of this transformation is to avoid the exponential
 * memory consumption of cnfify() and to make use of index access
 * methods.
 *
 * daveh@insightdist.com  1998-08-31
 *
 * May want to also prune out duplicate terms.
+ *
+ * XXX: this code is currently not compiled because it has not been
+ * updated to work with the re-implementation of UNION/INTERSECT/EXCEPT
+ * in PostgreSQL 7.1.  However, it is of questionable value in any
+ * case, because it changes the semantics of the original query:
+ * UNION will add an implicit SELECT DISTINCT, which might change
+ * the results that are returned.
 **********************************************************************/
 void
 transformKeySetQuery(Query *origNode)
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index c960c0e1d78f277a1e50ed825f717017e3204cb6..ee399f9b3a8bae02eeec703bc9853551d95eaa8a 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -7,7 +7,7 @@
 * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
- * $Id: executor.h,v 1.68 2002/06/25 17:58:10 momjian Exp $
+ * $Id: executor.h,v 1.69 2002/06/26 21:58:56 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -52,7 +52,7 @@ extern TupleDesc ExecutorStart(QueryDesc *queryDesc, EState *estate);
 extern TupleTableSlot *ExecutorRun(QueryDesc *queryDesc, EState *estate,
			ScanDirection direction, long count);
 extern void ExecutorEnd(QueryDesc *queryDesc, EState *estate);
-extern void ExecConstraints(char *caller, ResultRelInfo *resultRelInfo,
+extern void ExecConstraints(const char *caller, ResultRelInfo *resultRelInfo,
			TupleTableSlot *slot, EState *estate);
 extern TupleTableSlot *EvalPlanQual(EState *estate, Index rti,
			ItemPointer tid);
diff --git a/src/test/regress/expected/create_misc.out b/src/test/regress/expected/create_misc.out
index 1842314ce9a8e878cf924600be676ce7975b9311..2067e5ea47d31dd062905fecd5488a3bf672a05b 100644
--- a/src/test/regress/expected/create_misc.out
+++ b/src/test/regress/expected/create_misc.out
@@ -151,3 +151,13 @@ SELECT * FROM serialTest;
  force |   100
 (3 rows)
 
+CREATE SEQUENCE sequence_test;
+BEGIN;
+SELECT nextval('sequence_test');
+ nextval 
+---------
+       1
+(1 row)
+
+DROP SEQUENCE sequence_test;
+END;
diff --git a/src/test/regress/expected/select_having.out b/src/test/regress/expected/select_having.out
index 3f069996fc9fa7471c9b00042bbab070a61168fd..29321e441464b8aff9bc3a3eef43cc28dd910f3d 100644
--- a/src/test/regress/expected/select_having.out
+++ b/src/test/regress/expected/select_having.out
@@ -21,6 +21,15 @@ SELECT b, c FROM test_having
  3 | bbbb
 (2 rows)
 
+-- HAVING is equivalent to WHERE in this case
+SELECT b, c FROM test_having
+	GROUP BY b, c HAVING b = 3;
+ b |    c
+---+----------
+ 3 | BBBB
+ 3 | bbbb
+(2 rows)
+
 SELECT lower(c), count(c) FROM test_having
 	GROUP BY lower(c) HAVING count(*) > 2 OR min(a) = max(a);
  lower | count
diff --git a/src/test/regress/sql/create_misc.sql b/src/test/regress/sql/create_misc.sql
index 078450a754416d6fa421bebc42080f260b66f05f..2277d5c8b2b813eb98de605452dcf5669627bfe3 100644
--- a/src/test/regress/sql/create_misc.sql
+++ b/src/test/regress/sql/create_misc.sql
@@ -217,3 +217,10 @@ INSERT INTO serialTest VALUES ('force', 100);
 INSERT INTO serialTest VALUES ('wrong', NULL);
 
 SELECT * FROM serialTest;
+
+CREATE SEQUENCE sequence_test;
+
+BEGIN;
+SELECT nextval('sequence_test');
+DROP SEQUENCE sequence_test;
+END;
diff --git a/src/test/regress/sql/select_having.sql b/src/test/regress/sql/select_having.sql
index 44b0329ee5f8df4467c62f463a4a73b2796fb950..28b22d9859f7ad601c24ef2eb2bd5dd56d54816b 100644
--- a/src/test/regress/sql/select_having.sql
+++ b/src/test/regress/sql/select_having.sql
@@ -18,6 +18,10 @@ INSERT INTO test_having VALUES (9, 4, 'CCCC', 'j');
 
 SELECT b, c FROM test_having
 	GROUP BY b, c HAVING count(*) = 1;
 
+-- HAVING is equivalent to WHERE in this case
+SELECT b, c FROM test_having
+	GROUP BY b, c HAVING b = 3;
+
 SELECT lower(c), count(c) FROM test_having
 	GROUP BY lower(c) HAVING count(*) > 2 OR min(a) = max(a);
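
The new select_having test exercises a HAVING clause that references only a grouping column and no aggregates; for such a clause, filtering groups after GROUP BY gives the same rows as filtering with WHERE before grouping. As a quick cross-check against the same test_having data (not part of the patch), both of these should return the two groups shown in the expected output, (3, BBBB) and (3, bbbb):

    SELECT b, c FROM test_having GROUP BY b, c HAVING b = 3;
    SELECT b, c FROM test_having WHERE b = 3 GROUP BY b, c;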
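
The XXX comment added to prepkeyset.c notes that the keyset rewrite is semantically lossy because UNION implies DISTINCT. A small illustration of that general point, using a made-up table (keyset_demo is hypothetical and not part of the regression suite):

    CREATE TABLE keyset_demo (a int, b int, val text);
    INSERT INTO keyset_demo VALUES (1, 1, 'x');
    INSERT INTO keyset_demo VALUES (1, 1, 'x');

    -- The OR form returns the duplicate row twice:
    SELECT val FROM keyset_demo WHERE (a = 1 AND b = 1) OR (a = 2 AND b = 2);

    -- A UNION rewrite of the same predicate returns it only once, because
    -- UNION (without ALL) removes duplicate result rows:
    SELECT val FROM keyset_demo WHERE a = 1 AND b = 1
    UNION
    SELECT val FROM keyset_demo WHERE a = 2 AND b = 2;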
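
The reordered comment in the costsize.c hunk keeps the two ad-hoc per-page cost branches. As a quick arithmetic check of the "meet in the middle" claim: at the crossover, where relpages equals effective_cache_size, both branches reduce to random_page_cost/2, e.g. with the default random_page_cost of 4.0 (a standalone query just to evaluate the two formulas at that point):

    SELECT 4.0 * (1 - 1.0/2)            AS large_rel_branch,  -- 2.0
           1 + (4.0/2 - 1) * 1.0 * 1.0  AS small_rel_branch;  -- 2.0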