From e55985d3be4aed47c703674c735c124ff0b3c03c Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Thu, 30 Mar 2000 00:53:30 +0000
Subject: [PATCH] Tweak indexscan cost estimation: round the estimated # of
 tuples visited up to the next integer.  Previously, if selectivity was small,
 we could compute a very tiny scan cost on the basis of estimating that only
 0.001 tuple would be fetched, which is silly.  This naturally led to some
 rather silly plans...

---
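Notes (below the cut, so they are not part of the commit message):

The cost_index hunk leaves the logarithmic page-fetch formula alone but rounds
both the tuple and page estimates up to the next integer, so a very selective
qual can no longer yield an estimate of a fraction of a tuple and a
correspondingly negligible scan cost.  A minimal standalone sketch of the
before/after arithmetic, using made-up selectivity and relation sizes
(illustration only, not code from the tree; build with "cc demo.c -lm"):

#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		selectivity = 0.0001;	/* made-up: very selective qual */
	double		tuples = 5000.0;		/* made-up relation sizes */
	double		pages = 100.0;

	/* old estimates: can drop below one tuple / one page */
	double		old_tuples = selectivity * tuples;
	double		old_pages = pages * log(old_tuples / pages + 1.0);

	/* new estimates: rounded up to the next whole tuple / page */
	double		new_tuples = ceil(selectivity * tuples);
	double		new_pages = ceil(pages * log(new_tuples / pages + 1.0));

	printf("old: %.3f tuples, %.3f pages\n", old_tuples, old_pages);
	printf("new: %.0f tuples, %.0f pages\n", new_tuples, new_pages);
	return 0;
}

With these numbers the old code charges the scan for about half a tuple and
half a page; the new code charges for at least one of each.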
 src/backend/optimizer/path/costsize.c | 10 +++++-----
 src/backend/utils/adt/selfuncs.c      |  8 +++++---
 2 files changed, 10 insertions(+), 8 deletions(-)
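The cost_hashjoin hunk applies the same idea to the per-probe comparison work:
the expected number of inner tuples examined per outer tuple is estimated as
the inner row count times the inner disbursion, and rounding that product up
keeps it from falling below one comparison per probe.  The selfuncs.c hunk does
likewise for genericcostestimate's index tuple and page counts, which is why
that file now needs <math.h> for ceil().  Another toy sketch, with invented row
counts and assuming the default cpu_operator_cost of 0.0025 (illustration
only):

#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		cpu_operator_cost = 0.0025;	/* assumed planner default */
	double		outer_rows = 10000.0;		/* invented row estimates */
	double		inner_rows = 200.0;
	double		inner_disbursion = 0.001;	/* invented: nearly unique keys */

	/* old: charges only 0.2 comparisons per outer probe */
	double		old_term = cpu_operator_cost * outer_rows *
		(inner_rows * inner_disbursion);

	/* new: at least one comparison is charged per outer probe */
	double		new_term = cpu_operator_cost * outer_rows *
		ceil(inner_rows * inner_disbursion);

	printf("old run_cost term: %.2f\n", old_term);	/* 5.00 */
	printf("new run_cost term: %.2f\n", new_term);	/* 25.00 */
	return 0;
}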

diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 4270032d278..37b4dfc42e7 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -42,7 +42,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.54 2000/03/22 22:08:33 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.55 2000/03/30 00:53:29 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -262,11 +262,11 @@ cost_index(Path *path, Query *root,
 	 * effect.  Would be nice to do better someday.
 	 */
 
-	tuples_fetched = indexSelectivity * baserel->tuples;
+	tuples_fetched = ceil(indexSelectivity * baserel->tuples);
 
 	if (tuples_fetched > 0 && baserel->pages > 0)
-		pages_fetched = baserel->pages *
-			log(tuples_fetched / baserel->pages + 1.0);
+		pages_fetched = ceil(baserel->pages *
+							 log(tuples_fetched / baserel->pages + 1.0));
 	else
 		pages_fetched = tuples_fetched;
 
@@ -594,7 +594,7 @@ cost_hashjoin(Path *path,
 	 * conservatively as the inner disbursion times the inner tuple count.
 	 */
 	run_cost += cpu_operator_cost * outer_path->parent->rows *
-		(inner_path->parent->rows * innerdisbursion);
+		ceil(inner_path->parent->rows * innerdisbursion);
 
 	/*
 	 * Estimate the number of tuples that get through the hashing filter
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index af7a449f697..0cd408ed78f 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -15,13 +15,15 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v 1.61 2000/03/23 00:55:42 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v 1.62 2000/03/30 00:53:30 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
 
 #include "postgres.h"
 
+#include <math.h>
+
 #include "access/heapam.h"
 #include "catalog/catname.h"
 #include "catalog/pg_operator.h"
@@ -900,10 +902,10 @@ genericcostestimate(Query *root, RelOptInfo *rel,
 											   lfirsti(rel->relids));
 
 	/* Estimate the number of index tuples that will be visited */
-	numIndexTuples = *indexSelectivity * index->tuples;
+	numIndexTuples = ceil(*indexSelectivity * index->tuples);
 
 	/* Estimate the number of index pages that will be retrieved */
-	numIndexPages = *indexSelectivity * index->pages;
+	numIndexPages = ceil(*indexSelectivity * index->pages);
 
 	/*
 	 * Compute the index access cost.
-- 
GitLab