diff --git a/contrib/Contrib.index b/contrib/Contrib.index
index 3fc460cc473e6d742da405c2a6c05ad065697261..fe183127ea3e4959e5b177111cab207a6d2a72d3 100644
--- a/contrib/Contrib.index
+++ b/contrib/Contrib.index
@@ -2,7 +2,7 @@
 The PostgreSQL contrib:
 ~~~~~~~~~~~~~~~~~~~~~~
 
-apache_logging -
+tips/apache_logging -
 	Getting Apache to log to PostgreSQL
 	by Terry Mackintosh <terry@terrym.com>
 
@@ -27,12 +27,6 @@ isbn_issn -
 	PostgreSQL type extensions for ISBN (books) and ISSN (serials)
 	by Garrett A. Wollman <wollman@khavrinen.lcs.mit.edu>
 
-likeplanning -
-	Scripts to enable/disable new planning code for LIKE and regexp
-	pattern match operators.  These will go away again once the code
-	is mature enough to enable by default.
-	by Tom Lane <tgl@sss.pgh.pa.us>
-
 linux -
 	Start postgres back end system
 	by Thomas Lockhart <lockhart@alumni.caltech.edu>
diff --git a/contrib/Makefile b/contrib/Makefile
index 70c0cddbab1edcc5ce1ff8bba97baa9dbeafc5fa..2a3b099afaf37ceac8846bc39eb50f56d3ccee7b 100644
--- a/contrib/Makefile
+++ b/contrib/Makefile
@@ -3,7 +3,7 @@
 #
 #	Portions Copyright (c) 1999-2000, PostgreSQL, Inc
 #
-# $Header: /cvsroot/pgsql/contrib/Makefile,v 1.4 2000/06/15 18:54:29 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/Makefile,v 1.5 2000/06/19 13:52:59 momjian Exp $
 #
 
 TOPDIR = ..
@@ -14,7 +14,6 @@ WANTED_DIRS =	array		\
 		findoidjoins	\
 		fulltextindex	\
 		isbn_issn	\
-		likeplanning	\
 		linux		\
 		lo		\
 		mSQL-interface	\
@@ -31,7 +30,6 @@ WANTED_DIRS =	array		\
 		userlock	\
 		vacuumlo	
 	#	odbc
-	#	os2client	
 
 
 all:
diff --git a/contrib/Makefile.global b/contrib/Makefile.global
index 43cbf853b8aacac44301454b91477c3f94321d42..f1efad49d1c149050485d9556e5ce2a5873d2c92 100644
--- a/contrib/Makefile.global
+++ b/contrib/Makefile.global
@@ -8,7 +8,7 @@
 #
 #
 # IDENTIFICATION
-#    $Header: /cvsroot/pgsql/contrib/Attic/Makefile.global,v 1.1 2000/06/15 19:04:37 momjian Exp $
+#    $Header: /cvsroot/pgsql/contrib/Attic/Makefile.global,v 1.2 2000/06/19 13:52:59 momjian Exp $
 #
 #-------------------------------------------------------------------------
 
diff --git a/contrib/README b/contrib/README
index 97166f27cf5121d59232757529041bfcd03dbdf8..23cfe8c4d29e612fa2902686b3e0147201a88fa5 100644
--- a/contrib/README
+++ b/contrib/README
@@ -3,18 +3,18 @@
  ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 	FIXME:
-		os2client	
 		odbc
 		spi/preprocessor
 		tools		
-	
+
+ - the list of contrib modules is in the "Contrib.index" file
 
 - each directory must contain a Makefile; a possible Makefile template
   is shown below this text,
 
 --------
 #
-# $Header: /cvsroot/pgsql/contrib/README,v 1.18 2000/06/15 18:54:29 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/README,v 1.19 2000/06/19 13:52:59 momjian Exp $
 #
 
 TOPDIR=../..
diff --git a/contrib/apache_logging/apachelog.sql b/contrib/apache_logging/apachelog.sql
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/apache_logging/httpconf.txt b/contrib/apache_logging/httpconf.txt
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/array/Makefile b/contrib/array/Makefile
index 199ed1d1311b777687af4a0f82acaaa0b441089d..f85c2a85d8187436f970e7019a124f9afa252289 100644
--- a/contrib/array/Makefile
+++ b/contrib/array/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/array/Attic/Makefile,v 1.9 2000/06/16 18:58:25 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/array/Attic/Makefile,v 1.10 2000/06/19 13:53:03 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= array_iterator
 
 PROGRAM	= 
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= $(NAME).sql
 BINS	=
 EXAMPLES=
@@ -29,7 +29,7 @@ install: install_doc install_sql install_mod
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file (CONTRIB_DOCDIR)/$(DOCS).$(NAME) ; \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/array/README b/contrib/array/README
index b072ebe397005dbf6882808827784df836423862..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/array/README
+++ b/contrib/array/README
@@ -1,49 +0,0 @@
-Array iterator functions, by Massimo Dal Zotto <dz@cs.unitn.it>
-Copyright (C) 1999, Massimo Dal Zotto <dz@cs.unitn.it>
-
-This software is distributed under the GNU General Public License
-either version 2, or (at your option) any later version.
-
-
-This loadable module defines a new class of functions which take
-an array and a scalar value, iterate a scalar operator over the
-elements of the array and the value, and compute a result as
-the logical OR or AND of the iteration results.
-For example array_int4eq returns true if some of the elements
-of an array of int4 is equal to the given value:
-
-	array_int4eq({1,2,3}, 1)  -->  true
-	array_int4eq({1,2,3}, 4)  -->  false
-
-If we have defined T array types and O scalar operators we can
-define T x O x 2 array functions, each of them has a name like
-"array_[all_]<basetype><operation>" and takes an array of type T
-iterating the operator O over all the elements. Note however
-that some of the possible combination are invalid, for example
-the array_int4_like because there is no like operator for int4.
-
-We can then define new operators based on these functions and use
-them to write queries with qualification clauses based on the
-values of some of the elements of an array.
-For example to select rows having some or all element of an array
-attribute equal to a given value or matching a regular expression:
-
-	create table t(id int4[], txt text[]);
-
-	-- select tuples with some id element equal to 123
-	select * from t where t.id *= 123;
-
-	-- select tuples with some txt element matching '[a-z]'
-	select * from t where t.txt *~ '[a-z]';
-
-	-- select tuples with all txt elements matching '^[A-Z]'
-	select * from t where t.txt[1:3] **~ '^[A-Z]';
-
-The scheme is quite general, each operator which operates on a base type
-can be iterated over the elements of an array. It seem to work well but
-defining each new operators requires writing a different C function.
-Furthermore in each function there are two hardcoded OIDs which reference
-a base type and a procedure. Not very portable. Can anyone suggest a
-better and more portable way to do it ?
-
-See also array_iterator.sql for an example on how to use this module.
diff --git a/contrib/bit/Makefile b/contrib/bit/Makefile
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/bit/README b/contrib/bit/README
deleted file mode 100644
index 18a1fe1820ba496a94ea14d3934d129158c2a26c..0000000000000000000000000000000000000000
--- a/contrib/bit/README
+++ /dev/null
@@ -1,74 +0,0 @@
-A set of C routines to implement an SQL-compliant bitstring type.
-
-The file varbit.c contains the c-functions to implement both BIT and
-BIT VARYING. Both types are implemented in essentially the same way,
-except that BIT is zero padded to a specified length. I've tried to
-make this code as independent as possible of the byte length, but it
-is quite possible that there may be problems on machines that don't
-have 8 bits/byte (are there still any around?).
-
-The SQL standard only defines comparison, SUBSTR and concatenation
-operators, and these have been implemented. In addition all logical
-operators have been implemented, i.e. ~,|,&,^,<< and >>. This is
-useful if one wants to build bit masks. If the two strings are not of
-the same length the longer string is truncated (truncation was the
-only real option, as padding with zeros could give unintuitive results
-for ^) and the result has the length of the shorter string. If there
-is a requirement for any other functions, let me know, and I will have
-a look.
-
-My knowledge of postgres is not up to integrating a type, so I'm hoping
-that somebody can integrate this type for me, or give me some hints as
-to what needs to be done. These routines were developed outside the
-postgres source tree, with a hacked version of postgres.h. The header
-files probably need some ammending.
-
-The included files are 
-
-varbit.h       -- bit string header type
-varbit.c       -- the routines
-vartest.c      -- a few calls to the routines to 
-
-The following routines are available. 
-
-char * zpbitin(char *s, int dummy,  int32 atttypmod);
-	Read in a zero padded bit string of the form X'...' or B'...'
-    
-char * zpbitout(char *s);
-	Print a zero padded bit string in hex X'...'
-
-char * zpbitsout(char *s);
-	Print a zero padded bit string in binary B'...'
-
-char * varbitin(char *s, int dummy,  int32 atttypmod);
-	Read in a varying length bit string of the form X'...' or B'...'
-
-[There is no need for separate output functions for varying bit, as 
- zpbitout will print them out correctly]
-
-char * bitcat (char *arg1, char *arg2);
-	Bit concatenation. 
-
-char * bitsubstr (char *arg, int32 s, int32 l);
-	Substring of a bit string.
-
-bool biteq (char *arg1, char *arg2);
-bool bitne (char *arg1, char *arg2);
-bool bitge (char *arg1, char *arg2);
-bool bitgt (char *arg1, char *arg2);
-bool bitle (char *arg1, char *arg2);
-bool bitlt (char *arg1, char *arg2);
-int bitcmp (char *arg1, char *arg2);
-	Comparison operators
-
-char * bitand (char * arg1, char * arg2);
-char * bitor (char * arg1, char * arg2);
-char * bitxor (char * arg1, char * arg2);
-char * bitnot (char * arg);
-char * bitshiftright (char * arg, int shft);
-char * bitshiftleft (char * arg, int shft);
-	Bit operations.
-
-If anything else needs to be done, please let me know.
-
-Adriaan (adriaan@albourne.com)
diff --git a/contrib/bit/varbit.c b/contrib/bit/varbit.c
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/bit/varbit.demo.sql b/contrib/bit/varbit.demo.sql
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/bit/varbit.drop.sql b/contrib/bit/varbit.drop.sql
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/bit/varbit.h b/contrib/bit/varbit.h
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/bit/varbit.source b/contrib/bit/varbit.source
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/bit/varbit_glue.c b/contrib/bit/varbit_glue.c
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/bit/vartest.c b/contrib/bit/vartest.c
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/datetime/Makefile b/contrib/datetime/Makefile
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/datetime/README b/contrib/datetime/README
deleted file mode 100644
index 66f4d376d61dafa38fa6e03bda984181e7fd0a0a..0000000000000000000000000000000000000000
--- a/contrib/datetime/README
+++ /dev/null
@@ -1,18 +0,0 @@
-Datetime functions.
-Copyright (C) 1999, Massimo Dal Zotto <dz@cs.unitn.it>
-
-This software is distributed under the GNU General Public License
-either version 2, or (at your option) any later version.
-
-
-I have written some new funtions for time and date data types which can
-be used to extract hour,minutes,seconds from time values, and year,
-month,day from a date. There is also a time_difference and functions
-to convert a time to minutes or seconds.
-
-There are also new input/output functions for the time data type which
-allow the insertion of time attributes with value 24:00:00.
-This can be useful if your application needs to compute time difference
-from two time values representing an elapsed time of 24 hours.
-
-Massimo Dal Zotto <dz@cs.unitn.it>
diff --git a/contrib/datetime/datetime_functions.c b/contrib/datetime/datetime_functions.c
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/datetime/datetime_functions.h b/contrib/datetime/datetime_functions.h
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/datetime/datetime_functions.sql.in b/contrib/datetime/datetime_functions.sql.in
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/earthdistance/Makefile b/contrib/earthdistance/Makefile
index 21156347cfec3fe8ce7a0eddc2d20f691c0aaec7..68203b6b8129f0b76cc29dbf0361d48da1d3990d 100644
--- a/contrib/earthdistance/Makefile
+++ b/contrib/earthdistance/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/earthdistance/Makefile,v 1.3 2000/06/16 18:58:26 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/earthdistance/Makefile,v 1.4 2000/06/19 13:53:18 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= earthdistance
 
 PROGRAM	= 
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= $(NAME).sql
 BINS    =
 EXAMPLES=
@@ -29,7 +29,7 @@ install: install_doc install_sql install_mod
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/earthdistance/README b/contrib/earthdistance/README
index f4ecef80fef74847bc930f10f09770bac999ff1c..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/earthdistance/README
+++ b/contrib/earthdistance/README
@@ -1,31 +0,0 @@
-Date: Wed, 1 Apr 1998 15:19:32 -0600 (CST)
-From: Hal Snyder <hal@vailsys.com>
-To: vmehr@ctp.com
-Subject: [QUESTIONS] Re: Spatial data, R-Trees
-
-> From: Vivek Mehra <vmehr@ctp.com>
-> Date: Wed, 1 Apr 1998 10:06:50 -0500
-
->  Am just starting out with PostgreSQL and would like to learn more about
-> the spatial data handling ablilities of postgreSQL - in terms of using
-> R-tree indexes, user defined types, operators and functions. 
-> 
-> Would you be able to suggest where I could find some code and SQL to
-> look at to create these?
-
-Here's the setup for adding an operator '<@>' to give distance in
-statute miles between two points on the earth's surface. Coordinates
-are in degrees. Points are taken as (longitude, latitude) and not vice
-versa as longitude is closer to the intuitive idea of x-axis and
-latitude to y-axis.
-
-There's C source, Makefile for FreeBSD, and SQL for installing and
-testing the function.
-
-Let me know if anything looks fishy!
-
-A note on testing C extensions - it seems not enough to drop a function
-and re-create it - if I change a function, I have to stop and restart
-the backend for the new version to be seen. I guess it would be too
-messy to track which functions are added from a .so and do a dlclose
-when the last one is dropped.
diff --git a/contrib/earthdistance/earthdistance.sql b/contrib/earthdistance/earthdistance.sql
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/findoidjoins/Makefile b/contrib/findoidjoins/Makefile
index 4990bfbdaa1673c8461a5dac5d48ca9c63cfe0cd..530e218d44571f8bc72043b46156a5beea173015 100644
--- a/contrib/findoidjoins/Makefile
+++ b/contrib/findoidjoins/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/findoidjoins/Attic/Makefile,v 1.6 2000/06/16 18:58:29 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/findoidjoins/Attic/Makefile,v 1.7 2000/06/19 13:53:22 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= findoidjoins
 
 PROGRAM	= $(NAME)
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= 
 BINS    = $(PROGRAM) make_oidjoins_check
 EXAMPLES=
@@ -31,7 +31,7 @@ install: install_doc nstall_bin
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_bin:
diff --git a/contrib/findoidjoins/README b/contrib/findoidjoins/README
index 44c150a43f93b44e0b266cd6dc942901440e56ad..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/findoidjoins/README
+++ b/contrib/findoidjoins/README
@@ -1,90 +0,0 @@
-
-   			      findoidjoins
-
-This program scans a database, and prints oid fields (also regproc fields)
-and the tables they join to.  CAUTION: it is ver-r-r-y slow on a large
-database, or even a not-so-large one.  We don't really recommend running
-it on anything but an empty database, such as template1.
-
-Uses pgeasy library.
-
-Run on an empty database, it returns the system join relationships (shown
-below for 7.0).  Note that unexpected matches may indicate bogus entries
-in system tables --- don't accept a peculiar match without question.
-In particular, a field shown as joining to more than one target table is
-probably messed up.  In 7.0, the *only* field that should join to more
-than one target is pg_description.objoid.  (Running make_oidjoins_check
-is an easy way to spot fields joining to more than one table, BTW.)
-
-The shell script make_oidjoins_check converts findoidjoins' output
-into an SQL script that checks for dangling links (entries in an
-OID or REGPROC column that don't match any row in the expected table).
-Note that fields joining to more than one table are NOT processed.
-
-The result of make_oidjoins_check should be installed as the "oidjoins"
-regression test.  The oidjoins test should be updated after any
-revision in the patterns of cross-links between system tables.
-(Ideally we'd just regenerate the script as part of the regression
-tests themselves, but that seems too slow...)
-
----------------------------------------------------------------------------
-
-Join pg_aggregate.aggtransfn1 => pg_proc.oid
-Join pg_aggregate.aggtransfn2 => pg_proc.oid
-Join pg_aggregate.aggfinalfn => pg_proc.oid
-Join pg_aggregate.aggbasetype => pg_type.oid
-Join pg_aggregate.aggtranstype1 => pg_type.oid
-Join pg_aggregate.aggtranstype2 => pg_type.oid
-Join pg_aggregate.aggfinaltype => pg_type.oid
-Join pg_am.amgettuple => pg_proc.oid
-Join pg_am.aminsert => pg_proc.oid
-Join pg_am.amdelete => pg_proc.oid
-Join pg_am.ambeginscan => pg_proc.oid
-Join pg_am.amrescan => pg_proc.oid
-Join pg_am.amendscan => pg_proc.oid
-Join pg_am.ammarkpos => pg_proc.oid
-Join pg_am.amrestrpos => pg_proc.oid
-Join pg_am.ambuild => pg_proc.oid
-Join pg_am.amcostestimate => pg_proc.oid
-Join pg_amop.amopid => pg_am.oid
-Join pg_amop.amopclaid => pg_opclass.oid
-Join pg_amop.amopopr => pg_operator.oid
-Join pg_amproc.amid => pg_am.oid
-Join pg_amproc.amopclaid => pg_opclass.oid
-Join pg_amproc.amproc => pg_proc.oid
-Join pg_attribute.attrelid => pg_class.oid
-Join pg_attribute.atttypid => pg_type.oid
-Join pg_class.reltype => pg_type.oid
-Join pg_class.relam => pg_am.oid
-Join pg_description.objoid => pg_proc.oid
-Join pg_description.objoid => pg_type.oid
-Join pg_index.indexrelid => pg_class.oid
-Join pg_index.indrelid => pg_class.oid
-Join pg_opclass.opcdeftype => pg_type.oid
-Join pg_operator.oprleft => pg_type.oid
-Join pg_operator.oprright => pg_type.oid
-Join pg_operator.oprresult => pg_type.oid
-Join pg_operator.oprcom => pg_operator.oid
-Join pg_operator.oprnegate => pg_operator.oid
-Join pg_operator.oprlsortop => pg_operator.oid
-Join pg_operator.oprrsortop => pg_operator.oid
-Join pg_operator.oprcode => pg_proc.oid
-Join pg_operator.oprrest => pg_proc.oid
-Join pg_operator.oprjoin => pg_proc.oid
-Join pg_proc.prolang => pg_language.oid
-Join pg_proc.prorettype => pg_type.oid
-Join pg_rewrite.ev_class => pg_class.oid
-Join pg_statistic.starelid => pg_class.oid
-Join pg_statistic.staop => pg_operator.oid
-Join pg_trigger.tgrelid => pg_class.oid
-Join pg_trigger.tgfoid => pg_proc.oid
-Join pg_type.typrelid => pg_class.oid
-Join pg_type.typelem => pg_type.oid
-Join pg_type.typinput => pg_proc.oid
-Join pg_type.typoutput => pg_proc.oid
-Join pg_type.typreceive => pg_proc.oid
-Join pg_type.typsend => pg_proc.oid
-
----------------------------------------------------------------------------
-
-Bruce Momjian (root@candle.pha.pa.us)
diff --git a/contrib/fulltextindex/Makefile b/contrib/fulltextindex/Makefile
index caa9a26e2df54f0c77b32a67021d53286f1ca7ac..ceb51a04404bbc2b0c50850fa0f97da37d8a1241 100644
--- a/contrib/fulltextindex/Makefile
+++ b/contrib/fulltextindex/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/fulltextindex/Attic/Makefile,v 1.4 2000/06/16 18:58:32 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/fulltextindex/Attic/Makefile,v 1.5 2000/06/19 13:53:30 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= fti
 
 PROGRAM	= 
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= $(NAME).sql
 BINS	= fti.pl
 EXAMPLES=
@@ -29,7 +29,7 @@ install: install_doc install_sql install_mod install_bin
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/fulltextindex/README b/contrib/fulltextindex/README
index fdb6fcf3b10663d9b4ea6d442f9cbee5a52cbb75..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/fulltextindex/README
+++ b/contrib/fulltextindex/README
@@ -1,197 +0,0 @@
-An attempt at some sort of Full Text Indexing for PostgreSQL.
-
-The included software is an attempt to add some sort of Full Text Indexing
-support to PostgreSQL. I mean by this that we can ask questions like:
-
-	Give me all rows that have 'still' and 'nash' in the 'artist' field.
-
-Ofcourse we can write this as:
-
-	select * from cds where artist ~* 'stills' and artist ~* 'nash';
-
-But this does not use any indices, and therefore, if your database
-gets very large, it will not have very high performance (the above query
-requires at least one sequential scan, it probably takes 2 due to the
-self-join).
-
-The approach used by this add-on is to define a trigger on the table and
-column you want to do this queries on. On every insert in the table, it
-takes the value in the specified column, breaks the text in this column
-up into pieces, and stores all sub-strings into another table, together
-with a reference to the row in the original table that contained this
-sub-string (it uses the oid of that row).
-
-By now creating an index over the 'fti-table', we can search for
-substrings that occur in the original table. By making a join between
-the fti-table and the orig-table, we can get the actual rows we want
-(this can also be done by using subselects, and maybe there're other
-ways too).
-
-The trigger code also allows an array called StopWords, that prevents
-certain words from being indexed.
-
-As an example we take the previous query, where we assume we have all
-sub-strings in the table 'cds-fti':
-
-	select c.*
-	from cds c, cds-fti f1, cds-fti f2
-	where	f1.string ~ '^stills' and
-		f2.string ~ '^nash' and
-		f1.id = c.oid and
-		f2.id = c.oid ;
-
-We can use the ~ (case-sensitive regular expression) here, because of
-the way sub-strings are built: from right to left, ie. house -> 'se' +
-'use' + 'ouse' + 'house'. If a ~ search starts with a ^ (match start of
-string), btree indices can be used by PostgreSQL.
-
-Now, how do we create the trigger that maintains the fti-table? First: the
-fti-table should have the following schema:
-
-	create cds-fti ( string varchar(N), id oid );
-
-Don't change the *names* of the columns, the varchar() can in fact also
-be of text-type. If you do use varchar, make sure the largest possible
-sub-string will fit.
-
-The create the function that contains the trigger::
-
-	create function fti() returns opaque as
-	    '/path/to/fti.so' language 'newC';
-
-And finally define the trigger on the 'cds' table:
-
-	create trigger cds-fti-trigger after update or insert or delete on cds
-	    for each row execute procedure fti(cds-fti, artist);
-
-Here, the trigger will be defined on table 'cds', it will create
-sub-strings from the field 'artist', and it will place those sub-strings
-in the table 'cds-fti'.
-
-Now populate the table 'cds'. This will also populate the table 'cds-fti'.
-It's fastest to populate the table *before* you create the indices.
-
-Before you start using the system, you should at least have the following
-indices:
-
-	create index cds-fti-idx on cds-fti (string, id);
-	create index cds-oid-idx on cds (oid);
-
-To get the most performance out of this, you should have 'cds-fti'
-clustered on disk, ie. all rows with the same sub-strings should be
-close to each other. There are 3 ways of doing this:
-
-1. After you have created the indices, execute 'cluster cds-fti-idx on cds-fti'.
-2. Do a 'select * into tmp-table from cds-fti order by string' *before*
-   you create the indices, then 'drop table cds-fti' and
-   'alter table tmp-table rename to cds-fti'
-3. *Before* creating indices, dump the contents of the cds-fti table using
-   'pg_dump -a -t cds-fti dbase-name', remove the \connect
-   from the beginning and the \. from the end, and sort it using the
-   UNIX 'sort' program, and reload the data.
-
-Method 1 is very slow, 2 a lot faster, and for very large tables, 3 is
-preferred.
-
-
-BENCH:
-~~~~~
-
-Maarten Boekhold <maartenb@dutepp0.et.tudelft.nl>
-The following data was generated by the 'timings.sh' script included
-in this directory. It uses a very large table with music-related
-articles as a source for the fti-table. The tables used are:
-
-product    : contains product information  :   540.429 rows
-artist_fti : fti table for product         : 4.501.321 rows
-clustered  : same as above, only clustered : 4.501.321 rows 
-
-A sequential scan of the artist_fti table (and thus also the clustered table)
-takes around 6:16 minutes....
-
-Unfortunately I cannot probide anybody else with this test-date, since I
-am not allowed to redistribute the data (it's a database being sold by
-a couple of wholesale companies). Anyways, it's megabytes, so you probably
-wouldn't want it in this distribution anyways.
-
-I haven't tested this with less data.
-
-The test-machine is a Pentium 133, 64 MB, Linux 2.0.32 with the database
-on a 'QUANTUM BIGFOOT_CY4320A, 4134MB w/67kB Cache, CHS=8960/15/63'. This
-is a very slow disk.
-
-The postmaster was running with:
-
-        postmaster -i -b /usr/local/pgsql/bin/postgres -S 1024 -B 256 \
-                -o -o /usr/local/pgsql/debug-output -F -d 1 
-
-('trashing' means a 'select count(*) from artist_fti' to completely trash
-any disk-caches and buffers....)
-
-TESTING ON UNCLUSTERED FTI
-trashing
-1: ^lapton and ^ric           : 0.050u 0.000s 5m37.484s 0.01%
-2: ^lapton and ^ric           : 0.050u 0.030s 5m32.447s 0.02%
-3: ^lapton and ^ric           : 0.030u 0.020s 5m28.822s 0.01%
-trashing
-1: ^lling and ^tones          : 0.020u 0.030s 0m54.313s 0.09%
-2: ^lling and ^tones          : 0.040u 0.030s 0m5.057s 1.38%
-3: ^lling and ^tones          : 0.010u 0.050s 0m2.072s 2.89%
-trashing
-1: ^aughan and ^evie          : 0.020u 0.030s 0m26.241s 0.19%
-2: ^aughan and ^evie          : 0.050u 0.010s 0m1.316s 4.55%
-3: ^aughan and ^evie          : 0.030u 0.020s 0m1.029s 4.85%
-trashing
-1: ^lling                     : 0.040u 0.010s 0m55.104s 0.09%
-2: ^lling                     : 0.030u 0.030s 0m4.716s 1.27%
-3: ^lling                     : 0.040u 0.010s 0m2.157s 2.31%
-trashing
-1: ^stev and ^ray and ^vaugh  : 0.040u 0.000s 1m5.630s 0.06%
-2: ^stev and ^ray and ^vaugh  : 0.050u 0.020s 1m3.561s 0.11%
-3: ^stev and ^ray and ^vaugh  : 0.050u 0.010s 1m5.923s 0.09%
-trashing
-1: ^lling (no join)           : 0.050u 0.020s 0m24.139s 0.28%
-2: ^lling (no join)           : 0.040u 0.040s 0m1.087s 7.35%
-3: ^lling (no join)           : 0.020u 0.030s 0m0.772s 6.48%
-trashing
-1: ^vaughan (no join)         : 0.040u 0.030s 0m9.075s 0.77%
-2: ^vaughan (no join)         : 0.030u 0.010s 0m0.609s 6.56%
-3: ^vaughan (no join)         : 0.040u 0.010s 0m0.503s 9.94%
-trashing
-1: ^rol (no join)             : 0.020u 0.030s 0m49.898s 0.10%
-2: ^rol (no join)             : 0.030u 0.020s 0m3.136s 1.59%
-3: ^rol (no join)             : 0.030u 0.020s 0m1.231s 4.06%
-
-TESTING ON CLUSTERED FTI
-trashing
-1: ^lapton and ^ric           : 0.020u 0.020s 2m17.120s 0.02%
-2: ^lapton and ^ric           : 0.030u 0.020s 2m11.767s 0.03%
-3: ^lapton and ^ric           : 0.040u 0.010s 2m8.128s 0.03%
-trashing
-1: ^lling and ^tones          : 0.020u 0.030s 0m18.179s 0.27%
-2: ^lling and ^tones          : 0.030u 0.010s 0m1.897s 2.10%
-3: ^lling and ^tones          : 0.040u 0.010s 0m1.619s 3.08%
-trashing
-1: ^aughan and ^evie          : 0.070u 0.010s 0m11.765s 0.67%
-2: ^aughan and ^evie          : 0.040u 0.010s 0m1.198s 4.17%
-3: ^aughan and ^evie          : 0.030u 0.020s 0m0.872s 5.73%
-trashing
-1: ^lling                     : 0.040u 0.000s 0m28.623s 0.13%
-2: ^lling                     : 0.030u 0.010s 0m2.339s 1.70%
-3: ^lling                     : 0.030u 0.010s 0m1.975s 2.02%
-trashing
-1: ^stev and ^ray and ^vaugh  : 0.020u 0.010s 0m17.667s 0.16%
-2: ^stev and ^ray and ^vaugh  : 0.030u 0.010s 0m3.745s 1.06%
-3: ^stev and ^ray and ^vaugh  : 0.030u 0.020s 0m3.439s 1.45%
-trashing
-1: ^lling (no join)           : 0.020u 0.040s 0m2.218s 2.70%
-2: ^lling (no join)           : 0.020u 0.020s 0m0.506s 7.90%
-3: ^lling (no join)           : 0.030u 0.030s 0m0.510s 11.76%
-trashing
-1: ^vaughan (no join)         : 0.040u 0.050s 0m2.048s 4.39%
-2: ^vaughan (no join)         : 0.030u 0.020s 0m0.332s 15.04%
-3: ^vaughan (no join)         : 0.040u 0.010s 0m0.318s 15.72%
-trashing
-1: ^rol (no join)             : 0.020u 0.030s 0m2.384s 2.09%
-2: ^rol (no join)             : 0.020u 0.030s 0m0.676s 7.39%
-3: ^rol (no join)             : 0.020u 0.030s 0m0.697s 7.17%
diff --git a/contrib/fulltextindex/fti.sql.in b/contrib/fulltextindex/fti.sql.in
index e0da2353c52447eb1b019fb516d827abf4842c21..c0b3662ae592645680e25606a441cd3f7729a643 100644
--- a/contrib/fulltextindex/fti.sql.in
+++ b/contrib/fulltextindex/fti.sql.in
@@ -1,3 +1,3 @@
 create function fti() returns opaque as
 	'MODULE_PATHNAME'
-	language 'C';
\ No newline at end of file
+	language 'newC';
\ No newline at end of file
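
For reference, the fti() function created above is meant to be installed as a trigger; the following is a minimal usage sketch based on the module's documentation, with hypothetical table and column names (underscores replace the hyphenated names used in the original examples, since unquoted hyphens are not valid identifiers, and the varchar width is an assumption):

	-- hypothetical tables: substrings of cds.artist are maintained in cds_fti
	create table cds (artist text, title text);
	create table cds_fti (string varchar(32), id oid);

	-- attach the trigger so inserts/updates/deletes on cds keep cds_fti current
	create trigger cds_fti_trigger after update or insert or delete on cds
	    for each row execute procedure fti(cds_fti, artist);

	-- btree index so anchored pattern searches (string ~ '^...') can use an index scan
	create index cds_fti_idx on cds_fti (string, id);
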
diff --git a/contrib/fulltextindex/fticopy b/contrib/fulltextindex/fticopy
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/isbn_issn/Makefile b/contrib/isbn_issn/Makefile
index 814c52be481e608c0e3b8e0f71f7577f814c01c1..3890d26f94a95aa5b8cbae1c315420780e9497bb 100644
--- a/contrib/isbn_issn/Makefile
+++ b/contrib/isbn_issn/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/isbn_issn/Attic/Makefile,v 1.4 2000/06/16 18:58:46 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/isbn_issn/Attic/Makefile,v 1.5 2000/06/19 13:53:36 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= isbn_issn
 
 PROGRAM	= 
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= $(NAME).sql
 BINS	=
 EXAMPLES=
@@ -30,7 +30,7 @@ install: install_doc install_sql install_mod
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/isbn_issn/README b/contrib/isbn_issn/README
index 6b734ce905fe32b0a985bc51ebb435216b9566ff..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/isbn_issn/README
+++ b/contrib/isbn_issn/README
@@ -1,22 +0,0 @@
-
-ISBN (books) and ISSN (serials)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This directory contains definitions for a couple of PostgreSQL
-external types, for a couple of international-standard namespaces:
-ISBN (books) and ISSN (serials).  Rather than just using a char()
-member of the appropriate length, I wanted my database to include
-the validity-checking that both these numbering systems were designed
-to encompass.  A little bit of research revealed the formulae
-for computing the check digits, and I also included some validity
-constraints on the number of hyphens.
-
-The internal representation of these types is intended to be
-compatible with `char16', in the (perhaps vain) hope that
-this will make it possible to create indices of these types
-using char16_ops.
-
-These are based on Tom Ivar Helbekkmo's IP address type definition,
-from which I have copied the entire form of the implementation.
-
-Garrett A. Wollman, August 1998
diff --git a/contrib/isbn_issn/isbn.c b/contrib/isbn_issn/isbn.c
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/isbn_issn/isbn.sql b/contrib/isbn_issn/isbn.sql
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/isbn_issn/isbn_issn.c b/contrib/isbn_issn/isbn_issn.c
index a1d891d7bcf4b30b63e0fa838583e916cb95699b..15d96d35db39f0562d0a0a5d33e28feceb4ec0a3 100644
--- a/contrib/isbn_issn/isbn_issn.c
+++ b/contrib/isbn_issn/isbn_issn.c
@@ -1,7 +1,7 @@
 /*
  *	PostgreSQL type definitions for ISBNs.
  *
- *	$Id: isbn_issn.c,v 1.1 2000/06/15 19:04:50 momjian Exp $
+ *	$Id: isbn_issn.c,v 1.2 2000/06/19 13:53:39 momjian Exp $
  */
 
 #include <stdio.h>
diff --git a/contrib/isbn_issn/isbn_issn.sql.in b/contrib/isbn_issn/isbn_issn.sql.in
index e0b90eca338ff782bcf8c1895afeb88cb5d8d706..f837fad3789540ed9f1d0f60d3ea68b3671a75a9 100644
--- a/contrib/isbn_issn/isbn_issn.sql.in
+++ b/contrib/isbn_issn/isbn_issn.sql.in
@@ -1,7 +1,7 @@
 --
 --	PostgreSQL code for ISSNs.
 --
---	$Id: isbn_issn.sql.in,v 1.1 2000/06/15 19:04:50 momjian Exp $
+--	$Id: isbn_issn.sql.in,v 1.2 2000/06/19 13:53:39 momjian Exp $
 --
 
 
@@ -116,7 +116,7 @@ create operator <> (
 --
 --	PostgreSQL code for ISBNs.
 --
---	$Id: isbn_issn.sql.in,v 1.1 2000/06/15 19:04:50 momjian Exp $
+--	$Id: isbn_issn.sql.in,v 1.2 2000/06/19 13:53:39 momjian Exp $
 --
 --
 --	Input and output functions and the type itself:
diff --git a/contrib/isbn_issn/issn.c b/contrib/isbn_issn/issn.c
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/isbn_issn/issn.sql b/contrib/isbn_issn/issn.sql
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/linux/Makefile b/contrib/linux/Makefile
index 947ced4f487252b9b895df0536c8edf37bab5818..3e11fd11cd8ca9dcb5c66db48ed3555d6faa2e57 100644
--- a/contrib/linux/Makefile
+++ b/contrib/linux/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/linux/Attic/Makefile,v 1.1 2000/06/15 19:04:57 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/linux/Attic/Makefile,v 1.2 2000/06/19 13:53:41 momjian Exp $
 #
 
 TOPDIR=../..
diff --git a/contrib/lo/Makefile b/contrib/lo/Makefile
index 08bff07b8428f552da688bf12fda75b630136734..791804722a236956725f89bd051f0eec793d3f12 100644
--- a/contrib/lo/Makefile
+++ b/contrib/lo/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/lo/Makefile,v 1.4 2000/06/16 18:58:49 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/lo/Makefile,v 1.5 2000/06/19 13:53:42 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,8 +10,8 @@ NAME	= lo
 
 PROGRAM	= 
 OBJS	= $(NAME).o
-DOCS	= README
-SQLS	= $(NAME).sql
+DOCS	= README.$(NAME)
+SQLS	= $(NAME).sql lo_drop.sql lo_test.sql
 BINS	=
 EXAMPLES=
 MODS	= $(NAME)$(DLSUFFIX)
@@ -35,7 +35,7 @@ install: install_doc install_sql install_mod
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/lo/README b/contrib/lo/README
index aa06adf0848010fd77b455281ed8b260662f08bd..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100755
--- a/contrib/lo/README
+++ b/contrib/lo/README
@@ -1,69 +0,0 @@
-PostgreSQL type extension for managing Large Objects
-----------------------------------------------------
-
-Overview
-
-One of the problems with the JDBC driver (and this affects the ODBC driver
-also), is that the specification assumes that references to BLOBS (Binary
-Large OBjectS) are stored within a table, and if that entry is changed, the
-associated BLOB is deleted from the database.
-
-As PostgreSQL stands, this doesn't occur. It allocates an OID for each object,
-and it is up to the application to store, and ultimately delete the objects.
-
-Now this is fine for new postgresql specific applications, but existing ones
-using JDBC or ODBC wont delete the objects, arising to orphaning - objects
-that are not referenced by anything, and simply occupy disk space.
-
-The Fix
-
-I've fixed this by creating a new data type 'lo', some support functions, and
-a Trigger which handles the orphaning problem.
-
-The 'lo' type was created because we needed to differenciate between normal
-Oid's and Large Objects. Currently the JDBC driver handles this dilema easily,
-but (after talking to Byron), the ODBC driver needed a unique type. They had created an 'lo' type, but not the solution to orphaning.
-
-Install
-
-Ok, first build the shared library, and install. Typing 'make install' in the
-contrib/lo directory should do it.
-
-Then, as the postgres super user, run the lo.sql script. This will install the
-type, and define the support functions.
-
-How to Use
-
-The easiest way is by an example:
-
-> create table image (title text,raster lo);
-> create trigger t_image before update or delete on image for each row execute procedure lo_manage(raster);
-
-Here, a trigger is created for each column that contains a lo type.
-
-Issues
-
-* dropping a table will still orphan any objects it contains, as the trigger
-  is not actioned.
-
-  For now, precede the 'drop table' with 'delete from {table}'. However, this
-  could be fixed by having 'drop table' perform an additional
-
-      'select lo_unlink({colname}::oid) from {tablename}'
-
-  for each column, before actually dropping the table.
-
-* Some frontends may create their own tables, and will not create the
-  associated trigger(s). Also, users may not remember (or know) to create
-  the triggers.
-
-  This can be solved, but would involve changes to the parser.
-
-As the ODBC driver needs a permanent lo type (& JDBC could be optimised to
-use it if it's Oid is fixed), and as the above issues can only be fixed by
-some internal changes, I feel it should become a permanent built-in type.
-
-I'm releasing this into contrib, just to get it out, and tested.
-
-Peter Mount <peter@retep.org.uk> June 13 1998
-
diff --git a/contrib/lo/drop.sql b/contrib/lo/drop.sql
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/lo/lo.sql.in b/contrib/lo/lo.sql.in
index 58390028d0caadd2befc53dc2c5334682d2a6b1e..4aa75a60fe17ec9935e6eb411d829d178bcb90af 100644
--- a/contrib/lo/lo.sql.in
+++ b/contrib/lo/lo.sql.in
@@ -1,7 +1,7 @@
 --
 --	PostgreSQL code for LargeObjects
 --
---	$Id: lo.sql.in,v 1.3 2000/06/15 18:54:56 momjian Exp $
+--	$Id: lo.sql.in,v 1.4 2000/06/19 13:53:42 momjian Exp $
 --
 --
 --	Create the data type
diff --git a/contrib/mSQL-interface/Makefile b/contrib/mSQL-interface/Makefile
index 4dc20f520400550f56cfd9f055fc91f9270138b7..7950b6934dbad7a6fe06fd71c42a9d44a34005c0 100644
--- a/contrib/mSQL-interface/Makefile
+++ b/contrib/mSQL-interface/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/mSQL-interface/Attic/Makefile,v 1.2 2000/06/16 18:58:54 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/mSQL-interface/Attic/Makefile,v 1.3 2000/06/19 13:53:47 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= mpgsql
 
 PROGRAM	= 
 OBJS	= 
-DOCS	= README $(NAME).c
+DOCS	= README.$(NAME) $(NAME).c
 SQLS	= 
 BINS    = 
 EXAMPLES=
@@ -24,7 +24,7 @@ install: install_doc
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 clean:
diff --git a/contrib/mSQL-interface/README b/contrib/mSQL-interface/README
index 714ab2905653a376f4ccdbbbdcd8597799fca117..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/mSQL-interface/README
+++ b/contrib/mSQL-interface/README
@@ -1,86 +0,0 @@
-
-Hello! :)
-
-(Sorry for my english. But if i wrote in portuguese, you wouldn't
- understand nothing. :])
-
-	I found it's the right place to post this. I'm a newcomer in these
-lists. I hope i did it right. :]
-
-<BOREDOM>
-	When i started using SQL, i started with mSQL. I developed a lot
-of useful apps for me and my job with C, mainly because i loved it's
-elegant, simple api. But for a large project i'm doing in these days, i
-thought is was not enough, because it felt a lot of features i started to
-need, like security and subselects. (and it's not free :))
-	So after looking at the options, choose to start again with
-postgres. It offered everything that i needed, and the documentation is
-really good (remember me to thank the one who wrote'em).
-	But for my little apps, i needed to start porting them to libpq.
-After looking at pq's syntax, i found it was better to write a bridge
-between the mSQL api and libpq. I found that rewriting the libmsql.a
-routines that calls libpq would made things much easier. I guess the
-results are quite good right now.
-</BOREDOM>
-
-	Ok. Lets' summarize it:
-
-	mpgsql.c is the bridge. Acting as a wrapper, it's really good,
-since i could run mSQL. But it's not accurate. Some highlights:
-
-	CONS:
-	* It's not well documented 
-		(this post is it's first documentation attempt, in fact);
-	* It doesn't handle field types correctly. I plan to fix it,
-	  if people start doing feedbacks;
-	* It's limited to 10 simultaneous connections. I plan to enhance
-	  this, i'm just figuring out;
-	* I'd like to make it reentrant/thread safe, although i don't
-	  think this could be done without changing the API structure;
-	* Error Management should be better. This is my first priority
-          now;
-	* Some calls are just empty implementations.
-
-	PROS:
-	* the mSQL Monitor runs Okay. :]
-	* It's really cool. :)
-	* Make mSQL-made applications compatible with postgresql just by
-	  changing link options.
-	* Uses postgreSQL. :]
-	* the mSQL API it's far easier to use and understand than libpq.
-          Consider this example:
-
-#include "msql.h"
-
-void main(int argc, char **argv, char **envp) {
-   int sid;
-	
-   sid = msqlConnect(NULL); /* Connects via unix socket */
-
-   if (sid >= 0) {
-      m_result *rlt;
-      m_row *row;
-      msqlSelectDB(sid, "hosts");
-      if (msqlQuery(sid, "select host_id from hosts")) {
-	 rlt = msqlStoreResult();
-         while (row = (m_row*)msqlFetchRow(rlt)) 
-            printf("hostid: %s\n", row[0]);
-         msqlFreeResult(rlt);
-      }
-      msqlClose(sid);
-   }
-}
-
-	I enclose mpgsql.c inside. I'd like to maintain it, and (maybe, am
-i dreaming) make it as part of the pgsql distribution. I guess it doesn't
-depends on me, but mainly on it's acceptance by its users.
-
-	Hm... i forgot: you'll need a msql.h copy, since it's copyrighted
-by Hughes Technologies Pty Ltd. If you haven't it yes, fetch one
-from www.hughes.com.au.
-
-	I would like to catch users ideas. My next goal is to add better
-error handling, and to make it better documented, and try to let relshow
-run through it. :)
-
-	done. Aldrin Leal <aldrin@americasnet.com>
diff --git a/contrib/miscutil/Makefile b/contrib/miscutil/Makefile
index 313772640d6632ead0cd9a50fc5bb9c8ba74669c..42a1185531ef11c6c02c344bfe3a0a664db31bf5 100644
--- a/contrib/miscutil/Makefile
+++ b/contrib/miscutil/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/miscutil/Attic/Makefile,v 1.9 2000/06/16 18:58:55 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/miscutil/Attic/Makefile,v 1.10 2000/06/19 13:53:47 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= misc_utils
 
 PROGRAM	= 
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= $(NAME).sql
 BINS	=
 EXAMPLES=
@@ -29,7 +29,7 @@ install: install_doc install_sql install_mod
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/miscutil/README b/contrib/miscutil/README
index 903455513a2533535abd223e0df478477b205e6b..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/miscutil/README
+++ b/contrib/miscutil/README
@@ -1,43 +0,0 @@
-Miscellaneous utility functions for PostgreSQL.
-Copyright (C) 1999, Massimo Dal Zotto <dz@cs.unitn.it>
-
-This software is distributed under the GNU General Public License
-either version 2, or (at your option) any later version.
-
-query_limit(n)
-
-	sets a limit on the maximum numbers of query returned from
-	a backend. It can be used to limit the result size retrieved
-	by the application for poor input data or to avoid accidental
-	table product while playying with sql.
- 
-backend_pid()
-
-	return the pid of our corresponding backend.
-
-unlisten(relname)
-
-	unlisten from a relation or from all relations if the argument
-	is null, empty or '*'.
-	It is now obsoleted by the new unlisten command but still useful
-	if you want unlisten a name computed by the query.
-	Note that a listen/notify relname can be any ascii string, not
-	just valid relation names.
-
-min(x,y)
-max(x,y)
-
-	return the min or max bteween two integers.
-
-assert_enable(bool)
-
-	enable/disable assert checkings in the backend, if it has been
-	compiled with USE_ASSERT_CHECKING.
-
-assert_test(bool)
-
-	test the assert enable/disable code, if the backend has been
-	compiled with ASSERT_CHECKING_TEST.
-
--- 
-Massimo Dal Zotto <dz@cs.unitn.it>
diff --git a/contrib/noupdate/Makefile b/contrib/noupdate/Makefile
index dd21ec1fdf58c608624e1d3b7d27de7e65b40c7b..2dd53b79cced2ba81451e6cd65d2217345b75f3d 100644
--- a/contrib/noupdate/Makefile
+++ b/contrib/noupdate/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/noupdate/Attic/Makefile,v 1.2 2000/06/16 18:59:01 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/noupdate/Attic/Makefile,v 1.3 2000/06/19 13:53:54 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= noup
 
 PROGRAM	= 
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= $(NAME).sql
 BINS	=
 EXAMPLES=
@@ -29,7 +29,7 @@ install: install_doc install_sql install_mod
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/noupdate/README b/contrib/noupdate/README
index 1c773dc77889dba3bfa3dc4ebdba3a6757fa796b..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/noupdate/README
+++ b/contrib/noupdate/README
@@ -1,20 +0,0 @@
-
-
-noupdate
-~~~~~~~~
-
- - trigger to prevent updates on single columns.
-
-
-Example:
-~~~~~~~
-
-CREATE TABLE TEST ( COL1 INT, COL2 INT, COL3 INT );
-
-CREATE TRIGGER BT BEFORE UPDATE ON TEST FOR EACH ROW
-	EXECUTE PROCEDURE 
-	noup ('COL1');
-
--- Now Try
-INSERT INTO TEST VALUES (10,20,30);
-UPDATE TEST SET COL1 = 5;
diff --git a/contrib/noupdate/noup.example b/contrib/noupdate/noup.example
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/noupdate/noup.source b/contrib/noupdate/noup.source
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/noupdate/noup.sql.in b/contrib/noupdate/noup.sql.in
index d5107492089e399efc88f0d4b3fa28db7a01eb63..cc1e6fa146fce8cb2c4599588f1d6bc1725788f0 100644
--- a/contrib/noupdate/noup.sql.in
+++ b/contrib/noupdate/noup.sql.in
@@ -3,5 +3,5 @@ DROP FUNCTION noup ();
 CREATE FUNCTION noup ()
 	RETURNS opaque
 	AS 'MODULE_PATHNAME'
-	LANGUAGE 'C'
+	LANGUAGE 'newC'
 ;
diff --git a/contrib/odbc/README b/contrib/odbc/README
index c36f183c0544b29f043fa69128394310c2f8bfa7..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/odbc/README
+++ b/contrib/odbc/README
@@ -1,38 +0,0 @@
-This directory contains support functions for the ODBC driver
-supplied with PostgreSQL-7.0.
-
-To enable additional ODBC functions with PostgreSQL-7.0, simply
-execute the commands in odbc.sql:
-
-psql
-Welcome to psql, the PostgreSQL interactive terminal.
-
-Type:  \copyright for distribution terms
-       \h for help with SQL commands
-       \? for help on internal slash commands
-       \g or terminate with semicolon to execute query
-       \q to quit
-
-postgres=# \i odbc.sql
-CREATE
-...
-
-
-To enable additional ODBC functions with versions of PostgreSQL
-prior to PostgreSQL-7.0 (e.g. PostgreSQL-6.5.3), build the shared
-library and SQL commands as follows:
-
-make pre7
-psql
-Welcome to psql, the PostgreSQL interactive terminal.
-
-Type:  \copyright for distribution terms
-       \h for help with SQL commands
-       \? for help on internal slash commands
-       \g or terminate with semicolon to execute query
-       \q to quit
-
-postgres=# \i odbc-pre7.sql
-CREATE
-...
-
diff --git a/contrib/odbc/odbc.sql b/contrib/odbc/odbc.sql
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..bec7f486b4fe4ee43e4cca2b1572aa2c8827c7c6 100644
--- a/contrib/odbc/odbc.sql
+++ b/contrib/odbc/odbc.sql
@@ -0,0 +1,145 @@
+-- ODBC.sql
+--
+
+--
+-- Character string manipulation
+--
+
+--
+-- Extensions for ODBC compliance in v7.0.
+-- In the current driver, ODBC functions must map directly into a
+-- Postgres function. So in some cases we must create a compatible
+-- function.
+--
+
+-- truncate on the left
+CREATE FUNCTION ltrunc(text, integer)
+    RETURNS text
+    AS 'SELECT substring($1 FROM 1 FOR $2)'
+    LANGUAGE 'SQL';
+
+-- truncate on the right
+CREATE FUNCTION rtrunc(text, integer)
+    RETURNS text
+    AS 'SELECT substring($1 FROM (char_length($1)-($2)+1) FOR $2)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION space(integer)
+    RETURNS text
+    AS 'SELECT lpad('''', $1, '' '')'
+    LANGUAGE 'SQL';
+
+--
+-- Mathematical functions
+--
+
+CREATE FUNCTION truncate(numeric,integer)
+    RETURNS numeric
+    AS 'SELECT trunc($1, $2)'
+    LANGUAGE 'SQL';
+
+--
+-- Date/time functions for v7.0
+--
+
+CREATE FUNCTION curdate()
+    RETURNS date
+    AS 'SELECT CAST(''now'' AS date)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION curtime()
+    RETURNS time
+    AS 'SELECT CAST(''now'' AS time)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION dayname(timestamp)
+    RETURNS text
+    AS 'SELECT to_char($1,''Day'')'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION dayofmonth(timestamp)
+    RETURNS integer
+    AS 'SELECT  CAST(date_part(''day'', $1) AS integer)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION dayofweek(timestamp)
+    RETURNS integer
+    AS 'SELECT ( CAST(date_part(''dow'', $1) AS integer) + 1)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION dayofyear(timestamp)
+    RETURNS integer
+    AS 'SELECT  CAST(date_part(''doy'', $1) AS integer)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION hour(timestamp)
+    RETURNS integer
+    AS 'SELECT CAST(date_part(''hour'', $1) AS integer)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION minute(timestamp)
+    RETURNS integer
+    AS 'SELECT CAST(date_part(''minute'', $1) AS integer)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION odbc_month(timestamp)
+    RETURNS integer
+    AS 'SELECT CAST(date_part(''month'', $1) AS integer)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION monthname(timestamp)
+    RETURNS text
+    AS 'SELECT to_char($1, ''Month'')'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION quarter(timestamp)
+    RETURNS integer
+    AS 'SELECT CAST(date_part(''quarter'', $1) AS integer)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION second(timestamp)
+    RETURNS integer
+    AS 'SELECT CAST(date_part(''second'', $1) AS integer)'
+    LANGUAGE 'SQL';
+
+/*
+-- The first argument is an integer constant denoting the units
+-- of the second argument. Until we know the actual values, we
+-- cannot implement these. - thomas 2000-04-11
+CREATE FUNCTION timestampadd(integer,integer,timestamp)
+    RETURNS timestamp
+    AS 'SELECT CAST(($3 + ($2 * $1)) AS timestamp)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION timestampdiff(integer,integer,timestamp)
+    RETURNS timestamp
+    AS 'SELECT CAST(($3 + ($2 * $1)) AS timestamp)'
+    LANGUAGE 'SQL';
+*/
+
+CREATE FUNCTION week(timestamp)
+    RETURNS integer
+    AS 'SELECT CAST(date_part(''week'', $1) AS integer)'
+    LANGUAGE 'SQL';
+
+CREATE FUNCTION year(timestamp)
+    RETURNS integer
+    AS 'SELECT CAST(date_part(''year'', $1) AS integer)'
+    LANGUAGE 'SQL';
+
+--
+-- System functions.
+--
+
+/*
+CREATE FUNCTION database()
+    RETURNS text
+    AS 'SELECT ...'
+    LANGUAGE 'SQL';
+*/
+
+CREATE FUNCTION odbc_user()
+    RETURNS text
+    AS 'SELECT CAST(USER AS text)'
+    LANGUAGE 'SQL';
+
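
The functions added above exist so the ODBC driver can map its escape functions directly onto Postgres functions; as a quick sanity check, the following sketch can be run in psql after loading odbc.sql (the timestamp literal is arbitrary, chosen only for illustration):

	-- exercise a few of the new compatibility functions
	SELECT curdate(), curtime();
	SELECT dayname('2000-06-19'::timestamp),
	       dayofweek('2000-06-19'::timestamp),
	       week('2000-06-19'::timestamp);
	SELECT space(4) || rtrunc('PostgreSQL', 3);  -- '    SQL'
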
diff --git a/contrib/pg_dumplo/INSTALL b/contrib/pg_dumplo/INSTALL
index 15733527a90c54b3ea4c7ee3142c6586366091c9..8959a462b1faf4676e6f3026d430e0b3b849ee89 100644
--- a/contrib/pg_dumplo/INSTALL
+++ b/contrib/pg_dumplo/INSTALL
@@ -14,11 +14,6 @@ Compilation:
 		* run 'make'
 		* run 'make install'
 
-	- Out of PG contrib:
-		* edit Makefile.out
-		* run 'make -f Makefile.out' 
-
-	
 THANKS:
 ~~~~~~ 
 	<??? I lost his e-mail ???>
diff --git a/contrib/pg_dumplo/Makefile b/contrib/pg_dumplo/Makefile
index a038d2925a9c6384fc27d1a268573817adc65a1e..d981e380071c1eaba36f988265c72565c500333c 100644
--- a/contrib/pg_dumplo/Makefile
+++ b/contrib/pg_dumplo/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/Makefile,v 1.3 2000/06/16 18:59:02 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/Makefile,v 1.4 2000/06/19 13:54:01 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= pg_dumplo
 
 PROGRAM	= $(NAME)
 OBJS	= main.o lo_export.o lo_import.o utils.o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= 
 BINS	= $(PROGRAM)
 EXAMPLES=
@@ -29,7 +29,7 @@ install: install_doc install_bin
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_bin:
@@ -47,4 +47,4 @@ ifeq (depend,$(wildcard depend))
 include depend
 endif
 
- 
+ 
\ No newline at end of file
diff --git a/contrib/pg_dumplo/Makefile.out b/contrib/pg_dumplo/Makefile.out
index 15bae55e2067968f4b289d652b57b87dc40a04c7..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/pg_dumplo/Makefile.out
+++ b/contrib/pg_dumplo/Makefile.out
@@ -1,42 +0,0 @@
-
-# ----------
-# pg_dumplo - Makefile for compilation out of PostgreSQL contrib tree 
-# ----------
-
-
-# Set correct values
-#
-CFLAGS 	= -Wall -fpic
-CC	= gcc
-RM	= rm
-INCLUDE = -I/usr/include/pgsql
-LIBS	=-L/usr/lib/postgresql/lib -lpq
-
-# Comment this option if your system not has getopt_long()
-#
-HAVE_GETOPT_LONG = -DHAVE_GETOPT_LONG
-
-
-# --------------------------- not edit ---------------------------------
-
-PROGRAM	= pg_dumplo
-	
-OBJECTS	= main.o lo_export.o lo_import.o utils.o
-
-CFLAGS  += -DOUT_OF_PG $(HAVE_GETOPT_LONG)
-
-COMPILE = $(CC) $(CPPFLAGS) $(CFLAGS) $(INCLUDE)
-LINK    = $(CC) $(CFLAGS) -o $@ $(LIBS)
-
-
-all: $(PROGRAM) 	
-
-$(PROGRAM): $(OBJECTS)
-	$(LINK) $(OBJECTS)
-
-.c.o: $<
-	$(COMPILE) -c $<
-
-clean:
-	$(RM) -f *~ $(OBJECTS) $(PROGRAM)
-
diff --git a/contrib/pg_dumplo/README b/contrib/pg_dumplo/README
index aef8f9f71d03b760fdd7bd9eabdc3d81710c9cc9..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/pg_dumplo/README
+++ b/contrib/pg_dumplo/README
@@ -1,117 +0,0 @@
-
-How to use pg_dumplo?
-=====================
-
-(c) 2000, Pavel Janík ml. <Pavel.Janik@linux.cz>
-
-
-Q: How do you use pg_dumplo?
-============================
-
-A: This is a small demo of backing up the database table with Large Objects:
-
-
-We will create a demo database and a small and useless table `lo' inside
-it:
-
-SnowWhite:$ createdb test
-CREATE DATABASE
-
-Ok, our database with the name 'test' is created. Now we should create demo
-table which will contain only one column with the name 'id' which will hold
-the oid number of Large Object:
-
-SnowWhite:$ psql test
-Welcome to psql, the PostgreSQL interactive terminal.
-
-Type:  \copyright for distribution terms
-       \h for help with SQL commands
-       \? for help on internal slash commands
-       \g or terminate with semicolon to execute query
-       \q to quit
-
-test=# CREATE TABLE lo (id oid);
-CREATE
-test=# \lo_import /etc/aliases
-lo_import 19338
-test=# INSERT INTO lo VALUES (19338);
-INSERT 19352 1
-test=# select * from lo;
-  id   
--------
- 19338
-(1 row)
-
-test=# \q
-
-In the above example you can see that we have also imported one "Large
-Object" - the file /etc/aliases. It has an oid of 19338 so we have inserted
-this oid number to the database table lo to the column id. The final SELECT
-shows that we have one record in the table.
-
-Now we can demonstrate the work of pg_dumplo. We will create dump directory
-which will contain the whole dump of large objects (/tmp/dump):
-
-mkdir -p /tmp/dump
-
-Now we can dump all large objects from the database `test' which has an oid
-stored in the column `id' in the table `lo':
-
-SnowWhite:$ pg_dumplo -s /tmp/dump -d test -l lo.id
-pg_dumplo: dump lo.id (1 large obj)
-
-Voila, we have the dump of all Large Objects in our directory:
-
-SnowWhite:$ tree /tmp/dump/
-/tmp/dump/
-`-- test
-    |-- lo
-    |   `-- id
-    |       `-- 19338
-    `-- lo_dump.index
-
-3 directories, 2 files
-SnowWhite:$ 
-
-Isn't it nice :-) Yes, it is, but we are on the half of our way. We should
-also be able to recreate the contents of the table lo and the Large Object
-database when something went wrong. It is very easy, we will demonstrate
-this via dropping the database and recreating it from scratch with
-pg_dumplo:
-
-SnowwWite:$ dropdb test
-DROP DATABASE
-
-SnowWhite:$ createdb test
-CREATE DATABASE
-
-Ok, our database with the name `test' is created again. We should also
-create the table `lo' again:
-
-SnowWhite:$ psql test
-Welcome to psql, the PostgreSQL interactive terminal.
-
-Type:  \copyright for distribution terms
-       \h for help with SQL commands
-       \? for help on internal slash commands
-       \g or terminate with semicolon to execute query
-       \q to quit
-
-test=# CREATE TABLE lo (id oid);
-CREATE
-test=# \q
-SnowWhite:$
-
-Now the database with the table `lo' is created again, but we do not have
-any information stored in it. But have the dump of complete Large Object
-database, so we can recreate the contents of the whole database from the
-directory /tmp/dump:
-
-SnowWhite:$ pg_dumplo -s /tmp/dump -d test -i
-19338	lo	id	test/lo/id/19338
-SnowWhite:$
-
-And this is everything.
-
-Summary: In this small example we have shown that pg_dumplo can be used to
-completely dump the database's Large Objects very easily.
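
As a hedged sanity check, not part of the original walk-through but using only
the demo table it defines, the restore can be verified from psql; the table
should again contain a single row whose id refers to the re-imported object:

test=# SELECT * FROM lo;
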
diff --git a/contrib/pg_dumplo/VERSION b/contrib/pg_dumplo/VERSION
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/pg_dumplo/main.c b/contrib/pg_dumplo/main.c
index bc75476a1d8b91e689fcc842ed8fd5f24bb84d5b..eafaed8c657ec3692cd9471fbcf2b8cc285c59ee 100644
--- a/contrib/pg_dumplo/main.c
+++ b/contrib/pg_dumplo/main.c
@@ -4,7 +4,7 @@
  *
  *	Portions Copyright (c) 1999-2000, PostgreSQL, Inc
  *
- * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.1 2000/06/15 19:05:08 momjian Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.2 2000/06/19 13:54:01 momjian Exp $
  *
  *					Karel Zak 1999-2000
  * -------------------------------------------------------------------------
@@ -152,13 +152,18 @@ main(int argc, char **argv)
 	 * Check space
 	 * ----------
 	 */
-	if (! pgLO->space) {
+	if (! pgLO->space && pgLO->action != ACTION_SHOW) {	/* no dump tree is needed for 'show' */
 		if (!(pgLO->space = getenv("PWD"))) {
 			fprintf(stderr, "%s: not set space for dump-tree (option '-s' or $PWD).\n", progname);
 			exit(RE_ERROR);
 		}	
 	}
 	
+	if (!pgLO->action) {
+		fprintf(stderr, "%s: What do you want - export or import?\n", progname);
+		exit(RE_ERROR);
+	}
+	
 	/* ----------
 	 * Make connection
 	 * ----------
@@ -179,12 +184,8 @@ main(int argc, char **argv)
 	 * Init index file
 	 * ----------
 	 */
-	if (pgLO->action) {
+	if (pgLO->action != ACTION_SHOW) 
 		index_file(pgLO);
-	} else {
-		fprintf(stderr, "%s: What do you want - export or import?\n", progname);
-		exit(RE_ERROR);
-	}
 	
 	PQexec(pgLO->conn, "BEGIN");
 
@@ -198,7 +199,7 @@ main(int argc, char **argv)
 			pglo_export(pgLO);
 			if (!pgLO->quiet) {
 				if (pgLO->action == ACTION_SHOW)
-					printf("\nDatabase '%s' content %d large objects.\n\n", pgLO->db, pgLO->counter);
+					printf("\nDatabase '%s' contains %d large objects.\n\n", pgLO->db, pgLO->counter);
 				else	
 					printf("\nExported %d large objects.\n\n", pgLO->counter);
 			}
diff --git a/contrib/pg_dumplo/pg_dumplo.c b/contrib/pg_dumplo/pg_dumplo.c
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/pgbench/Makefile b/contrib/pgbench/Makefile
index 6c1246969b0b19ccee429008804f75621af13284..c9f7c3f2b22bddc743313678d40b90cf81bcd437 100644
--- a/contrib/pgbench/Makefile
+++ b/contrib/pgbench/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/pgbench/Makefile,v 1.3 2000/06/16 18:59:07 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/pgbench/Makefile,v 1.4 2000/06/19 13:54:09 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= pgbench
 
 PROGRAM	= $(NAME)
 OBJS	= $(NAME).o
-DOCS	= README README.jis
+DOCS	= README.$(NAME) README.$(NAME)_jis
 SQLS	= 
 BINS    = $(PROGRAM) 
 EXAMPLES=
@@ -29,7 +29,7 @@ install: install_doc install_bin
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_bin:
diff --git a/contrib/pgbench/README b/contrib/pgbench/README
index 093f2b9869637e7539d5d638db42932315888b61..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/pgbench/README
+++ b/contrib/pgbench/README
@@ -1,149 +0,0 @@
-pgbench 1.2 README		2000/1/15 Tatsuo Ishii (t-ishii@sra.co.jp)
-
-o What is pgbench?
-
-  pgbench is a simple program to run a benchmark test sort of
-  "TPC-B". pgbench is a client application of PostgreSQL and runs
-  with PostgreSQL only. It performs lots of small and simple
-  transactions including select/update/insert operations then
-  calculates number of transactions successfully completed within a
-  second (transactions per second, tps). Targeting data includes a
-  table with at least 100k tuples.
-
-  Example outputs from pgbench look like:
-
-	number of clients: 4
-	number of transactions per client: 100
-	number of processed transactions: 400/400
-	tps = 19.875015(including connections establishing)
-	tps = 20.098827(excluding connections establishing)
-
-  Similar program called "JDBCBench" already exists, but it requires
-  Java that may not be available on every platform. Moreover some
-  people concerned about the overhead of Java that might lead
-  inaccurate results. So I decided to write in pure C, and named
-  it "pgbench."
-
-o features of pgbench
-
-  - pgbench is written in C using libpq only. So it is very portable
-    and easy to install.
-
-  - pgbench can simulate concurrent connections using asynchronous
-    capability of libpq. No threading is required.
-
-o How to install pgbench
-
- (1) Edit the first line in Makefile
-
-	POSTGRESHOME = /usr/local/pgsql
-
-     so that it points to the directory where PostgreSQL installed.
-
- (2) Run configure
-
- (3) Run make. You will see an executable file "pgbench" there.
-
-o How to use pgbench?
-
-  (1) Initialize database by:
-
-	pgbench -i <dbname>
-
-      where <dbname> is the name of database. pgbench uses four tables
-      accounts, branches, history and tellers. These tables will be
-      destroyed. Be very carefully if you have tables having same
-      names. Default test data contains:
-
-	table		# of tuples
-	-------------------------
-	branches	1
-	tellers		10
-	accounts	100000
-	history		0
-
-	You can increase the number of tuples by using -s option. See
-	below.
-
-  (2) Run the benchmark test
-
-	pgbench <dbname>
-
-      The default configuration is:
-
-	number of clients: 1
-	number of transactions per client: 10
-
-o options
-
-  pgbench has number of options.
-
-	-h hostname
-		hostname where the backend is running. If this option
-		is omitted, pgbench will connect to the localhost via
-		Unix domain socket.
-
-	-p port
-		the port number that the backend is accepting. default is
-		5432.
-
-	-c number_of_clients
-		Number of clients simulated. default is 1.
-
-	-t number_of_transactions
-		Number of transactions each client runs. default is 10.
-
-	-s scaling_factor
-		this should be used with -i (initialize) option.
-		number of tuples generated will be multiple of the
-		scaling factor. For example, -s 100 will imply 10M
-		(10,000,000) tuples in the accounts table.
-		default is 1.
-
-	-n
-		No vacuuming and cleaning the history table prior the
-		test is performed.
-
-	-v
-		Do vacuuming before testing. This will take some time.
-		Without both -n and -v pgbench will vacuum tellers and
-		branches tables only.
-
-	-S
-		Perform select only transactions instead of TPC-B.
-
-	-d
-		debug option.
-
-
-o What is the "transaction" actually performed in pgbench?
-
-  (1) begin;
-
-  (2) update accounts set abalance = abalance + :delta where aid = :aid;
-
-  (3) select abalance from accounts where aid = :aid;
-
-  (4) update tellers set tbalance = tbalance + :delta where tid = :tid;
-
-  (5) update branches set bbalance = bbalance + :delta where bid = :bid;
-
-  (6) insert into history(tid,bid,aid,delta) values(:tid,:bid,:aid,:delta);
-
-  (7) end;
-
-o License?
-
-Basically it is same as BSD license. See pgbench.c for more details.
-
-o History
-
-2000/1/15 pgbench-1.2 contributed to PostgreSQL
-	* Add -v option
-
-1999/09/29 pgbench-1.1 released
-	* Apply cygwin patches contributed by Yutaka Tanida
-	* More robust when backends die
-	* Add -S option (select only)
-
-1999/09/04 pgbench-1.0 released
\ No newline at end of file
diff --git a/contrib/pgbench/README.jis b/contrib/pgbench/README.jis
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/pgbench/pgbench_jis.doc b/contrib/pgbench/pgbench_jis.doc
index da33c317446e9fb14f10e540843c2ee35bd7f78f..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/pgbench/pgbench_jis.doc
+++ b/contrib/pgbench/pgbench_jis.doc
@@ -1,166 +0,0 @@
-pgbench 1.2 README		2000/1/15 Tatsuo Ishii (t-ishii@sra.co.jp)
-
-$B"#(Bpgbench $B$H$O!)(B
-
-pgbench $B$O(B TPC-B$B$K;w$?%Y%s%A%^!<%/%F%9%H$r9T$J$&%W%m%0%i%`$G$9!#:#$N$H(B
-$B$3$m(B PostgreSQL $B@lMQ$G$9!#(B
-
-pgbench $B$O(B select/update/insert $B$r4^$`%H%i%s%6%/%7%g%s$r<B9T$7!"A4BN$N(B
-$B<B9T;~4V$H<B:]$K40N;$7$?%H%i%s%6%/%7%g%s$N?t$+$i(B 1 $BIC4V$K<B9T$G$-$?%H(B
-$B%i%s%6%/%7%g%s?t(B (tps) $B$rI=<($7$^$9!#=hM}$NBP>]$H$J$k%F!<%V%k$O%G%U%)(B
-$B%k%H$G$O(B 10$BK|%?%W%k$N%G!<%?$r4^$_$^$9!#(B
-
-$B<B:]$NI=<($O0J2<$N$h$&$J46$8$G$9!#(B
-
-number of clients: 4
-number of transactions per client: 100
-number of processed transactions: 400/400
-tps = 19.875015(including connections establishing)
-tps = 20.098827(excluding connections establishing)
-
-pgbench $B$O(B JDBCBench $B$H$$$&!"$b$H$b$H$O(B MySQL $BMQ$K=q$+$l$?(B JDBC $BMQ$N%Y(B
-$B%s%A%^!<%/%W%m%0%i%`$r;29M$K:n@.$5$l$^$7$?!#(B
-
-$B"#(Bpgbench $B$NFCD'(B
-
-o C $B8@8l$H(B libpq $B$@$1$G=q$+$l$F$$$k$N$G0\?"@-$,9b$/!"4JC1$K%$%s%9%H!<(B
-$B%k$G$-$^$9!#(B
-
-o pgbench $B$O(B libpq $B$NHsF14|=hM}5!G=$r;H$C$F%^%k%A%f!<%64D6-$r%7%_%e%l!<(B
-$B%H$7$^$9!#MF0W$KF1;~@\B34D6-$r%F%9%H$G$-$^$9!#(B
-
-$B"#(Bpgbench $B$N%$%s%9%H!<%k(B
-
-Makefile$B$N0lHV>e$K$"$k(B
-
-	POSTGRESHOME = /usr/local/pgsql
-
-$B$rI,MW$K1~$8$F=$@5$7!"(Bconfigure;make $B$9$k$@$1$G$9!#(B
-
-$B"#(Bpgbench $B$N;H$$J}(B
-
-$B4pK\E*$J;H$$J}$O!"(B
-
-$ pgbench [$B%G!<%?%Y!<%9L>(B]
-
-$B$G$9!#%G!<%?%Y!<%9L>$r>JN,$9$k$H!"%f!<%6L>$HF1$8%G!<%?%Y!<%9$r;XDj$7$?(B
-$B$b$N$H$_$J$7$^$9!#%G!<%?%Y!<%9$O8e=R$N(B -i $B%*%W%7%g%s$r;H$C$F$"$i$+$8$a(B
-$B=i4|2=$7$F$*$/I,MW$,$"$j$^$9!#(B
-
-pgbench $B$K$O$$$m$$$m$J%*%W%7%g%s$,$"$j$^$9!#(B
-
--h $B%[%9%HL>(B	PostgreSQL$B$N%G!<%?%Y!<%9%G!<%b%s(B postmaster $B$NF0(B
-		$B$$$F$$$k%[%9%HL>$r;XDj$7$^$9!#>JN,$9$k$H<+%[%9%H$K(B Unix domain
-		socket $B$G@\B3$7$^$9!#(B
-
--p $B%]!<%HHV9f(B	postmaster $B$N;HMQ$9$k%]!<%HHV9f$r;XDj$7$^$9!#>JN,$9$k$H(B 5432
-		$B$,;XDj$5$l$?$b$N$H$_$J$7$^$9!#(B
-
--c $B%/%i%$%"%s%H?t(B	$BF1;~<B9T%/%i%$%"%s%H?t$r;XDj$7$^$9!#>JN,;~$O(B
-			1 $B$H$J$j$^$9!#(Bpgbench $B$OF1;~<B9T%/%i%$%"%s%HKh$K(B
-			$B%U%!%$%k%G%#%9%/%j%W%?$r;HMQ$9$k$N$G!";HMQ2DG=(B
-			$B%U%!%$%k%G%#%9%/%j%W%??t$r1[$($k%/%i%$%"%s%H?t$O(B
-			$B;XDj$G$-$^$;$s!#;HMQ2DG=%U%!%$%k%G%#%9%/%j%W%??t(B
-			$B$O(B limit $B$d(B ulimit $B%3%^%s%I$GCN$k$3$H$,$G$-$^$9!#(B
-
--t $B%H%i%s%6%/%7%g%s?t(B	$B3F%/%i%$%"%s%H$,<B9T$9$k%H%i%s%6%/%7%g%s?t$r(B
-			$B;XDj$7$^$9!#>JN,;~$O(B 10 $B$H$J$j$^$9!#(B
-
--s $B%9%1!<%j%s%0%U%!%/%?!<(B
-
-		-i $B%*%W%7%g%s$H0l=o$K;HMQ$7$^$9!#(B
-		$B%9%1!<%j%s%0%U%!%/%?!<$O(B1$B0J>e$N@0?t!#%9%1!<%j%s%0%U%!(B
-		$B%/%?!<$rJQ$($k$3$H$K$h$j!"%F%9%H$NBP>]$H$J$k%F!<%V%k$N(B
-		$BBg$-$5$,(B 10$BK|(B x [$B%9%1!<%j%s%0%U%!%/%?!<(B]$B$K$J$j$^$9!#(B
-		$B%G%U%)%k%H$N%9%1!<%j%s%0%U%!%/%?!<$O(B 1 $B$G$9!#(B
-
--v		$B$3$N%*%W%7%g%s$r;XDj$9$k$H!"%Y%s%A%^!<%/3+;OA0$K(B vacuum $B$H(B
-		history $B$N%/%j%"$r9T$J$$$^$9!#(B-v $B$H(B -n $B$r>JN,$9$k$H!"(B
-		$B:G>.8B$N(B vacuum $B$J$I$r9T$$$^$9!#$9$J$o$A!"(Bhistory $B$N:o=|!"(B
-		$B$H(B history, branches, history $B$N(B vacuum $B$r9T$$$^$9!#(B
-		$B$3$l$O!"(Bvacuum $B$N;~4V$r:G>.8B$K$7$J$,$i!"%Q%U%)!<%^%s%9$K(B
-		$B1F6A$9$k%4%_A]=|$r8z2LE*$K9T$$$^$9!#DL>o$O(B -v $B$H(B -n $B$r(B
-		$B>JN,$9$k$3$H$r$*$9$9$a$7$^$9!#(B
-
--n		$B$3$N%*%W%7%g%s$r;XDj$9$k$H!"%Y%s%A%^!<%/3+;OA0$K(B vacuum $B$H(B
-		history $B$N%/%j%"$r9T$J$$$^$;$s!#(B
-
--S		TPC-B$B$N%H%i%s%6%/%7%g%s$G$O$J$/!"8!:w$N$_$N%H%i%s%6%/%7%g%s$r(B
-		$B<B9T$7$^$9!#8!:w%9%T!<%I$rB,Dj$7$?$$$H$-$K;H$$$^$9!#(B
-
--d		$B%G%P%C%0%*%W%7%g%s!#MM!9$J>pJs$,I=<($5$l$^$9!#(B
-
-$B"#%G!<%?%Y!<%9$N=i4|2=(B
-
-pgbench $B$G%Y%s%A%^!<%/%F%9%H$r<B;\$9$k$?$a$K$O!"$"$i$+$8$a%G!<%?%Y!<%9(B
-$B$r=i4|2=$7!"%F%9%H%G!<%?$r:n$kI,MW$,$"$j$^$9!#(B
-
-$ pgbench -i [$B%G!<%?%Y!<%9L>(B]
-
-$B$3$l$K$h$j0J2<$N%F!<%V%k$,:n$i$l$^$9(B($B%9%1!<%j%s%0%U%!%/%?!<(B == 1 $B$N>l9g(B)$B!#(B
-
-$B!vCm0U!v(B
-$BF1$8L>A0$N%F!<%V%k$,$"$k$H:o=|$5$l$F$7$^$&$N$G$4Cm0U2<$5$$!*!*(B
-
-$B%F!<%V%kL>(B	$B%?%W%k?t(B
--------------------------
-branches	1
-tellers		10
-accounts	100000
-history		0
-
-$B%9%1!<%j%s%0%U%!%/%?!<$r(B 10,100,1000 $B$J$I$KJQ99$9$k$H!">e5-%?%W%k?t$O(B
-$B$=$l$K1~$8$F(B10$BG\!"(B100$BG\!"(B1000$BG\$K$J$j$^$9!#$?$H$($P!"%9%1!<%j%s%0%U%!(B
-$B%/%?!<$r(B 10 $B$H$9$k$H!"(B
-
-$B%F!<%V%kL>(B	$B%?%W%k?t(B
--------------------------
-branches	10
-tellers		100
-accounts	1000000
-history		0
-
-$B$K$J$j$^$9!#(B
-
-$B"#!V%H%i%s%6%/%7%g%s!W$NDj5A(B
-
-pgbench $B$G$O!"0J2<$N%7!<%1%s%9$rA4It40N;$7$F(B1$B%H%i%s%6%/%7%g%s$H?t$($F(B
-$B$$$^$9!#(B
-
-(1) begin;
-
-(2) update accounts set abalance = abalance + :delta where aid = :aid;
-    $B$3$3$G!"(B:delta$B$O(B1$B$+$i(B1000$B$^$G$NCM$r<h$kMp?t!"(B:aid $B$O(B 1$B$+$i(B100000$B$^$G(B
-    $B$NCM$r<h$kMp?t$G$9!#0J2<!"Mp?t$NCM$O$=$l$>$l$3$N%H%i%s%6%/%7%g%s$N(B
-    $BCf$G$OF1$8CM$r;H$$$^$9!#(B
-
-(3) select abalance from accounts where aid = :aid;
-    $B$3$3$G$O(B1$B7o$@$18!:w$5$l$^$9!#(B
-
-(4) update tellers set tbalance = tbalance + :delta where tid = :tid;
-    $B$3$3$G(B :tid $B$O(B 1$B$+$i(B10$B$N4V$NCM$r$H$kMp?t$G$9!#(B
-
-(5) update branches set bbalance = bbalance + :delta where bid = :bid;
-    $B$3$3$G(B :bid $B$O(B 1 $B$+$i(B[$B%9%1%j%s%0%U%!%/%?!<(B]$B$N4V$NCM$r<h$kMp?t$G$9!#(B
-
-(6) insert into history(tid,bid,aid,delta) values(:tid,:bid,:aid,:delta);
-
-(7) end;
-
-$B"#:n<T$H%i%$%;%s%9>r7o(B
-
-pgbench $B$O@P0f(B $BC#IW$K$h$C$F=q$+$l$^$7$?!#%i%$%;%s%9>r7o$O(B pgbench.c $B$N(B
-$BKAF,$K=q$$$F$"$j$^$9!#$3$N>r7o$r<i$k8B$jL5=~$GMxMQ$7!"$^$?<+M3$K:FG[IU(B
-$B$G$-$^$9!#(B
-
-$B"#2~DjMzNr(B
-
-2000/1/15 pgbench-1.2 $B$O(B PostgreSQL $B$K(B contribute $B$5$l$^$7$?!#(B
-	* -v $B%*%W%7%g%sDI2C(B
-
-1999/09/29 pgbench-1.1 $B%j%j!<%9(B
-	* $BC+ED$5$s$K$h$k(Bcygwin$BBP1~%Q%C%A<h$j9~$_(B
-	* $B%P%C%/%(%s%I%/%i%C%7%e;~$NBP1~(B
-	* -S $B%*%W%7%g%sDI2C(B
-
-1999/09/04 pgbench-1.0 $B%j%j!<%9(B
diff --git a/contrib/soundex/Makefile b/contrib/soundex/Makefile
index bf9a0bf7bbb4b358256e4d5bc04708f10ead0936..55e61b3ede37f506e183e8ecb38e0f97dfd44675 100644
--- a/contrib/soundex/Makefile
+++ b/contrib/soundex/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/soundex/Attic/Makefile,v 1.4 2000/06/16 18:59:13 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/soundex/Attic/Makefile,v 1.5 2000/06/19 13:54:11 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= soundex
 
 PROGRAM	= 
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= $(NAME).sql
 BINS	=
 EXAMPLES=
@@ -30,7 +30,7 @@ install: install_doc install_sql install_mod
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/soundex/README b/contrib/soundex/README
index e3ba4ee23178f9ad17fe7fd33e7ea062d1372a4d..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/soundex/README
+++ b/contrib/soundex/README
@@ -1,53 +0,0 @@
-
-SELECT text_soundex('hello world!');
-
-CREATE TABLE s (nm text)\g
-
-insert into s values ('john')\g
-insert into s values ('joan')\g
-insert into s values ('wobbly')\g
-
-select * from s
-where text_soundex(nm) = text_soundex('john')\g
-
-select nm from s a, s b
-where text_soundex(a.nm) = text_soundex(b.nm)
-and a.oid <> b.oid\g
-
-CREATE FUNCTION text_sx_eq(text, text) RETURNS bool AS
-'select text_soundex($1) = text_soundex($2)'
-LANGUAGE 'sql'\g
-
-CREATE FUNCTION text_sx_lt(text,text) RETURNS bool AS
-'select text_soundex($1) < text_soundex($2)'
-LANGUAGE 'sql'\g
-
-CREATE FUNCTION text_sx_gt(text,text) RETURNS bool AS
-'select text_soundex($1) > text_soundex($2)'
-LANGUAGE 'sql';
-
-CREATE FUNCTION text_sx_le(text,text) RETURNS bool AS
-'select text_soundex($1) <= text_soundex($2)'
-LANGUAGE 'sql';
-
-CREATE FUNCTION text_sx_ge(text,text) RETURNS bool AS
-'select text_soundex($1) >= text_soundex($2)'
-LANGUAGE 'sql';
-
-CREATE FUNCTION text_sx_ne(text,text) RETURNS bool AS
-'select text_soundex($1) <> text_soundex($2)'
-LANGUAGE 'sql';
-
-DROP OPERATOR #= (text,text)\g
-
-CREATE OPERATOR #= (leftarg=text, rightarg=text, procedure=text_sx_eq,
-commutator=text_sx_eq)\g
-
-SELECT *
-FROM s
-WHERE text_sx_eq(nm,'john')\g
-
-SELECT *
-from s
-where s.nm #= 'john';
-
diff --git a/contrib/spi/Makefile b/contrib/spi/Makefile
index 1922d4be684cbe5a6c45bf88b30c84e241843cfe..43105293e4da76b50ca65fe41485f8703f5ed948 100644
--- a/contrib/spi/Makefile
+++ b/contrib/spi/Makefile
@@ -1,19 +1,19 @@
 #
-# $Header: /cvsroot/pgsql/contrib/spi/Makefile,v 1.14 2000/06/16 18:59:17 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/spi/Makefile,v 1.15 2000/06/19 13:54:15 momjian Exp $
 #
 
 TOPDIR=../..
 
 include ../Makefile.global
 
-NAME	= spi
+NAME	= 
 
 PROGRAM	= 
 OBJS	= autoinc.o insert_username.o moddatetime.o refint.o timetravel.o
-DOCS	= README
+DOCS	= README.spi
 SQLS	= $(OBJS:.o=.sql)
 BINS	=
-EXAMPLES= $(OBJS:.o=.example)
+EXAMPLES= $(OBJS:.o=.example) new_example.example
 MODS	= $(OBJS:.o=$(DLSUFFIX))
 
 CFLAGS += -I. $(CFLAGS_SL)
@@ -33,7 +33,7 @@ install: install_doc install_sql install_mod install_example
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/spi/README b/contrib/spi/README
index 65868f0fc7af1e8dfdc0ac34bb01a0ef497eb92e..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/spi/README
+++ b/contrib/spi/README
@@ -1,104 +0,0 @@
-
-Here are general trigger functions provided as workable examples
-of using SPI and triggers. "General" means that functions may be
-used for defining triggers for any tables but you have to specify
-table/field names (as described below) while creating a trigger.
-
-1. refint.c - functions for implementing referential integrity.
-
-check_primary_key () is to used for foreign keys of a table.
-   
-   You are to create trigger (BEFORE INSERT OR UPDATE) using this 
-function on a table referencing another table. You are to specify
-as function arguments: triggered table column names which correspond
-to foreign key, referenced table name and column names in referenced
-table which correspond to primary/unique key.
-   You may create as many triggers as you need - one trigger for
-one reference.
-
-check_foreign_key () is to used for primary/unique keys of a table.
-
-   You are to create trigger (BEFORE DELETE OR UPDATE) using this
-function on a table referenced by another table(s). You are to specify
-as function arguments: number of references for which function has to
-performe checking, action if referencing key found ('cascade' - to delete
-corresponding foreign key, 'restrict' - to abort transaction if foreign keys 
-exist, 'setnull' - to set foreign key referencing primary/unique key
-being deleted to null), triggered table column names which correspond
-to primary/unique key, referencing table name and column names corresponding
-to foreign key (, ... - as many referencing tables/keys as specified
-by first argument).
-   Note, that NOT NULL constraint and unique index have to be defined by
-youself.
-
-   There are examples in refint.example and regression tests
-(sql/triggers.sql).
-
-   To CREATE FUNCTIONs use refint.sql (will be made by gmake from
-refint.source).
-
-
-2. timetravel.c - functions for implementing time travel feature.
-
-   Old internally supported time-travel (TT) used insert/delete
-transaction commit times. To get the same feature using triggers
-you are to add to a table two columns of abstime type to store
-date when a tuple was inserted (start_date) and changed/deleted 
-(stop_date):
-
-CREATE TABLE XXX (
-	...		...
-	date_on		abstime default currabstime(),
-	date_off	abstime default 'infinity'
-	...		...
-);
-
-- so, tuples being inserted with NULLs in date_on/date_off will get
-_current_date_ in date_on (name of start_date column in XXX) and INFINITY in
-date_off (name of stop_date column in XXX).
-
-   Tuples with stop_date equal INFINITY are "valid now": when trigger will
-be fired for UPDATE/DELETE of a tuple with stop_date NOT equal INFINITY then
-this tuple will not be changed/deleted!
-
-   If stop_date equal INFINITY then on
-
-UPDATE: only stop_date in tuple being updated will be changed to current
-date and new tuple with new data (coming from SET ... in UPDATE) will be
-inserted. Start_date in this new tuple will be setted to current date and
-stop_date - to INFINITY.
-
-DELETE: new tuple will be inserted with stop_date setted to current date
-(and with the same data in other columns as in tuple being deleted).
-
-   NOTE:
-1. To get tuples "valid now" you are to add _stop_date_ = 'infinity'
-   to WHERE. Internally supported TT allowed to avoid this...
-   Fixed rewriting RULEs could help here...
-   As work arround you may use VIEWs...
-2. You can't change start/stop date columns with UPDATE! 
-   Use set_timetravel (below) if you need in this.
-
-   FUNCTIONs:
-
-timetravel() is general trigger function.
-
-   You are to create trigger BEFORE (!!!) UPDATE OR DELETE using this
-function on a time-traveled table. You are to specify two arguments: name of
-start_date column and name of stop_date column in triggered table.
-
-currabstime() may be used in DEFAULT for start_date column to get
-current date.
-
-set_timetravel() allows you turn time-travel ON/OFF for a table:
-
-   set_timetravel('XXX', 1) will turn TT ON for table XXX (and report
-old status).
-   set_timetravel('XXX', 0) will turn TT OFF for table XXX (-"-).
-
-Turning TT OFF allows you do with a table ALL what you want.
-
-   There is example in timetravel.example.
-
-   To CREATE FUNCTIONs use timetravel.sql (will be made by gmake from
-timetravel.source).
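
To make the timetravel() description above concrete, here is a minimal sketch,
assuming timetravel.sql (and therefore currabstime) has already been loaded;
the table, column and trigger names are only placeholders:

CREATE TABLE tttest (
	data		text,
	date_on		abstime default currabstime(),
	date_off	abstime default 'infinity'
);

CREATE TRIGGER tttest_tt BEFORE UPDATE OR DELETE ON tttest FOR EACH ROW
EXECUTE PROCEDURE timetravel('date_on', 'date_off');
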
diff --git a/contrib/spi/autoinc.source b/contrib/spi/autoinc.source
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/spi/insert_username.source b/contrib/spi/insert_username.source
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/spi/moddatetime.source b/contrib/spi/moddatetime.source
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/spi/new_example.sql b/contrib/spi/new_example.sql
index 02049078941c7d9ac88ae8175e863221c5e4f0e9..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/spi/new_example.sql
+++ b/contrib/spi/new_example.sql
@@ -1,68 +0,0 @@
---Column ID of table A is primary key:
-
-CREATE TABLE A (
-	ID	int4 not null,
-	id1	int4 not null,
-primary key (ID,ID1)
-);
-
---Columns REFB of table B and REFC of C are foreign keys referenting ID of A:
-
-CREATE TABLE B (
-	REFB	int4,
-	REFB1	INT4
-);
-CREATE INDEX BI ON B (REFB);
-
-CREATE TABLE C (
-	REFC	int4,
-	REFC1	int4
-);
-CREATE INDEX CI ON C (REFC);
-
---Trigger for table A:
-
-CREATE TRIGGER AT BEFORE DELETE  ON A FOR EACH ROW
-EXECUTE PROCEDURE 
-check_foreign_key (2, 'cascade', 'ID','id1', 'B', 'REFB','REFB1', 'C', 'REFC','REFC1');
-
-
-CREATE TRIGGER AT1  AFTER UPDATE  ON A FOR EACH ROW
-EXECUTE PROCEDURE 
-check_foreign_key (2, 'cascade', 'ID','id1', 'B', 'REFB','REFB1', 'C', 'REFC','REFC1');
-
-
-CREATE TRIGGER BT BEFORE INSERT OR UPDATE ON B FOR EACH ROW
-EXECUTE PROCEDURE 
-check_primary_key ('REFB','REFB1', 'A', 'ID','ID1');
-
-CREATE TRIGGER CT BEFORE INSERT OR UPDATE ON C FOR EACH ROW
-EXECUTE PROCEDURE 
-check_primary_key ('REFC','REFC1', 'A', 'ID','ID1');
-
-
-
--- Now try
-
-INSERT INTO A VALUES (10,10);
-INSERT INTO A VALUES (20,20);
-INSERT INTO A VALUES (30,30);
-INSERT INTO A VALUES (40,41);
-INSERT INTO A VALUES (50,50);
-
-INSERT INTO B VALUES (1);	-- invalid reference
-INSERT INTO B VALUES (10,10);
-INSERT INTO B VALUES (30,30);
-INSERT INTO B VALUES (30,30);
-
-INSERT INTO C VALUES (11);	-- invalid reference
-INSERT INTO C VALUES (20,20);
-INSERT INTO C VALUES (20,21);
-INSERT INTO C VALUES (30,30);
-
--- now update work well 
-update  A set ID = 100 , ID1 = 199 where ID=30 ;
-
-SELECT * FROM A;
-SELECT * FROM B;
-SELECT * FROM C;
diff --git a/contrib/spi/refint.source b/contrib/spi/refint.source
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/spi/timetravel.source b/contrib/spi/timetravel.source
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/contrib/string/Makefile b/contrib/string/Makefile
index 9c727b638a68cc04b1ba3079581bf26abc1583a6..14abadc31a24c9580ccbb11dfec4af19107f7c9a 100644
--- a/contrib/string/Makefile
+++ b/contrib/string/Makefile
@@ -1,5 +1,5 @@
  #
-# $Header: /cvsroot/pgsql/contrib/string/Attic/Makefile,v 1.9 2000/06/16 18:59:21 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/string/Attic/Makefile,v 1.10 2000/06/19 13:54:20 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= string_io
 
 PROGRAM	= 
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= $(NAME).sql
 BINS	=
 EXAMPLES=
@@ -30,7 +30,7 @@ install: install_doc install_sql install_mod
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/string/README b/contrib/string/README
index 4b4d10166f82d72151dce3687e5e483141a0f20e..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/string/README
+++ b/contrib/string/README
@@ -1,23 +0,0 @@
-String io module for postgresql.
-Copyright (C) 1999, Massimo Dal Zotto <dz@cs.unitn.it>
-
-This software is distributed under the GNU General Public License
-either version 2, or (at your option) any later version.
-
-
-These output functions can be used as substitution of the standard text
-output functions to get the value of text fields printed in the format
-used for C strings. This allows the output of queries or the exported
-files to be processed more easily using standard unix filter programs
-like perl or awk.
-
-If you use the standard functions instead you could find a single tuple
-splitted into many lines and the tabs embedded in the values could be
-confused with those used as field delimters.
-
-My function translates all non-printing characters into corresponding
-esacape sequences as defined by the C syntax. All you need to reconstruct
-the exact value in your application is a corresponding unescape function
-like the string_input defined in the source code.
-
-Massimo Dal Zotto <dz@cs.unitn.it>
diff --git a/contrib/tips/Makefile b/contrib/tips/Makefile
index f21212d97c63c9c6a956bd2ed4acfe109c577efa..54d5e19f28093b3e3847c97f75e290a8e750a106 100644
--- a/contrib/tips/Makefile
+++ b/contrib/tips/Makefile
@@ -1,16 +1,16 @@
 #
-# $Header: /cvsroot/pgsql/contrib/tips/Attic/Makefile,v 1.2 2000/06/16 18:59:25 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/tips/Attic/Makefile,v 1.3 2000/06/19 13:54:31 momjian Exp $
 #
 
 TOPDIR=../..
 
 include ../Makefile.global
 
-NAME	= apachelog
+NAME	= 
 
 PROGRAM	= 
 OBJS	= 
-DOCS	= README
+DOCS	= README.apachelog
 SQLS	= 
 BINS	=
 EXAMPLES=
@@ -21,8 +21,9 @@ all::
 install: install_doc 
 
 install_doc:
+	$(TOPDIR)/config/mkinstalldirs $(CONTRIB_DOCDIR)/tips
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/tips; \
 	done
 
 clean:
diff --git a/contrib/apache_logging/README b/contrib/tips/README.apachelog
similarity index 71%
rename from contrib/apache_logging/README
rename to contrib/tips/README.apachelog
index c89a6891e74e1975c00cf52d246b58ef4fd1b8d5..8e9096241ea04d8240b607f57899a5797dcab932 100644
--- a/contrib/apache_logging/README
+++ b/contrib/tips/README.apachelog
@@ -1,4 +1,10 @@
+
 HOW TO get Apache to log to PostgreSQL
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Note: the contents of the files 'httpconf.txt' and 'apachelog.sql' are
+       included below this text.
+
 
 First, this is intended mostly as a starting point, an example of how to do it.
 
@@ -59,3 +65,27 @@ http://www.terrym.com
 
 Have fun ... and remember, this is mostly just intended as a starting point, 
 not as a finished idea.
+
+--- apachelog.sql : ---
+
+drop table access;
+CREATE TABLE access (host char(200), ident char(200), authuser char(200), accdate datetime, request char(500), ttime int2, status int2, bytes int4) archive = none;
+grant all on access to nobody;
+
+--- httpconf.txt: ---
+
+# This is mostly the same as the default, except that there are no square
+# brackets around the time and no extra timezone info; the download time, in
+# seconds, has also been added as the third field from the end.
+
+LogFormat "insert into access values ( '%h', '%l', '%u', '%{%d/%b/%Y:%H:%M:%S}t', '%r', %T, %s, %b );"
+
+# The above format ALMOST eliminates the need to use sed, except that I noticed
+# that when a frameset page is called, the bytes transferred is '-', which
+# would choke the insert, so it is replaced with '-1'.
+
+TransferLog '| su -c "sed \"s/, - );$/, -1 );/\" | /usr/local/pgsql/bin/psql www_log" nobody'
+
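Once Apache is feeding rows into the access table defined above, ordinary SQL
can be used for reporting; a small illustrative query, assuming nothing beyond
the table created by apachelog.sql:

-- requests and bytes served per HTTP status code
SELECT status, count(*) AS hits, sum(bytes) AS total_bytes
FROM access
GROUP BY status;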
diff --git a/contrib/tools/Makefile b/contrib/tools/Makefile
index 9b7d8a29f39c30b3cda5ff380ebe0c2485a70015..ecf8ac3167b565319f963f0e5254973c92d0bfbd 100644
--- a/contrib/tools/Makefile
+++ b/contrib/tools/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/tools/Attic/Makefile,v 1.2 2000/06/15 18:55:22 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/tools/Attic/Makefile,v 1.3 2000/06/19 13:54:37 momjian Exp $
 #
 
 TOPDIR=../..
diff --git a/contrib/unixdate/Makefile b/contrib/unixdate/Makefile
index 7481202998677be79f54a13914581326b4a2457c..137f15493cee6663840877de5dc618da409b1918 100644
--- a/contrib/unixdate/Makefile
+++ b/contrib/unixdate/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/unixdate/Attic/Makefile,v 1.1 2000/06/15 19:05:22 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/unixdate/Attic/Makefile,v 1.2 2000/06/19 13:54:38 momjian Exp $
 #
 
 TOPDIR=../..
diff --git a/contrib/userlock/Makefile b/contrib/userlock/Makefile
index cd97f933666967c02b0bdf13f0bab4a6244863f1..1265e66c4c5d6b65d20b6e387c48818f07519f5e 100644
--- a/contrib/userlock/Makefile
+++ b/contrib/userlock/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/userlock/Attic/Makefile,v 1.9 2000/06/16 18:59:28 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/userlock/Attic/Makefile,v 1.10 2000/06/19 13:54:44 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= user_locks
 
 PROGRAM	= 
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= $(NAME).sql
 BINS	=
 EXAMPLES=
@@ -29,7 +29,7 @@ install: install_doc install_sql install_mod
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_sql:
diff --git a/contrib/userlock/README b/contrib/userlock/README
index 4c923a46577265f16fe228a8f9c521250d966958..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/userlock/README
+++ b/contrib/userlock/README
@@ -1,55 +0,0 @@
-User locks, by Massimo Dal Zotto <dz@cs.unitn.it>
-Copyright (C) 1999, Massimo Dal Zotto <dz@cs.unitn.it>
-
-This software is distributed under the GNU General Public License
-either version 2, or (at your option) any later version.
-
-
-This loadable module, together with my user-lock.patch applied to the
-backend, provides support for user-level long-term cooperative locks.
-For example one can write:
-
-  select some_fields, user_write_lock_oid(oid) from table where id='key';
-
-Now if the returned user_write_lock_oid field is 1 you have acquired an
-user lock on the oid of the selected tuple and can now do some long operation
-on it, like let the data being edited by the user.
-If it is 0 it means that the lock has been already acquired by some other
-process and you should not use that item until the other has finished.
-Note that in this case the query returns 0 immediately without waiting on
-the lock. This is good if the lock is held for long time.
-After you have finished your work on that item you can do:
-
-  update table set some_fields where id='key';
-  select user_write_unlock_oid(oid) from table where id='key';
-
-You can also ignore the failure and go ahead but this could produce conflicts
-or inconsistent data in your application. User locks require a cooperative
-behavior between users. User locks don't interfere with the normal locks
-used by postgres for transaction processing.
-
-This could also be done by setting a flag in the record itself but in
-this case you have the overhead of the updates to the records and there
-could be some locks not released if the backend or the application crashes
-before resetting the lock flag.
-It could also be done with a begin/end block but in this case the entire
-table would be locked by postgres and it is not acceptable to do this for
-a long period because other transactions would block completely.
-
-The generic user locks use two values, group and id, to identify a lock,
-which correspond to ip_posid and ip_blkid of an ItemPointerData.
-Group is a 16 bit value while id is a 32 bit integer which could also be
-an oid. The oid user lock functions, which take only an oid as argument,
-use a group equal to 0.
-
-The meaning of group and id is defined by the application. The user
-lock code just takes two numbers and tells you if the corresponding
-entity has been succesfully locked. What this mean is up to you.
-
-My succestion is that you use the group to identify an area of your
-application and the id to identify an object in this area.
-Or you can just lock the oid of the tuples which are by definition unique.
-
-Note also that a process can acquire more than one lock on the same entity
-and it must release the lock the corresponding number of times. This can
-be done calling the unlock funtion until it returns 0.
diff --git a/contrib/vacuumlo/Makefile b/contrib/vacuumlo/Makefile
index 88fd8cfb7e06a71753bd7f6ee40c06a40c7f8f7f..2c56e542fcdbfe5b8c2dc2e61ff6483be2e4beab 100644
--- a/contrib/vacuumlo/Makefile
+++ b/contrib/vacuumlo/Makefile
@@ -1,5 +1,5 @@
 #
-# $Header: /cvsroot/pgsql/contrib/vacuumlo/Makefile,v 1.4 2000/06/16 18:59:30 momjian Exp $
+# $Header: /cvsroot/pgsql/contrib/vacuumlo/Makefile,v 1.5 2000/06/19 13:54:50 momjian Exp $
 #
 
 TOPDIR=../..
@@ -10,7 +10,7 @@ NAME	= vacuumlo
 
 PROGRAM	= $(NAME)
 OBJS	= $(NAME).o
-DOCS	= README
+DOCS	= README.$(NAME)
 SQLS	= 
 BINS    = $(PROGRAM)
 EXAMPLES=
@@ -29,7 +29,7 @@ install: install_doc install_bin
 
 install_doc:
 	for inst_file in $(DOCS); do \
-		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR)/$(DOCS).$(NAME); \
+		$(INSTALL) $(INSTL_LIB_OPTS) $$inst_file $(CONTRIB_DOCDIR); \
 	done
 
 install_bin:
diff --git a/contrib/vacuumlo/README b/contrib/vacuumlo/README
index 2c3c93de154a4631cf7d86364f157845808fb8eb..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/contrib/vacuumlo/README
+++ b/contrib/vacuumlo/README
@@ -1,38 +0,0 @@
-$Header: /cvsroot/pgsql/contrib/vacuumlo/Attic/README,v 1.1 1999/04/10 16:48:04 peter Exp $
-
-This is a simple utility that will remove any orphaned large objects out of a
-PostgreSQL database.
-
-Compiling
---------
-
-Simply run make. A single executable "vacuumlo" is created.
-
-Useage
-------
-
-vacuumlo [-v] database [db2 ... dbn]
-
-The -v flag outputs some progress messages to stdout.
-
-Method
-------
-
-First, it builds a temporary table which contains all of the oid's of the
-large objects in that database.
-
-It then scans through any columns in the database that are of type 'oid', and
-removes any entries from the temporary table.
-
-Finally, it runs through the first table, and removes from the second table, any
-oid's it finds. What is left are the orphans, and these are removed.
-
-I decided to place this in contrib as it needs further testing, but hopefully,
-this (or a variant of it) would make it into the backed as a "vacuum lo" command
-in a later release.
-
-Peter Mount <peter@retep.org.uk>
-http://www.retep.org.uk
-March 21 1999
-
-Committed April 10 1999 Peter
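
The method described above can be sketched in plain SQL; this is only an
illustration of the idea, not the query vacuumlo.c actually issues, and the
referencing table and column are made-up placeholders:

-- temporary table meant to hold the oid of every large object
-- (filling it depends on the system catalogs and is left out here)
CREATE TEMP TABLE lo_candidates (lo oid);

-- a user table with a column of type oid referencing large objects
CREATE TABLE pics (title text, image oid);

-- remove every oid that is still referenced; whatever is left is orphaned
DELETE FROM lo_candidates WHERE lo IN (SELECT image FROM pics);

-- orphans can then be unlinked
SELECT lo_unlink(lo) FROM lo_candidates;
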
diff --git a/contrib/vacuumlo/vacuumlo.c b/contrib/vacuumlo/vacuumlo.c
index ac41d551c3a8bb695d4634ddc8194668daa9dd51..3f2c592c091284630083c8d78ad03dff11aa8b62 100644
--- a/contrib/vacuumlo/vacuumlo.c
+++ b/contrib/vacuumlo/vacuumlo.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.4 2000/06/15 18:55:31 momjian Exp $
+ *	  $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.5 2000/06/19 13:54:50 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */