diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c
index 61406db634245be15e29101003256b842b3348bf..ffc1a4a10633a6b41903abe849963b4931eb78bf 100644
--- a/contrib/auto_explain/auto_explain.c
+++ b/contrib/auto_explain/auto_explain.c
@@ -6,7 +6,7 @@
  * Copyright (c) 2008-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/contrib/auto_explain/auto_explain.c,v 1.13 2010/02/16 22:19:59 adunstan Exp $
+ *	  $PostgreSQL: pgsql/contrib/auto_explain/auto_explain.c,v 1.14 2010/02/26 02:00:31 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -27,11 +27,11 @@ static int	auto_explain_log_format = EXPLAIN_FORMAT_TEXT;
 static bool auto_explain_log_nested_statements = false;
 
 static const struct config_enum_entry format_options[] = {
-        {"text", EXPLAIN_FORMAT_TEXT, false},
-        {"xml", EXPLAIN_FORMAT_XML, false},
-        {"json", EXPLAIN_FORMAT_JSON, false},
-        {"yaml", EXPLAIN_FORMAT_YAML, false},
-        {NULL, 0, false}
+	{"text", EXPLAIN_FORMAT_TEXT, false},
+	{"xml", EXPLAIN_FORMAT_XML, false},
+	{"json", EXPLAIN_FORMAT_JSON, false},
+	{"yaml", EXPLAIN_FORMAT_YAML, false},
+	{NULL, 0, false}
 };
 
 /* Current nesting depth of ExecutorRun calls */
@@ -231,7 +231,7 @@ explain_ExecutorEnd(QueryDesc *queryDesc)
 		msec = queryDesc->totaltime->total * 1000.0;
 		if (msec >= auto_explain_log_min_duration)
 		{
-			ExplainState	es;
+			ExplainState es;
 
 			ExplainInitState(&es);
 			es.analyze = (queryDesc->instrument_options && auto_explain_log_analyze);
@@ -257,7 +257,7 @@ explain_ExecutorEnd(QueryDesc *queryDesc)
 			ereport(LOG,
 					(errmsg("duration: %.3f ms  plan:\n%s",
 							msec, es.str->data),
-						errhidestmt(true)));
+					 errhidestmt(true)));
 
 			pfree(es.str->data);
 		}
diff --git a/contrib/btree_gist/btree_cash.c b/contrib/btree_gist/btree_cash.c
index 852ba3718bcbaaf80e70e58888599c633abf9171..a1efde614684c029c4c44b51aed252e7a0c08969 100644
--- a/contrib/btree_gist/btree_cash.c
+++ b/contrib/btree_gist/btree_cash.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_cash.c,v 1.10 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_cash.c,v 1.11 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -57,8 +57,8 @@ gbt_cashlt(const void *a, const void *b)
 static int
 gbt_cashkey_cmp(const void *a, const void *b)
 {
-    cashKEY    *ia = (cashKEY*)(((Nsrt *) a)->t);
-	cashKEY    *ib = (cashKEY*)(((Nsrt *) b)->t);
+	cashKEY    *ia = (cashKEY *) (((Nsrt *) a)->t);
+	cashKEY    *ib = (cashKEY *) (((Nsrt *) b)->t);
 
 	if (ia->lower == ib->lower)
 	{
diff --git a/contrib/btree_gist/btree_date.c b/contrib/btree_gist/btree_date.c
index b6e4e0b292a9ede8c69c5c20af2497389721a3f3..d8dce91535da6c73c0620ed468356f6831264c75 100644
--- a/contrib/btree_gist/btree_date.c
+++ b/contrib/btree_gist/btree_date.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_date.c,v 1.8 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_date.c,v 1.9 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -73,9 +73,9 @@ gbt_datelt(const void *a, const void *b)
 static int
 gbt_datekey_cmp(const void *a, const void *b)
 {
-	dateKEY *ia = (dateKEY*)(((Nsrt *) a)->t);	
-	dateKEY *ib = (dateKEY*)(((Nsrt *) b)->t);
-	int res;
+	dateKEY    *ia = (dateKEY *) (((Nsrt *) a)->t);
+	dateKEY    *ib = (dateKEY *) (((Nsrt *) b)->t);
+	int			res;
 
 	res = DatumGetInt32(DirectFunctionCall2(date_cmp, DateADTGetDatum(ia->lower), DateADTGetDatum(ib->lower)));
 	if (res == 0)
diff --git a/contrib/btree_gist/btree_float4.c b/contrib/btree_gist/btree_float4.c
index 3246f7f43bd6f987df2d397120412b1793d41b1d..7ece9ea220d44b2a69783cebab3a1c324cc86c43 100644
--- a/contrib/btree_gist/btree_float4.c
+++ b/contrib/btree_gist/btree_float4.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_float4.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_float4.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -56,8 +56,8 @@ gbt_float4lt(const void *a, const void *b)
 static int
 gbt_float4key_cmp(const void *a, const void *b)
 {
-    float4KEY    *ia = (float4KEY*)(((Nsrt *) a)->t);
-	float4KEY    *ib = (float4KEY*)(((Nsrt *) b)->t);
+	float4KEY  *ia = (float4KEY *) (((Nsrt *) a)->t);
+	float4KEY  *ib = (float4KEY *) (((Nsrt *) b)->t);
 
 	if (ia->lower == ib->lower)
 	{
diff --git a/contrib/btree_gist/btree_float8.c b/contrib/btree_gist/btree_float8.c
index 6964d201917449a11c5c60275c387e2ab9fa122e..ab4912883dbd8440d0d34de9b7bdd43fa6a182d7 100644
--- a/contrib/btree_gist/btree_float8.c
+++ b/contrib/btree_gist/btree_float8.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_float8.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_float8.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -57,8 +57,8 @@ gbt_float8lt(const void *a, const void *b)
 static int
 gbt_float8key_cmp(const void *a, const void *b)
 {
-    float8KEY    *ia = (float8KEY*)(((Nsrt *) a)->t);
-	float8KEY    *ib = (float8KEY*)(((Nsrt *) b)->t);
+	float8KEY  *ia = (float8KEY *) (((Nsrt *) a)->t);
+	float8KEY  *ib = (float8KEY *) (((Nsrt *) b)->t);
 
 	if (ia->lower == ib->lower)
 	{
diff --git a/contrib/btree_gist/btree_inet.c b/contrib/btree_gist/btree_inet.c
index a77864abeb5093020edb738d076cc1cfebdd3a31..a8d18c578ba4567702505f27388e26d3b3d2296f 100644
--- a/contrib/btree_gist/btree_inet.c
+++ b/contrib/btree_gist/btree_inet.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_inet.c,v 1.11 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_inet.c,v 1.12 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -60,8 +60,8 @@ gbt_inetlt(const void *a, const void *b)
 static int
 gbt_inetkey_cmp(const void *a, const void *b)
 {
-	inetKEY    *ia = (inetKEY*)(((Nsrt *) a)->t);
-	inetKEY    *ib = (inetKEY*)(((Nsrt *) b)->t);
+	inetKEY    *ia = (inetKEY *) (((Nsrt *) a)->t);
+	inetKEY    *ib = (inetKEY *) (((Nsrt *) b)->t);
 
 	if (ia->lower == ib->lower)
 	{
diff --git a/contrib/btree_gist/btree_int2.c b/contrib/btree_gist/btree_int2.c
index 2aeb94b169db15ffc6fb93de95d35b0b1f0866e7..2be4c40aa8d01814921ee4e6469a940db113dbba 100644
--- a/contrib/btree_gist/btree_int2.c
+++ b/contrib/btree_gist/btree_int2.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_int2.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_int2.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -56,8 +56,8 @@ gbt_int2lt(const void *a, const void *b)
 static int
 gbt_int2key_cmp(const void *a, const void *b)
 {
-    int16KEY    *ia = (int16KEY*)(((Nsrt *) a)->t);
-	int16KEY    *ib = (int16KEY*)(((Nsrt *) b)->t);
+	int16KEY   *ia = (int16KEY *) (((Nsrt *) a)->t);
+	int16KEY   *ib = (int16KEY *) (((Nsrt *) b)->t);
 
 	if (ia->lower == ib->lower)
 	{
diff --git a/contrib/btree_gist/btree_int4.c b/contrib/btree_gist/btree_int4.c
index 12a2c476a4d38fd194de5c842bb4675ebb10202b..aa0d4ac33fb192752d831f58478cd8c381318ab7 100644
--- a/contrib/btree_gist/btree_int4.c
+++ b/contrib/btree_gist/btree_int4.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_int4.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_int4.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -57,8 +57,8 @@ gbt_int4lt(const void *a, const void *b)
 static int
 gbt_int4key_cmp(const void *a, const void *b)
 {
-	int32KEY	*ia = (int32KEY*)(((Nsrt *) a)->t);
-	int32KEY	*ib = (int32KEY*)(((Nsrt *) b)->t);
+	int32KEY   *ia = (int32KEY *) (((Nsrt *) a)->t);
+	int32KEY   *ib = (int32KEY *) (((Nsrt *) b)->t);
 
 	if (ia->lower == ib->lower)
 	{
diff --git a/contrib/btree_gist/btree_int8.c b/contrib/btree_gist/btree_int8.c
index 10b119a004d823c2f384009f6ed9e58bf0500cf7..4cf36a07faa518cd3878e4c1aed692e79af0a818 100644
--- a/contrib/btree_gist/btree_int8.c
+++ b/contrib/btree_gist/btree_int8.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_int8.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_int8.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -57,8 +57,8 @@ gbt_int8lt(const void *a, const void *b)
 static int
 gbt_int8key_cmp(const void *a, const void *b)
 {
-    int64KEY    *ia = (int64KEY*)(((Nsrt *) a)->t);
-	int64KEY    *ib = (int64KEY*)(((Nsrt *) b)->t);
+	int64KEY   *ia = (int64KEY *) (((Nsrt *) a)->t);
+	int64KEY   *ib = (int64KEY *) (((Nsrt *) b)->t);
 
 	if (ia->lower == ib->lower)
 	{
diff --git a/contrib/btree_gist/btree_interval.c b/contrib/btree_gist/btree_interval.c
index 277835074b6b6f6b9efeab4c4170e3b15c13c245..32b9ddbad7402462123335db2fe87e34d3116a84 100644
--- a/contrib/btree_gist/btree_interval.c
+++ b/contrib/btree_gist/btree_interval.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_interval.c,v 1.13 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_interval.c,v 1.14 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -65,9 +65,9 @@ gbt_intvlt(const void *a, const void *b)
 static int
 gbt_intvkey_cmp(const void *a, const void *b)
 {
-    intvKEY *ia = (intvKEY*)(((Nsrt *) a)->t);
-	intvKEY *ib = (intvKEY*)(((Nsrt *) b)->t);
-	int res;
+	intvKEY    *ia = (intvKEY *) (((Nsrt *) a)->t);
+	intvKEY    *ib = (intvKEY *) (((Nsrt *) b)->t);
+	int			res;
 
 	res = DatumGetInt32(DirectFunctionCall2(interval_cmp, IntervalPGetDatum(&ia->lower), IntervalPGetDatum(&ib->lower)));
 	if (res == 0)
diff --git a/contrib/btree_gist/btree_macaddr.c b/contrib/btree_gist/btree_macaddr.c
index 2683e1454cf91cf28c74a6468138160736229a02..60092b4e9e22b9acf2a6d8a1663fe6ff72500018 100644
--- a/contrib/btree_gist/btree_macaddr.c
+++ b/contrib/btree_gist/btree_macaddr.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_macaddr.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_macaddr.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -63,9 +63,9 @@ gbt_macadlt(const void *a, const void *b)
 static int
 gbt_macadkey_cmp(const void *a, const void *b)
 {
-	macKEY *ia = (macKEY*)(((Nsrt *) a)->t);
-	macKEY *ib = (macKEY*)(((Nsrt *) b)->t);
-	int res;
+	macKEY	   *ia = (macKEY *) (((Nsrt *) a)->t);
+	macKEY	   *ib = (macKEY *) (((Nsrt *) b)->t);
+	int			res;
 
 	res = DatumGetInt32(DirectFunctionCall2(macaddr_cmp, MacaddrPGetDatum(&ia->lower), MacaddrPGetDatum(&ib->lower)));
 	if (res == 0)
diff --git a/contrib/btree_gist/btree_oid.c b/contrib/btree_gist/btree_oid.c
index 11327c29be438575566f472fc4449570a7945ca3..96e4be54d4ba501d38fb188934b122271913aa71 100644
--- a/contrib/btree_gist/btree_oid.c
+++ b/contrib/btree_gist/btree_oid.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_oid.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_oid.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -57,8 +57,8 @@ gbt_oidlt(const void *a, const void *b)
 static int
 gbt_oidkey_cmp(const void *a, const void *b)
 {
-    oidKEY    *ia = (oidKEY*)(((Nsrt *) a)->t);
-	oidKEY    *ib = (oidKEY*)(((Nsrt *) b)->t);
+	oidKEY	   *ia = (oidKEY *) (((Nsrt *) a)->t);
+	oidKEY	   *ib = (oidKEY *) (((Nsrt *) b)->t);
 
 	if (ia->lower == ib->lower)
 	{
diff --git a/contrib/btree_gist/btree_time.c b/contrib/btree_gist/btree_time.c
index 25c756588be2de9db98631ceb4e155c135ce9623..8566a8efb7026fe7f9bb7fa64853ceed3074d99a 100644
--- a/contrib/btree_gist/btree_time.c
+++ b/contrib/btree_gist/btree_time.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_time.c,v 1.17 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_time.c,v 1.18 2010/02/26 02:00:31 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -101,9 +101,9 @@ gbt_timelt(const void *a, const void *b)
 static int
 gbt_timekey_cmp(const void *a, const void *b)
 {
-    timeKEY *ia = (timeKEY*)(((Nsrt *) a)->t);
-	timeKEY *ib = (timeKEY*)(((Nsrt *) b)->t);
-	int res;
+	timeKEY    *ia = (timeKEY *) (((Nsrt *) a)->t);
+	timeKEY    *ib = (timeKEY *) (((Nsrt *) b)->t);
+	int			res;
 
 	res = DatumGetInt32(DirectFunctionCall2(time_cmp, TimeADTGetDatumFast(ia->lower), TimeADTGetDatumFast(ib->lower)));
 	if (res == 0)
diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c
index 9b5dfba8cc806dcd4e3adeef706e98ec9047c7c1..543f2129b0bc0d5b2a03e0fb17a7390cb9bfe93c 100644
--- a/contrib/btree_gist/btree_ts.c
+++ b/contrib/btree_gist/btree_ts.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_ts.c,v 1.18 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_ts.c,v 1.19 2010/02/26 02:00:32 momjian Exp $
  */
 #include "btree_gist.h"
 #include "btree_utils_num.h"
@@ -99,9 +99,9 @@ gbt_tslt(const void *a, const void *b)
 static int
 gbt_tskey_cmp(const void *a, const void *b)
 {
-    tsKEY *ia = (tsKEY*)(((Nsrt *) a)->t);
-	tsKEY *ib = (tsKEY*)(((Nsrt *) b)->t);
-	int res;
+	tsKEY	   *ia = (tsKEY *) (((Nsrt *) a)->t);
+	tsKEY	   *ib = (tsKEY *) (((Nsrt *) b)->t);
+	int			res;
 
 	res = DatumGetInt32(DirectFunctionCall2(timestamp_cmp, TimestampGetDatumFast(ia->lower), TimestampGetDatumFast(ib->lower)));
 	if (res == 0)
diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c
index f5f490bf4e95f17c3f0057b0b0e20e4cfba21c06..916706d8a479650532d16963639c179dd43260b8 100644
--- a/contrib/btree_gist/btree_utils_var.c
+++ b/contrib/btree_gist/btree_utils_var.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_utils_var.c,v 1.22 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_utils_var.c,v 1.23 2010/02/26 02:00:32 momjian Exp $
  */
 #include "btree_gist.h"
 
@@ -444,7 +444,7 @@ gbt_vsrt_cmp(const void *a, const void *b, void *arg)
 	GBT_VARKEY_R ar = gbt_var_key_readable(((const Vsrt *) a)->t);
 	GBT_VARKEY_R br = gbt_var_key_readable(((const Vsrt *) b)->t);
 	const gbtree_vinfo *tinfo = (const gbtree_vinfo *) arg;
-	int res;
+	int			res;
 
 	res = (*tinfo->f_cmp) (ar.lower, br.lower);
 	if (res == 0)
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index f9dfba8da6b457ee490bfb8878071a093e0b4437..abe64f3daec40822b0344a2cf363c5acdf55785d 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -8,7 +8,7 @@
  * Darko Prenosil <Darko.Prenosil@finteh.hr>
  * Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
  *
- * $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.90 2010/02/24 05:20:49 itagaki Exp $
+ * $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.91 2010/02/26 02:00:32 momjian Exp $
  * Copyright (c) 2001-2010, PostgreSQL Global Development Group
  * ALL RIGHTS RESERVED;
  *
@@ -101,7 +101,7 @@ static void dblink_security_check(PGconn *conn, remoteConn *rconn);
 static void dblink_res_error(const char *conname, PGresult *res, const char *dblink_context_msg, bool fail);
 static char *get_connect_string(const char *servername);
 static char *escape_param_str(const char *from);
-static int get_nondropped_natts(Oid relid);
+static int	get_nondropped_natts(Oid relid);
 
 /* Global */
 static remoteConn *pconn = NULL;
@@ -506,15 +506,15 @@ PG_FUNCTION_INFO_V1(dblink_fetch);
 Datum
 dblink_fetch(PG_FUNCTION_ARGS)
 {
-	ReturnSetInfo  *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	PGresult	   *res = NULL;
-	char		   *conname = NULL;
-	remoteConn	   *rconn = NULL;
-	PGconn		   *conn = NULL;
-	StringInfoData	buf;
-	char		   *curname = NULL;
-	int				howmany = 0;
-	bool			fail = true;	/* default to backward compatible */
+	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+	PGresult   *res = NULL;
+	char	   *conname = NULL;
+	remoteConn *rconn = NULL;
+	PGconn	   *conn = NULL;
+	StringInfoData buf;
+	char	   *curname = NULL;
+	int			howmany = 0;
+	bool		fail = true;	/* default to backward compatible */
 
 	DBLINK_INIT;
 
@@ -572,8 +572,8 @@ dblink_fetch(PG_FUNCTION_ARGS)
 
 	/*
 	 * Try to execute the query.  Note that since libpq uses malloc, the
-	 * PGresult will be long-lived even though we are still in a
-	 * short-lived memory context.
+	 * PGresult will be long-lived even though we are still in a short-lived
+	 * memory context.
 	 */
 	res = PQexec(conn, buf.data);
 	if (!res ||
@@ -645,16 +645,16 @@ dblink_get_result(PG_FUNCTION_ARGS)
 static Datum
 dblink_record_internal(FunctionCallInfo fcinfo, bool is_async)
 {
-	ReturnSetInfo  *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	char		   *msg;
-	PGresult	   *res = NULL;
-	PGconn		   *conn = NULL;
-	char		   *connstr = NULL;
-	char		   *sql = NULL;
-	char		   *conname = NULL;
-	remoteConn	   *rconn = NULL;
-	bool			fail = true;	/* default to backward compatible */
-	bool			freeconn = false;
+	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+	char	   *msg;
+	PGresult   *res = NULL;
+	PGconn	   *conn = NULL;
+	char	   *connstr = NULL;
+	char	   *sql = NULL;
+	char	   *conname = NULL;
+	remoteConn *rconn = NULL;
+	bool		fail = true;	/* default to backward compatible */
+	bool		freeconn = false;
 
 	/* check to see if caller supports us returning a tuplestore */
 	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@@ -764,7 +764,7 @@ dblink_record_internal(FunctionCallInfo fcinfo, bool is_async)
 static void
 materializeResult(FunctionCallInfo fcinfo, PGresult *res)
 {
-	ReturnSetInfo  *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
 
 	Assert(rsinfo->returnMode == SFRM_Materialize);
 
@@ -780,8 +780,8 @@ materializeResult(FunctionCallInfo fcinfo, PGresult *res)
 			is_sql_cmd = true;
 
 			/*
-			 * need a tuple descriptor representing one TEXT column to
-			 * return the command status string as our result tuple
+			 * need a tuple descriptor representing one TEXT column to return
+			 * the command status string as our result tuple
 			 */
 			tupdesc = CreateTemplateTupleDesc(1, false);
 			TupleDescInitEntry(tupdesc, (AttrNumber) 1, "status",
@@ -831,16 +831,16 @@ materializeResult(FunctionCallInfo fcinfo, PGresult *res)
 
 		if (ntuples > 0)
 		{
-			AttInMetadata	   *attinmeta;
-			Tuplestorestate	   *tupstore;
-			MemoryContext		oldcontext;
-			int					row;
-			char			  **values;
+			AttInMetadata *attinmeta;
+			Tuplestorestate *tupstore;
+			MemoryContext oldcontext;
+			int			row;
+			char	  **values;
 
 			attinmeta = TupleDescGetAttInMetadata(tupdesc);
 
 			oldcontext = MemoryContextSwitchTo(
-								rsinfo->econtext->ecxt_per_query_memory);
+									rsinfo->econtext->ecxt_per_query_memory);
 			tupstore = tuplestore_begin_heap(true, false, work_mem);
 			rsinfo->setResult = tupstore;
 			rsinfo->setDesc = tupdesc;
@@ -1281,13 +1281,13 @@ dblink_build_sql_insert(PG_FUNCTION_ARGS)
 						"attributes too large")));
 
 	/*
-	 * ensure we don't ask for more pk attributes than we have
-	 * non-dropped columns
+	 * ensure we don't ask for more pk attributes than we have non-dropped
+	 * columns
 	 */
 	nondropped_natts = get_nondropped_natts(relid);
 	if (pknumatts > nondropped_natts)
 		ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
-				errmsg("number of primary key fields exceeds number of specified relation attributes")));
+						errmsg("number of primary key fields exceeds number of specified relation attributes")));
 
 	/*
 	 * Source array is made up of key values that will be used to locate the
@@ -1388,13 +1388,13 @@ dblink_build_sql_delete(PG_FUNCTION_ARGS)
 						"attributes too large")));
 
 	/*
-	 * ensure we don't ask for more pk attributes than we have
-	 * non-dropped columns
+	 * ensure we don't ask for more pk attributes than we have non-dropped
+	 * columns
 	 */
 	nondropped_natts = get_nondropped_natts(relid);
 	if (pknumatts > nondropped_natts)
 		ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
-				errmsg("number of primary key fields exceeds number of specified relation attributes")));
+						errmsg("number of primary key fields exceeds number of specified relation attributes")));
 
 	/*
 	 * Target array is made up of key values that will be used to build the
@@ -1487,13 +1487,13 @@ dblink_build_sql_update(PG_FUNCTION_ARGS)
 						"attributes too large")));
 
 	/*
-	 * ensure we don't ask for more pk attributes than we have
-	 * non-dropped columns
+	 * ensure we don't ask for more pk attributes than we have non-dropped
+	 * columns
 	 */
 	nondropped_natts = get_nondropped_natts(relid);
 	if (pknumatts > nondropped_natts)
 		ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
-				errmsg("number of primary key fields exceeds number of specified relation attributes")));
+						errmsg("number of primary key fields exceeds number of specified relation attributes")));
 
 	/*
 	 * Source array is made up of key values that will be used to locate the
@@ -1551,7 +1551,7 @@ dblink_current_query(PG_FUNCTION_ARGS)
 }
 
 /*
- * Retrieve async notifications for a connection. 
+ * Retrieve async notifications for a connection.
  *
  * Returns a setof record of notifications, or an empty set if none received.
  * Can optionally take a named connection as parameter, but uses the unnamed connection by default.
@@ -1563,14 +1563,14 @@ PG_FUNCTION_INFO_V1(dblink_get_notify);
 Datum
 dblink_get_notify(PG_FUNCTION_ARGS)
 {
-	PGconn			   *conn = NULL;
-	remoteConn		   *rconn = NULL;
-	PGnotify		   *notify;
-	ReturnSetInfo	   *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	TupleDesc			tupdesc;
-	Tuplestorestate	   *tupstore;
-	MemoryContext		per_query_ctx;
-	MemoryContext		oldcontext;
+	PGconn	   *conn = NULL;
+	remoteConn *rconn = NULL;
+	PGnotify   *notify;
+	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+	TupleDesc	tupdesc;
+	Tuplestorestate *tupstore;
+	MemoryContext per_query_ctx;
+	MemoryContext oldcontext;
 
 	DBLINK_INIT;
 	if (PG_NARGS() == 1)
@@ -2484,4 +2484,3 @@ get_nondropped_natts(Oid relid)
 	relation_close(rel, AccessShareLock);
 	return nondropped_natts;
 }
-
diff --git a/contrib/dict_xsyn/dict_xsyn.c b/contrib/dict_xsyn/dict_xsyn.c
index 8c972484efe9ff66a7588bc72fabc21cb92a22d5..dc16d9583e2fa30a408ff7266cd8584736755012 100644
--- a/contrib/dict_xsyn/dict_xsyn.c
+++ b/contrib/dict_xsyn/dict_xsyn.c
@@ -6,7 +6,7 @@
  * Copyright (c) 2007-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/contrib/dict_xsyn/dict_xsyn.c,v 1.8 2010/01/02 16:57:32 momjian Exp $
+ *	  $PostgreSQL: pgsql/contrib/dict_xsyn/dict_xsyn.c,v 1.9 2010/02/26 02:00:32 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -147,7 +147,7 @@ dxsyn_init(PG_FUNCTION_ARGS)
 	List	   *dictoptions = (List *) PG_GETARG_POINTER(0);
 	DictSyn    *d;
 	ListCell   *l;
-	char       *filename = NULL;
+	char	   *filename = NULL;
 
 	d = (DictSyn *) palloc0(sizeof(DictSyn));
 	d->len = 0;
diff --git a/contrib/hstore/hstore.h b/contrib/hstore/hstore.h
index 495ac1afc9b986730504d0188bf98b2b2ac18706..796dce575e52a79570a95da1596a9eeb813b55f7 100644
--- a/contrib/hstore/hstore.h
+++ b/contrib/hstore/hstore.h
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/hstore/hstore.h,v 1.9 2009/09/30 19:50:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore.h,v 1.10 2010/02/26 02:00:32 momjian Exp $
  */
 #ifndef __HSTORE_H__
 #define __HSTORE_H__
@@ -12,7 +12,7 @@
  * HEntry: there is one of these for each key _and_ value in an hstore
  *
  * the position offset points to the _end_ so that we can get the length
- * by subtraction from the previous entry.  the ISFIRST flag lets us tell
+ * by subtraction from the previous entry.	the ISFIRST flag lets us tell
  * whether there is a previous entry.
  */
 typedef struct
@@ -51,7 +51,7 @@ typedef struct
 /*
  * it's not possible to get more than 2^28 items into an hstore,
  * so we reserve the top few bits of the size field. See hstore_compat.c
- * for one reason why.  Some bits are left for future use here.
+ * for one reason why.	Some bits are left for future use here.
  */
 #define HS_FLAG_NEWVERSION 0x80000000
 
@@ -88,7 +88,7 @@ typedef struct
  * evaluation here.
  */
 #define HS_COPYITEM(dent_,dbuf_,dptr_,sptr_,klen_,vlen_,vnull_)			\
-    do {																\
+	do {																\
 		memcpy((dptr_), (sptr_), (klen_)+(vlen_));						\
 		(dptr_) += (klen_)+(vlen_);										\
 		(dent_)++->entry = ((dptr_) - (dbuf_) - (vlen_)) & HENTRY_POSMASK; \
@@ -119,7 +119,7 @@ typedef struct
 /* finalize a newly-constructed hstore */
 #define HS_FINALIZE(hsp_,count_,buf_,ptr_)							\
 	do {															\
-		int	buflen = (ptr_) - (buf_);								\
+		int buflen = (ptr_) - (buf_);								\
 		if ((count_))												\
 			ARRPTR(hsp_)[0].entry |= HENTRY_ISFIRST;				\
 		if ((count_) != HS_COUNT((hsp_)))							\
@@ -133,7 +133,7 @@ typedef struct
 /* ensure the varlena size of an existing hstore is correct */
 #define HS_FIXSIZE(hsp_,count_)											\
 	do {																\
-		int bl = (count_) ? HSE_ENDPOS(ARRPTR(hsp_)[2*(count_)-1]) : 0;	\
+		int bl = (count_) ? HSE_ENDPOS(ARRPTR(hsp_)[2*(count_)-1]) : 0; \
 		SET_VARSIZE((hsp_), CALCDATASIZE((count_),bl));					\
 	} while (0)
 
@@ -172,7 +172,7 @@ extern Pairs *hstoreArrayToPairs(ArrayType *a, int *npairs);
 #define HStoreExistsStrategyNumber		9
 #define HStoreExistsAnyStrategyNumber	10
 #define HStoreExistsAllStrategyNumber	11
-#define HStoreOldContainsStrategyNumber	13		/* backwards compatibility */
+#define HStoreOldContainsStrategyNumber 13		/* backwards compatibility */
 
 /*
  * defining HSTORE_POLLUTE_NAMESPACE=0 will prevent use of old function names;
diff --git a/contrib/hstore/hstore_compat.c b/contrib/hstore/hstore_compat.c
index e2c2b55c100aa7bb10a9089b42d7d218162a7f0c..033d945f9ca7a150d80f60d6cbdbbeda294ee18c 100644
--- a/contrib/hstore/hstore_compat.c
+++ b/contrib/hstore/hstore_compat.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/hstore/hstore_compat.c,v 1.1 2009/09/30 19:50:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore_compat.c,v 1.2 2010/02/26 02:00:32 momjian Exp $
  *
  * Notes on old/new hstore format disambiguation.
  *
@@ -106,24 +106,24 @@ typedef struct
 				pos:31;
 } HOldEntry;
 
-static int hstoreValidNewFormat(HStore *hs);
-static int hstoreValidOldFormat(HStore *hs);
+static int	hstoreValidNewFormat(HStore *hs);
+static int	hstoreValidOldFormat(HStore *hs);
 
 
 /*
  * Validity test for a new-format hstore.
- *  0 = not valid
- *  1 = valid but with "slop" in the length
- *  2 = exactly valid
+ *	0 = not valid
+ *	1 = valid but with "slop" in the length
+ *	2 = exactly valid
  */
 static int
 hstoreValidNewFormat(HStore *hs)
 {
-	int count = HS_COUNT(hs);
-	HEntry *entries = ARRPTR(hs);
-	int buflen = (count) ? HSE_ENDPOS(entries[2*(count)-1]) : 0;
-	int vsize = CALCDATASIZE(count,buflen);
-	int i;
+	int			count = HS_COUNT(hs);
+	HEntry	   *entries = ARRPTR(hs);
+	int			buflen = (count) ? HSE_ENDPOS(entries[2 * (count) - 1]) : 0;
+	int			vsize = CALCDATASIZE(count, buflen);
+	int			i;
 
 	if (hs->size_ & HS_FLAG_NEWVERSION)
 		return 2;
@@ -139,10 +139,10 @@ hstoreValidNewFormat(HStore *hs)
 
 	/* entry position must be nondecreasing */
 
-	for (i = 1; i < 2*count; ++i)
+	for (i = 1; i < 2 * count; ++i)
 	{
 		if (HSE_ISFIRST(entries[i])
-			|| (HSE_ENDPOS(entries[i]) < HSE_ENDPOS(entries[i-1])))
+			|| (HSE_ENDPOS(entries[i]) < HSE_ENDPOS(entries[i - 1])))
 			return 0;
 	}
 
@@ -150,9 +150,9 @@ hstoreValidNewFormat(HStore *hs)
 
 	for (i = 1; i < count; ++i)
 	{
-		if (HS_KEYLEN(entries,i) < HS_KEYLEN(entries,i-1))
+		if (HS_KEYLEN(entries, i) < HS_KEYLEN(entries, i - 1))
 			return 0;
-		if (HSE_ISNULL(entries[2*i]))
+		if (HSE_ISNULL(entries[2 * i]))
 			return 0;
 	}
 
@@ -164,18 +164,18 @@ hstoreValidNewFormat(HStore *hs)
 
 /*
  * Validity test for an old-format hstore.
- *  0 = not valid
- *  1 = valid but with "slop" in the length
- *  2 = exactly valid
+ *	0 = not valid
+ *	1 = valid but with "slop" in the length
+ *	2 = exactly valid
  */
 static int
 hstoreValidOldFormat(HStore *hs)
 {
-	int count = hs->size_;
-	HOldEntry *entries = (HOldEntry *) ARRPTR(hs);
-	int vsize;
-	int lastpos = 0;
-	int i;
+	int			count = hs->size_;
+	HOldEntry  *entries = (HOldEntry *) ARRPTR(hs);
+	int			vsize;
+	int			lastpos = 0;
+	int			i;
 
 	if (hs->size_ & HS_FLAG_NEWVERSION)
 		return 0;
@@ -188,7 +188,7 @@ hstoreValidOldFormat(HStore *hs)
 	if (count > 0xFFFFFFF)
 		return 0;
 
-	if (CALCDATASIZE(count,0) > VARSIZE(hs))
+	if (CALCDATASIZE(count, 0) > VARSIZE(hs))
 		return 0;
 
 	if (entries[0].pos != 0)
@@ -198,14 +198,14 @@ hstoreValidOldFormat(HStore *hs)
 
 	for (i = 1; i < count; ++i)
 	{
-		if (entries[i].keylen < entries[i-1].keylen)
+		if (entries[i].keylen < entries[i - 1].keylen)
 			return 0;
 	}
 
 	/*
-	 * entry position must be strictly increasing, except for the
-	 * first entry (which can be ""=>"" and thus zero-length); and
-	 * all entries must be properly contiguous
+	 * entry position must be strictly increasing, except for the first entry
+	 * (which can be ""=>"" and thus zero-length); and all entries must be
+	 * properly contiguous
 	 */
 
 	for (i = 0; i < count; ++i)
@@ -216,7 +216,7 @@ hstoreValidOldFormat(HStore *hs)
 					+ ((entries[i].valisnull) ? 0 : entries[i].vallen));
 	}
 
-	vsize = CALCDATASIZE(count,lastpos);
+	vsize = CALCDATASIZE(count, lastpos);
 
 	if (vsize > VARSIZE(hs))
 		return 0;
@@ -255,108 +255,100 @@ hstoreUpgrade(Datum orig)
 		if (valid_new)
 		{
 			/*
-			 * force the "new version" flag and the correct varlena
-			 * length, but only if we have a writable copy already
-			 * (which we almost always will, since short new-format
-			 * values won't come through here)
+			 * force the "new version" flag and the correct varlena length,
+			 * but only if we have a writable copy already (which we almost
+			 * always will, since short new-format values won't come through
+			 * here)
 			 */
 			if (writable)
 			{
-				HS_SETCOUNT(hs,HS_COUNT(hs));
-				HS_FIXSIZE(hs,HS_COUNT(hs));
+				HS_SETCOUNT(hs, HS_COUNT(hs));
+				HS_FIXSIZE(hs, HS_COUNT(hs));
 			}
 			return hs;
 		}
 		else
 		{
-			elog(ERROR,"invalid hstore value found");
+			elog(ERROR, "invalid hstore value found");
 		}
 	}
 
 	/*
-	 * this is the tricky edge case. It is only possible in some
-	 * quite extreme cases (the hstore must have had a lot
-	 * of wasted padding space at the end).
-	 * But the only way a "new" hstore value could get here is if
-	 * we're upgrading in place from a pre-release version of
-	 * hstore-new (NOT contrib/hstore), so we work off the following
-	 * assumptions:
-	 *  1. If you're moving from old contrib/hstore to hstore-new,
-	 *     you're required to fix up any potential conflicts first,
-	 *     e.g. by running ALTER TABLE ... USING col::text::hstore;
-	 *     on all hstore columns before upgrading.
-	 *  2. If you're moving from old contrib/hstore to new
-	 *     contrib/hstore, then "new" values are impossible here
-	 *  3. If you're moving from pre-release hstore-new to hstore-new,
-	 *     then "old" values are impossible here
-	 *  4. If you're moving from pre-release hstore-new to new
-	 *     contrib/hstore, you're not doing so as an in-place upgrade,
-	 *     so there is no issue
-	 * So the upshot of all this is that we can treat all the edge
-	 * cases as "new" if we're being built as hstore-new, and "old"
-	 * if we're being built as contrib/hstore.
+	 * this is the tricky edge case. It is only possible in some quite extreme
+	 * cases (the hstore must have had a lot of wasted padding space at the
+	 * end). But the only way a "new" hstore value could get here is if we're
+	 * upgrading in place from a pre-release version of hstore-new (NOT
+	 * contrib/hstore), so we work off the following assumptions: 1. If you're
+	 * moving from old contrib/hstore to hstore-new, you're required to fix up
+	 * any potential conflicts first, e.g. by running ALTER TABLE ... USING
+	 * col::text::hstore; on all hstore columns before upgrading. 2. If you're
+	 * moving from old contrib/hstore to new contrib/hstore, then "new" values
+	 * are impossible here 3. If you're moving from pre-release hstore-new to
+	 * hstore-new, then "old" values are impossible here 4. If you're moving
+	 * from pre-release hstore-new to new contrib/hstore, you're not doing so
+	 * as an in-place upgrade, so there is no issue So the upshot of all this
+	 * is that we can treat all the edge cases as "new" if we're being built
+	 * as hstore-new, and "old" if we're being built as contrib/hstore.
 	 *
-	 * XXX the WARNING can probably be downgraded to DEBUG1 once this
-	 * has been beta-tested. But for now, it would be very useful to
-	 * know if anyone can actually reach this case in a non-contrived
-	 * setting.
+	 * XXX the WARNING can probably be downgraded to DEBUG1 once this has been
+	 * beta-tested. But for now, it would be very useful to know if anyone can
+	 * actually reach this case in a non-contrived setting.
 	 */
 
 	if (valid_new)
 	{
 #if HSTORE_IS_HSTORE_NEW
-		elog(WARNING,"ambiguous hstore value resolved as hstore-new");
+		elog(WARNING, "ambiguous hstore value resolved as hstore-new");
 
 		/*
-		 * force the "new version" flag and the correct varlena
-		 * length, but only if we have a writable copy already
-		 * (which we almost always will, since short new-format
-		 * values won't come through here)
+		 * force the "new version" flag and the correct varlena length, but
+		 * only if we have a writable copy already (which we almost always
+		 * will, since short new-format values won't come through here)
 		 */
 		if (writable)
 		{
-			HS_SETCOUNT(hs,HS_COUNT(hs));
-			HS_FIXSIZE(hs,HS_COUNT(hs));
+			HS_SETCOUNT(hs, HS_COUNT(hs));
+			HS_FIXSIZE(hs, HS_COUNT(hs));
 		}
 		return hs;
 #else
-		elog(WARNING,"ambiguous hstore value resolved as hstore-old");
+		elog(WARNING, "ambiguous hstore value resolved as hstore-old");
 #endif
 	}
 
 	/*
-	 * must have an old-style value. Overwrite it in place as a new-style
-	 * one, making sure we have a writable copy first.
+	 * must have an old-style value. Overwrite it in place as a new-style one,
+	 * making sure we have a writable copy first.
 	 */
 
 	if (!writable)
 		hs = (HStore *) PG_DETOAST_DATUM_COPY(orig);
 
 	{
-		int count = hs->size_;
-		HEntry *new_entries = ARRPTR(hs);
-		HOldEntry *old_entries = (HOldEntry *) ARRPTR(hs);
-		int i;
-		
+		int			count = hs->size_;
+		HEntry	   *new_entries = ARRPTR(hs);
+		HOldEntry  *old_entries = (HOldEntry *) ARRPTR(hs);
+		int			i;
+
 		for (i = 0; i < count; ++i)
 		{
-			uint32 pos = old_entries[i].pos;
-			uint32 keylen = old_entries[i].keylen;
-			uint32 vallen = old_entries[i].vallen;
-			bool isnull = old_entries[i].valisnull;
+			uint32		pos = old_entries[i].pos;
+			uint32		keylen = old_entries[i].keylen;
+			uint32		vallen = old_entries[i].vallen;
+			bool		isnull = old_entries[i].valisnull;
 
 			if (isnull)
 				vallen = 0;
 
-			new_entries[2*i].entry = (pos + keylen) & HENTRY_POSMASK;
-			new_entries[2*i+1].entry = (((pos + keylen + vallen) & HENTRY_POSMASK)
-										| ((isnull) ? HENTRY_ISNULL : 0)); 
+			new_entries[2 * i].entry = (pos + keylen) & HENTRY_POSMASK;
+			new_entries[2 * i + 1].entry = (((pos + keylen + vallen) & HENTRY_POSMASK)
+											| ((isnull) ? HENTRY_ISNULL : 0));
 		}
 
 		if (count)
 			new_entries[0].entry |= HENTRY_ISFIRST;
-		HS_SETCOUNT(hs,count);
-		HS_FIXSIZE(hs,count);
+		HS_SETCOUNT(hs, count);
+		HS_FIXSIZE(hs, count);
 	}
 
 	return hs;
@@ -368,9 +360,9 @@ Datum		hstore_version_diag(PG_FUNCTION_ARGS);
 Datum
 hstore_version_diag(PG_FUNCTION_ARGS)
 {
-	HStore *hs = (HStore *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
-	int valid_new = hstoreValidNewFormat(hs);
-	int valid_old = hstoreValidOldFormat(hs);
+	HStore	   *hs = (HStore *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
+	int			valid_new = hstoreValidNewFormat(hs);
+	int			valid_old = hstoreValidOldFormat(hs);
 
-	PG_RETURN_INT32(valid_old*10 + valid_new);
+	PG_RETURN_INT32(valid_old * 10 + valid_new);
 }
diff --git a/contrib/hstore/hstore_gin.c b/contrib/hstore/hstore_gin.c
index 3bd9d718bb33e92679286db84ac9c08a90db76cd..f5056f53ec633b0f40e357fbf7eef0b54a02848a 100644
--- a/contrib/hstore/hstore_gin.c
+++ b/contrib/hstore/hstore_gin.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/hstore/hstore_gin.c,v 1.7 2009/09/30 19:50:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore_gin.c,v 1.8 2010/02/26 02:00:32 momjian Exp $
  */
 #include "postgres.h"
 
@@ -36,10 +36,10 @@ gin_extract_hstore(PG_FUNCTION_ARGS)
 	HStore	   *hs = PG_GETARG_HS(0);
 	int32	   *nentries = (int32 *) PG_GETARG_POINTER(1);
 	Datum	   *entries = NULL;
-	HEntry     *hsent = ARRPTR(hs);
-	char       *ptr = STRPTR(hs);
-	int        count = HS_COUNT(hs);
-	int        i;
+	HEntry	   *hsent = ARRPTR(hs);
+	char	   *ptr = STRPTR(hs);
+	int			count = HS_COUNT(hs);
+	int			i;
 
 	*nentries = 2 * count;
 	if (count)
@@ -49,21 +49,21 @@ gin_extract_hstore(PG_FUNCTION_ARGS)
 	{
 		text	   *item;
 
-		item = makeitem(HS_KEY(hsent,ptr,i), HS_KEYLEN(hsent,i));
+		item = makeitem(HS_KEY(hsent, ptr, i), HS_KEYLEN(hsent, i));
 		*VARDATA(item) = KEYFLAG;
-		entries[2*i] = PointerGetDatum(item);
+		entries[2 * i] = PointerGetDatum(item);
 
-		if (HS_VALISNULL(hsent,i))
+		if (HS_VALISNULL(hsent, i))
 		{
 			item = makeitem(NULL, 0);
 			*VARDATA(item) = NULLFLAG;
 		}
 		else
 		{
-			item = makeitem(HS_VAL(hsent,ptr,i), HS_VALLEN(hsent,i));
+			item = makeitem(HS_VAL(hsent, ptr, i), HS_VALLEN(hsent, i));
 			*VARDATA(item) = VALFLAG;
 		}
-		entries[2*i+1] = PointerGetDatum(item);
+		entries[2 * i + 1] = PointerGetDatum(item);
 	}
 
 	PG_RETURN_POINTER(entries);
@@ -103,14 +103,15 @@ gin_extract_hstore_query(PG_FUNCTION_ARGS)
 	else if (strategy == HStoreExistsAnyStrategyNumber ||
 			 strategy == HStoreExistsAllStrategyNumber)
 	{
-		ArrayType   *query = PG_GETARG_ARRAYTYPE_P(0);
-		Datum      *key_datums;
-		bool       *key_nulls;
-		int        key_count;
-		int        i,j;
+		ArrayType  *query = PG_GETARG_ARRAYTYPE_P(0);
+		Datum	   *key_datums;
+		bool	   *key_nulls;
+		int			key_count;
+		int			i,
+					j;
 		int32	   *nentries = (int32 *) PG_GETARG_POINTER(1);
 		Datum	   *entries = NULL;
-		text       *item;
+		text	   *item;
 
 		deconstruct_array(query,
 						  TEXTOID, -1, false, 'i',
@@ -145,8 +146,10 @@ gin_consistent_hstore(PG_FUNCTION_ARGS)
 {
 	bool	   *check = (bool *) PG_GETARG_POINTER(0);
 	StrategyNumber strategy = PG_GETARG_UINT16(1);
+
 	/* HStore	   *query = PG_GETARG_HS(2); */
 	int32		nkeys = PG_GETARG_INT32(3);
+
 	/* Pointer	   *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
 	bool	   *recheck = (bool *) PG_GETARG_POINTER(5);
 	bool		res = true;
@@ -178,7 +181,7 @@ gin_consistent_hstore(PG_FUNCTION_ARGS)
 	}
 	else if (strategy == HStoreExistsAllStrategyNumber)
 	{
-		int        i;
+		int			i;
 
 		for (i = 0; res && i < nkeys; ++i)
 			if (!check[i])
diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c
index b036fa932f227db24a919706ec4e0f005e045f08..db58fb62ddfd768b8ebb2850553564e77cc757d8 100644
--- a/contrib/hstore/hstore_gist.c
+++ b/contrib/hstore/hstore_gist.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/hstore/hstore_gist.c,v 1.11 2009/09/30 19:50:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore_gist.c,v 1.12 2010/02/26 02:00:32 momjian Exp $
  */
 #include "postgres.h"
 
@@ -118,20 +118,20 @@ ghstore_compress(PG_FUNCTION_ARGS)
 		HStore	   *val = DatumGetHStoreP(entry->key);
 		HEntry	   *hsent = ARRPTR(val);
 		char	   *ptr = STRPTR(val);
-		int        count = HS_COUNT(val);
-		int        i;
+		int			count = HS_COUNT(val);
+		int			i;
 
 		SET_VARSIZE(res, CALCGTSIZE(0));
 
 		for (i = 0; i < count; ++i)
 		{
-			int	h;
+			int			h;
 
-			h = crc32_sz((char *) HS_KEY(hsent,ptr,i), HS_KEYLEN(hsent,i));
+			h = crc32_sz((char *) HS_KEY(hsent, ptr, i), HS_KEYLEN(hsent, i));
 			HASH(GETSIGN(res), h);
-			if (!HS_VALISNULL(hsent,i))
+			if (!HS_VALISNULL(hsent, i))
 			{
-				h = crc32_sz((char *) HS_VAL(hsent,ptr,i), HS_VALLEN(hsent,i));
+				h = crc32_sz((char *) HS_VAL(hsent, ptr, i), HS_VALLEN(hsent, i));
 				HASH(GETSIGN(res), h);
 			}
 		}
@@ -511,6 +511,7 @@ ghstore_consistent(PG_FUNCTION_ARGS)
 {
 	GISTTYPE   *entry = (GISTTYPE *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
 	StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
 	/* Oid		subtype = PG_GETARG_OID(3); */
 	bool	   *recheck = (bool *) PG_GETARG_POINTER(4);
 	bool		res = true;
@@ -530,18 +531,18 @@ ghstore_consistent(PG_FUNCTION_ARGS)
 		HStore	   *query = PG_GETARG_HS(1);
 		HEntry	   *qe = ARRPTR(query);
 		char	   *qv = STRPTR(query);
-		int        count = HS_COUNT(query);
-		int        i;
+		int			count = HS_COUNT(query);
+		int			i;
 
 		for (i = 0; res && i < count; ++i)
 		{
-			int	crc = crc32_sz((char *) HS_KEY(qe,qv,i), HS_KEYLEN(qe,i));
+			int			crc = crc32_sz((char *) HS_KEY(qe, qv, i), HS_KEYLEN(qe, i));
 
 			if (GETBIT(sign, HASHVAL(crc)))
 			{
-				if (!HS_VALISNULL(qe,i))
+				if (!HS_VALISNULL(qe, i))
 				{
-					crc = crc32_sz((char *) HS_VAL(qe,qv,i), HS_VALLEN(qe,i));
+					crc = crc32_sz((char *) HS_VAL(qe, qv, i), HS_VALLEN(qe, i));
 					if (!GETBIT(sign, HASHVAL(crc)))
 						res = false;
 				}
@@ -559,11 +560,11 @@ ghstore_consistent(PG_FUNCTION_ARGS)
 	}
 	else if (strategy == HStoreExistsAllStrategyNumber)
 	{
-		ArrayType   *query = PG_GETARG_ARRAYTYPE_P(1);
-		Datum      *key_datums;
-		bool       *key_nulls;
-		int        key_count;
-		int        i;
+		ArrayType  *query = PG_GETARG_ARRAYTYPE_P(1);
+		Datum	   *key_datums;
+		bool	   *key_nulls;
+		int			key_count;
+		int			i;
 
 		deconstruct_array(query,
 						  TEXTOID, -1, false, 'i',
@@ -571,7 +572,8 @@ ghstore_consistent(PG_FUNCTION_ARGS)
 
 		for (i = 0; res && i < key_count; ++i)
 		{
-			int	crc;
+			int			crc;
+
 			if (key_nulls[i])
 				continue;
 			crc = crc32_sz(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ);
@@ -581,11 +583,11 @@ ghstore_consistent(PG_FUNCTION_ARGS)
 	}
 	else if (strategy == HStoreExistsAnyStrategyNumber)
 	{
-		ArrayType   *query = PG_GETARG_ARRAYTYPE_P(1);
-		Datum      *key_datums;
-		bool       *key_nulls;
-		int        key_count;
-		int        i;
+		ArrayType  *query = PG_GETARG_ARRAYTYPE_P(1);
+		Datum	   *key_datums;
+		bool	   *key_nulls;
+		int			key_count;
+		int			i;
 
 		deconstruct_array(query,
 						  TEXTOID, -1, false, 'i',
@@ -595,7 +597,8 @@ ghstore_consistent(PG_FUNCTION_ARGS)
 
 		for (i = 0; !res && i < key_count; ++i)
 		{
-			int	crc;
+			int			crc;
+
 			if (key_nulls[i])
 				continue;
 			crc = crc32_sz(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ);
diff --git a/contrib/hstore/hstore_io.c b/contrib/hstore/hstore_io.c
index a79cddef0af8e9dc6bc21dd8036d07677732c935..fa6da693e94eef81ccde3b99d0c26f50f745aeb1 100644
--- a/contrib/hstore/hstore_io.c
+++ b/contrib/hstore/hstore_io.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/hstore/hstore_io.c,v 1.12 2009/09/30 19:50:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore_io.c,v 1.13 2010/02/26 02:00:32 momjian Exp $
  */
 #include "postgres.h"
 
@@ -18,7 +18,7 @@
 PG_MODULE_MAGIC;
 
 /* old names for C functions */
-HSTORE_POLLUTE(hstore_from_text,tconvert);
+HSTORE_POLLUTE(hstore_from_text, tconvert);
 
 
 typedef struct
@@ -370,12 +370,12 @@ hstoreCheckValLen(size_t len)
 HStore *
 hstorePairs(Pairs *pairs, int4 pcount, int4 buflen)
 {
-	HStore     *out;
+	HStore	   *out;
 	HEntry	   *entry;
 	char	   *ptr;
 	char	   *buf;
-	int4       len;
-	int4       i;
+	int4		len;
+	int4		i;
 
 	len = CALCDATASIZE(pcount, buflen);
 	out = palloc(len);
@@ -389,9 +389,9 @@ hstorePairs(Pairs *pairs, int4 pcount, int4 buflen)
 	buf = ptr = STRPTR(out);
 
 	for (i = 0; i < pcount; i++)
-		HS_ADDITEM(entry,buf,ptr,pairs[i]);
+		HS_ADDITEM(entry, buf, ptr, pairs[i]);
 
-	HS_FINALIZE(out,pcount,buf,ptr);
+	HS_FINALIZE(out, pcount, buf, ptr);
 
 	return out;
 }
@@ -426,9 +426,9 @@ hstore_recv(PG_FUNCTION_ARGS)
 	int4		buflen;
 	HStore	   *out;
 	Pairs	   *pairs;
-	int4	   i;
-	int4	   pcount;
-	StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+	int4		i;
+	int4		pcount;
+	StringInfo	buf = (StringInfo) PG_GETARG_POINTER(0);
 
 	pcount = pq_getmsgint(buf, 4);
 
@@ -442,8 +442,8 @@ hstore_recv(PG_FUNCTION_ARGS)
 
 	for (i = 0; i < pcount; ++i)
 	{
-		int rawlen = pq_getmsgint(buf, 4);
-		int len;
+		int			rawlen = pq_getmsgint(buf, 4);
+		int			len;
 
 		if (rawlen < 0)
 			ereport(ERROR,
@@ -482,9 +482,9 @@ Datum		hstore_from_text(PG_FUNCTION_ARGS);
 Datum
 hstore_from_text(PG_FUNCTION_ARGS)
 {
-	text       *key;
-	text       *val = NULL;
-	Pairs      p;
+	text	   *key;
+	text	   *val = NULL;
+	Pairs		p;
 	HStore	   *out;
 
 	if (PG_ARGISNULL(0))
@@ -524,13 +524,13 @@ hstore_from_arrays(PG_FUNCTION_ARGS)
 	Pairs	   *pairs;
 	Datum	   *key_datums;
 	bool	   *key_nulls;
-	int		   key_count;
+	int			key_count;
 	Datum	   *value_datums;
 	bool	   *value_nulls;
-	int		   value_count;
+	int			value_count;
 	ArrayType  *key_array;
 	ArrayType  *value_array;
-	int		   i;
+	int			i;
 
 	if (PG_ARGISNULL(0))
 		PG_RETURN_NULL();
@@ -540,8 +540,8 @@ hstore_from_arrays(PG_FUNCTION_ARGS)
 	Assert(ARR_ELEMTYPE(key_array) == TEXTOID);
 
 	/*
-	 * must check >1 rather than != 1 because empty arrays have
-	 * 0 dimensions, not 1
+	 * must check >1 rather than != 1 because empty arrays have 0 dimensions,
+	 * not 1
 	 */
 
 	if (ARR_NDIM(key_array) > 1)
@@ -631,15 +631,15 @@ Datum
 hstore_from_array(PG_FUNCTION_ARGS)
 {
 	ArrayType  *in_array = PG_GETARG_ARRAYTYPE_P(0);
-	int         ndims = ARR_NDIM(in_array);
-	int         count;
+	int			ndims = ARR_NDIM(in_array);
+	int			count;
 	int4		buflen;
 	HStore	   *out;
 	Pairs	   *pairs;
 	Datum	   *in_datums;
 	bool	   *in_nulls;
-	int		    in_count;
-	int		    i;
+	int			in_count;
+	int			i;
 
 	Assert(ARR_ELEMTYPE(in_array) == TEXTOID);
 
@@ -667,7 +667,7 @@ hstore_from_array(PG_FUNCTION_ARGS)
 			ereport(ERROR,
 					(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
 					 errmsg("wrong number of array subscripts")));
-	}			
+	}
 
 	deconstruct_array(in_array,
 					  TEXTOID, -1, false, 'i',
@@ -679,26 +679,26 @@ hstore_from_array(PG_FUNCTION_ARGS)
 
 	for (i = 0; i < count; ++i)
 	{
-		if (in_nulls[i*2])
+		if (in_nulls[i * 2])
 			ereport(ERROR,
 					(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
 					 errmsg("null value not allowed for hstore key")));
 
-		if (in_nulls[i*2+1])
+		if (in_nulls[i * 2 + 1])
 		{
-			pairs[i].key = VARDATA_ANY(in_datums[i*2]);
+			pairs[i].key = VARDATA_ANY(in_datums[i * 2]);
 			pairs[i].val = NULL;
-			pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i*2]));
+			pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2]));
 			pairs[i].vallen = 4;
 			pairs[i].isnull = true;
 			pairs[i].needfree = false;
 		}
 		else
 		{
-			pairs[i].key = VARDATA_ANY(in_datums[i*2]);
-			pairs[i].val = VARDATA_ANY(in_datums[i*2+1]);
-			pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i*2]));
-			pairs[i].vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(in_datums[i*2+1]));
+			pairs[i].key = VARDATA_ANY(in_datums[i * 2]);
+			pairs[i].val = VARDATA_ANY(in_datums[i * 2 + 1]);
+			pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2]));
+			pairs[i].vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(in_datums[i * 2 + 1]));
 			pairs[i].isnull = false;
 			pairs[i].needfree = false;
 		}
@@ -740,25 +740,26 @@ hstore_from_record(PG_FUNCTION_ARGS)
 	HeapTupleHeader rec;
 	int4		buflen;
 	HStore	   *out;
-	Pairs      *pairs;
+	Pairs	   *pairs;
 	Oid			tupType;
 	int32		tupTypmod;
 	TupleDesc	tupdesc;
 	HeapTupleData tuple;
 	RecordIOData *my_extra;
 	int			ncolumns;
-	int			i,j;
+	int			i,
+				j;
 	Datum	   *values;
 	bool	   *nulls;
 
 	if (PG_ARGISNULL(0))
 	{
-		Oid     argtype = get_fn_expr_argtype(fcinfo->flinfo,0);
+		Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
 
 		/*
-		 * have no tuple to look at, so the only source of type info
-		 * is the argtype. The lookup_rowtype_tupdesc call below will
-		 * error out if we don't have a known composite type oid here.
+		 * have no tuple to look at, so the only source of type info is the
+		 * argtype. The lookup_rowtype_tupdesc call below will error out if we
+		 * don't have a known composite type oid here.
 		 */
 		tupType = argtype;
 		tupTypmod = -1;
@@ -855,7 +856,7 @@ hstore_from_record(PG_FUNCTION_ARGS)
 		 */
 		if (column_info->column_type != column_type)
 		{
-			bool typIsVarlena;
+			bool		typIsVarlena;
 
 			getTypeOutputInfo(column_type,
 							  &column_info->typiofunc,
@@ -889,18 +890,18 @@ Datum		hstore_populate_record(PG_FUNCTION_ARGS);
 Datum
 hstore_populate_record(PG_FUNCTION_ARGS)
 {
-	Oid         argtype = get_fn_expr_argtype(fcinfo->flinfo,0);
-	HStore     *hs;
-	HEntry     *entries;
-	char       *ptr;
+	Oid			argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+	HStore	   *hs;
+	HEntry	   *entries;
+	char	   *ptr;
 	HeapTupleHeader rec;
 	Oid			tupType;
 	int32		tupTypmod;
 	TupleDesc	tupdesc;
 	HeapTupleData tuple;
-	HeapTuple   rettuple;
+	HeapTuple	rettuple;
 	RecordIOData *my_extra;
-	int         ncolumns;
+	int			ncolumns;
 	int			i;
 	Datum	   *values;
 	bool	   *nulls;
@@ -918,9 +919,9 @@ hstore_populate_record(PG_FUNCTION_ARGS)
 		rec = NULL;
 
 		/*
-		 * have no tuple to look at, so the only source of type info
-		 * is the argtype. The lookup_rowtype_tupdesc call below will
-		 * error out if we don't have a known composite type oid here.
+		 * have no tuple to look at, so the only source of type info is the
+		 * argtype. The lookup_rowtype_tupdesc call below will error out if we
+		 * don't have a known composite type oid here.
 		 */
 		tupType = argtype;
 		tupTypmod = -1;
@@ -942,9 +943,9 @@ hstore_populate_record(PG_FUNCTION_ARGS)
 	ptr = STRPTR(hs);
 
 	/*
-	 * if the input hstore is empty, we can only skip the rest if
-	 * we were passed in a non-null record, since otherwise there
-	 * may be issues with domain nulls.
+	 * if the input hstore is empty, we can only skip the rest if we were
+	 * passed in a non-null record, since otherwise there may be issues with
+	 * domain nulls.
 	 */
 
 	if (HS_COUNT(hs) == 0 && rec)
@@ -1012,8 +1013,8 @@ hstore_populate_record(PG_FUNCTION_ARGS)
 		ColumnIOData *column_info = &my_extra->columns[i];
 		Oid			column_type = tupdesc->attrs[i]->atttypid;
 		char	   *value;
-		int        idx;
-		int        vallen;
+		int			idx;
+		int			vallen;
 
 		/* Ignore dropped columns in datatype */
 		if (tupdesc->attrs[i]->attisdropped)
@@ -1025,14 +1026,14 @@ hstore_populate_record(PG_FUNCTION_ARGS)
 		idx = hstoreFindKey(hs, 0,
 							NameStr(tupdesc->attrs[i]->attname),
 							strlen(NameStr(tupdesc->attrs[i]->attname)));
+
 		/*
-		 * we can't just skip here if the key wasn't found since we
-		 * might have a domain to deal with. If we were passed in a
-		 * non-null record datum, we assume that the existing values
-		 * are valid (if they're not, then it's not our fault), but if
-		 * we were passed in a null, then every field which we don't
-		 * populate needs to be run through the input function just in
-		 * case it's a domain type.
+		 * we can't just skip here if the key wasn't found since we might have
+		 * a domain to deal with. If we were passed in a non-null record
+		 * datum, we assume that the existing values are valid (if they're
+		 * not, then it's not our fault), but if we were passed in a null,
+		 * then every field which we don't populate needs to be run through
+		 * the input function just in case it's a domain type.
 		 */
 		if (idx < 0 && rec)
 			continue;
@@ -1050,11 +1051,11 @@ hstore_populate_record(PG_FUNCTION_ARGS)
 			column_info->column_type = column_type;
 		}
 
-		if (idx < 0 || HS_VALISNULL(entries,idx))
+		if (idx < 0 || HS_VALISNULL(entries, idx))
 		{
 			/*
-			 * need InputFunctionCall to happen even for nulls, so
-			 * that domain checks are done
+			 * need InputFunctionCall to happen even for nulls, so that domain
+			 * checks are done
 			 */
 			values[i] = InputFunctionCall(&column_info->proc, NULL,
 										  column_info->typioparam,
@@ -1063,9 +1064,9 @@ hstore_populate_record(PG_FUNCTION_ARGS)
 		}
 		else
 		{
-			vallen = HS_VALLEN(entries,idx);
+			vallen = HS_VALLEN(entries, idx);
 			value = palloc(1 + vallen);
-			memcpy(value, HS_VAL(entries,ptr,idx), vallen);
+			memcpy(value, HS_VAL(entries, ptr, idx), vallen);
 			value[vallen] = 0;
 
 			values[i] = InputFunctionCall(&column_info->proc, value,
@@ -1105,7 +1106,7 @@ hstore_out(PG_FUNCTION_ARGS)
 	HStore	   *in = PG_GETARG_HS(0);
 	int			buflen,
 				i;
-	int        count = HS_COUNT(in);
+	int			count = HS_COUNT(in);
 	char	   *out,
 			   *ptr;
 	char	   *base = STRPTR(in);
@@ -1121,21 +1122,21 @@ hstore_out(PG_FUNCTION_ARGS)
 	buflen = 0;
 
 	/*
-	 * this loop overestimates due to pessimistic assumptions about
-	 * escaping, so very large hstore values can't be output. this
-	 * could be fixed, but many other data types probably have the
-	 * same issue. This replaced code that used the original varlena
-	 * size for calculations, which was wrong in some subtle ways.
+	 * this loop overestimates due to pessimistic assumptions about escaping,
+	 * so very large hstore values can't be output. this could be fixed, but
+	 * many other data types probably have the same issue. This replaced code
+	 * that used the original varlena size for calculations, which was wrong
+	 * in some subtle ways.
 	 */
 
 	for (i = 0; i < count; i++)
 	{
 		/* include "" and => and comma-space */
-		buflen += 6 + 2 * HS_KEYLEN(entries,i);
+		buflen += 6 + 2 * HS_KEYLEN(entries, i);
 		/* include "" only if nonnull */
-		buflen += 2 + (HS_VALISNULL(entries,i)
+		buflen += 2 + (HS_VALISNULL(entries, i)
 					   ? 2
-					   : 2 * HS_VALLEN(entries,i));
+					   : 2 * HS_VALLEN(entries, i));
 	}
 
 	out = ptr = palloc(buflen);
@@ -1143,11 +1144,11 @@ hstore_out(PG_FUNCTION_ARGS)
 	for (i = 0; i < count; i++)
 	{
 		*ptr++ = '"';
-		ptr = cpw(ptr, HS_KEY(entries,base,i), HS_KEYLEN(entries,i));
+		ptr = cpw(ptr, HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
 		*ptr++ = '"';
 		*ptr++ = '=';
 		*ptr++ = '>';
-		if (HS_VALISNULL(entries,i))
+		if (HS_VALISNULL(entries, i))
 		{
 			*ptr++ = 'N';
 			*ptr++ = 'U';
@@ -1157,7 +1158,7 @@ hstore_out(PG_FUNCTION_ARGS)
 		else
 		{
 			*ptr++ = '"';
-			ptr = cpw(ptr, HS_VAL(entries,base,i), HS_VALLEN(entries,i));
+			ptr = cpw(ptr, HS_VAL(entries, base, i), HS_VALLEN(entries, i));
 			*ptr++ = '"';
 		}
 
@@ -1179,8 +1180,8 @@ Datum
 hstore_send(PG_FUNCTION_ARGS)
 {
 	HStore	   *in = PG_GETARG_HS(0);
-	int        i;
-	int        count = HS_COUNT(in);
+	int			i;
+	int			count = HS_COUNT(in);
 	char	   *base = STRPTR(in);
 	HEntry	   *entries = ARRPTR(in);
 	StringInfoData buf;
@@ -1191,18 +1192,20 @@ hstore_send(PG_FUNCTION_ARGS)
 
 	for (i = 0; i < count; i++)
 	{
-		int32 keylen = HS_KEYLEN(entries,i);
+		int32		keylen = HS_KEYLEN(entries, i);
+
 		pq_sendint(&buf, keylen, 4);
-		pq_sendtext(&buf, HS_KEY(entries,base,i), keylen);
-		if (HS_VALISNULL(entries,i))
+		pq_sendtext(&buf, HS_KEY(entries, base, i), keylen);
+		if (HS_VALISNULL(entries, i))
 		{
 			pq_sendint(&buf, -1, 4);
 		}
 		else
 		{
-			int32 vallen = HS_VALLEN(entries,i);
+			int32		vallen = HS_VALLEN(entries, i);
+
 			pq_sendint(&buf, vallen, 4);
-			pq_sendtext(&buf, HS_VAL(entries,base,i), vallen);
+			pq_sendtext(&buf, HS_VAL(entries, base, i), vallen);
 		}
 	}
 
diff --git a/contrib/hstore/hstore_op.c b/contrib/hstore/hstore_op.c
index cc1a162dacb0d205c9e87762ca4aed2cd5bb6779..ebee60a1dbe5df5fb211f0c5c3a8eed84c1e3fba 100644
--- a/contrib/hstore/hstore_op.c
+++ b/contrib/hstore/hstore_op.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/hstore/hstore_op.c,v 1.15 2009/09/30 21:26:17 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore_op.c,v 1.16 2010/02/26 02:00:32 momjian Exp $
  */
 #include "postgres.h"
 
@@ -13,18 +13,18 @@
 #include "hstore.h"
 
 /* old names for C functions */
-HSTORE_POLLUTE(hstore_fetchval,fetchval);
-HSTORE_POLLUTE(hstore_exists,exists);
-HSTORE_POLLUTE(hstore_defined,defined);
-HSTORE_POLLUTE(hstore_delete,delete);
-HSTORE_POLLUTE(hstore_concat,hs_concat);
-HSTORE_POLLUTE(hstore_contains,hs_contains);
-HSTORE_POLLUTE(hstore_contained,hs_contained);
-HSTORE_POLLUTE(hstore_akeys,akeys);
-HSTORE_POLLUTE(hstore_avals,avals);
-HSTORE_POLLUTE(hstore_skeys,skeys);
-HSTORE_POLLUTE(hstore_svals,svals);
-HSTORE_POLLUTE(hstore_each,each);
+HSTORE_POLLUTE(hstore_fetchval, fetchval);
+HSTORE_POLLUTE(hstore_exists, exists);
+HSTORE_POLLUTE(hstore_defined, defined);
+HSTORE_POLLUTE(hstore_delete, delete);
+HSTORE_POLLUTE(hstore_concat, hs_concat);
+HSTORE_POLLUTE(hstore_contains, hs_contains);
+HSTORE_POLLUTE(hstore_contained, hs_contained);
+HSTORE_POLLUTE(hstore_akeys, akeys);
+HSTORE_POLLUTE(hstore_avals, avals);
+HSTORE_POLLUTE(hstore_skeys, skeys);
+HSTORE_POLLUTE(hstore_svals, svals);
+HSTORE_POLLUTE(hstore_each, each);
 
 
 /*
@@ -34,24 +34,24 @@ HSTORE_POLLUTE(hstore_each,each);
  * one-off or unordered searches.
  */
 int
-hstoreFindKey(HStore * hs, int *lowbound, char *key, int keylen)
+hstoreFindKey(HStore *hs, int *lowbound, char *key, int keylen)
 {
 	HEntry	   *entries = ARRPTR(hs);
-	int         stopLow = lowbound ? *lowbound : 0;
-	int         stopHigh = HS_COUNT(hs);
-	int         stopMiddle;
+	int			stopLow = lowbound ? *lowbound : 0;
+	int			stopHigh = HS_COUNT(hs);
+	int			stopMiddle;
 	char	   *base = STRPTR(hs);
 
 	while (stopLow < stopHigh)
 	{
-		int difference;
+		int			difference;
 
 		stopMiddle = stopLow + (stopHigh - stopLow) / 2;
 
-		if (HS_KEYLEN(entries,stopMiddle) == keylen)
-			difference = strncmp(HS_KEY(entries,base,stopMiddle), key, keylen);
+		if (HS_KEYLEN(entries, stopMiddle) == keylen)
+			difference = strncmp(HS_KEY(entries, base, stopMiddle), key, keylen);
 		else
-			difference = (HS_KEYLEN(entries,stopMiddle) > keylen) ? 1 : -1;
+			difference = (HS_KEYLEN(entries, stopMiddle) > keylen) ? 1 : -1;
 
 		if (difference == 0)
 		{
@@ -73,12 +73,13 @@ hstoreFindKey(HStore * hs, int *lowbound, char *key, int keylen)
 Pairs *
 hstoreArrayToPairs(ArrayType *a, int *npairs)
 {
-	Datum      *key_datums;
-	bool       *key_nulls;
-    int         key_count;
-	Pairs      *key_pairs;
-	int         bufsiz;
-	int         i,j;
+	Datum	   *key_datums;
+	bool	   *key_nulls;
+	int			key_count;
+	Pairs	   *key_pairs;
+	int			bufsiz;
+	int			i,
+				j;
 
 	deconstruct_array(a,
 					  TEXTOID, -1, false, 'i',
@@ -121,14 +122,14 @@ hstore_fetchval(PG_FUNCTION_ARGS)
 	text	   *key = PG_GETARG_TEXT_PP(1);
 	HEntry	   *entries = ARRPTR(hs);
 	text	   *out;
-    int         idx = hstoreFindKey(hs, NULL,
+	int			idx = hstoreFindKey(hs, NULL,
 									VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
 
-	if (idx < 0 || HS_VALISNULL(entries,idx))
+	if (idx < 0 || HS_VALISNULL(entries, idx))
 		PG_RETURN_NULL();
 
-	out = cstring_to_text_with_len(HS_VAL(entries,STRPTR(hs),idx),
-								   HS_VALLEN(entries,idx));
+	out = cstring_to_text_with_len(HS_VAL(entries, STRPTR(hs), idx),
+								   HS_VALLEN(entries, idx));
 
 	PG_RETURN_TEXT_P(out);
 }
@@ -141,7 +142,7 @@ hstore_exists(PG_FUNCTION_ARGS)
 {
 	HStore	   *hs = PG_GETARG_HS(0);
 	text	   *key = PG_GETARG_TEXT_PP(1);
-    int         idx = hstoreFindKey(hs, NULL,
+	int			idx = hstoreFindKey(hs, NULL,
 									VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
 
 	PG_RETURN_BOOL(idx >= 0);
@@ -155,23 +156,23 @@ hstore_exists_any(PG_FUNCTION_ARGS)
 {
 	HStore	   *hs = PG_GETARG_HS(0);
 	ArrayType  *keys = PG_GETARG_ARRAYTYPE_P(1);
-	int         nkeys;
-	Pairs      *key_pairs = hstoreArrayToPairs(keys, &nkeys);
-	int         i;
-	int         lowbound = 0;
-	bool        res = false;
+	int			nkeys;
+	Pairs	   *key_pairs = hstoreArrayToPairs(keys, &nkeys);
+	int			i;
+	int			lowbound = 0;
+	bool		res = false;
 
 	/*
-	 * we exploit the fact that the pairs list is already sorted into
-	 * strictly increasing order to narrow the hstoreFindKey search;
-	 * each search can start one entry past the previous "found"
-	 * entry, or at the lower bound of the last search.
+	 * we exploit the fact that the pairs list is already sorted into strictly
+	 * increasing order to narrow the hstoreFindKey search; each search can
+	 * start one entry past the previous "found" entry, or at the lower bound
+	 * of the last search.
 	 */
 
 	for (i = 0; !res && i < nkeys; ++i)
 	{
-		int idx = hstoreFindKey(hs, &lowbound,
-								key_pairs[i].key, key_pairs[i].keylen);
+		int			idx = hstoreFindKey(hs, &lowbound,
+									  key_pairs[i].key, key_pairs[i].keylen);
 
 		if (idx >= 0)
 			res = true;
@@ -188,23 +189,23 @@ hstore_exists_all(PG_FUNCTION_ARGS)
 {
 	HStore	   *hs = PG_GETARG_HS(0);
 	ArrayType  *keys = PG_GETARG_ARRAYTYPE_P(1);
-	int         nkeys;
-	Pairs      *key_pairs = hstoreArrayToPairs(keys, &nkeys);
-	int         i;
-	int         lowbound = 0;
-	bool        res = nkeys ? true : false;
+	int			nkeys;
+	Pairs	   *key_pairs = hstoreArrayToPairs(keys, &nkeys);
+	int			i;
+	int			lowbound = 0;
+	bool		res = nkeys ? true : false;
 
 	/*
-	 * we exploit the fact that the pairs list is already sorted into
-	 * strictly increasing order to narrow the hstoreFindKey search;
-	 * each search can start one entry past the previous "found"
-	 * entry, or at the lower bound of the last search.
+	 * we exploit the fact that the pairs list is already sorted into strictly
+	 * increasing order to narrow the hstoreFindKey search; each search can
+	 * start one entry past the previous "found" entry, or at the lower bound
+	 * of the last search.
 	 */
 
 	for (i = 0; res && i < nkeys; ++i)
 	{
-		int idx = hstoreFindKey(hs, &lowbound,
-								key_pairs[i].key, key_pairs[i].keylen);
+		int			idx = hstoreFindKey(hs, &lowbound,
+									  key_pairs[i].key, key_pairs[i].keylen);
 
 		if (idx < 0)
 			res = false;
@@ -222,9 +223,9 @@ hstore_defined(PG_FUNCTION_ARGS)
 	HStore	   *hs = PG_GETARG_HS(0);
 	text	   *key = PG_GETARG_TEXT_PP(1);
 	HEntry	   *entries = ARRPTR(hs);
-    int         idx = hstoreFindKey(hs, NULL,
+	int			idx = hstoreFindKey(hs, NULL,
 									VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
-	bool        res = (idx >= 0 && !HS_VALISNULL(entries,idx));
+	bool		res = (idx >= 0 && !HS_VALISNULL(entries, idx));
 
 	PG_RETURN_BOOL(res);
 }
@@ -237,20 +238,20 @@ hstore_delete(PG_FUNCTION_ARGS)
 {
 	HStore	   *hs = PG_GETARG_HS(0);
 	text	   *key = PG_GETARG_TEXT_PP(1);
-	char       *keyptr = VARDATA_ANY(key);
-	int         keylen = VARSIZE_ANY_EXHDR(key);
+	char	   *keyptr = VARDATA_ANY(key);
+	int			keylen = VARSIZE_ANY_EXHDR(key);
 	HStore	   *out = palloc(VARSIZE(hs));
 	char	   *bufs,
-		       *bufd,
+			   *bufd,
 			   *ptrd;
 	HEntry	   *es,
 			   *ed;
-	int         i;
-	int         count = HS_COUNT(hs);
-	int         outcount = 0;
+	int			i;
+	int			count = HS_COUNT(hs);
+	int			outcount = 0;
 
 	SET_VARSIZE(out, VARSIZE(hs));
-	HS_SETCOUNT(out, count);		/* temporary! */
+	HS_SETCOUNT(out, count);	/* temporary! */
 
 	bufs = STRPTR(hs);
 	es = ARRPTR(hs);
@@ -259,18 +260,19 @@ hstore_delete(PG_FUNCTION_ARGS)
 
 	for (i = 0; i < count; ++i)
 	{
-		int len = HS_KEYLEN(es,i);
-		char *ptrs = HS_KEY(es,bufs,i);
+		int			len = HS_KEYLEN(es, i);
+		char	   *ptrs = HS_KEY(es, bufs, i);
 
 		if (!(len == keylen && strncmp(ptrs, keyptr, keylen) == 0))
 		{
-			int vallen = HS_VALLEN(es,i);
-			HS_COPYITEM(ed, bufd, ptrd, ptrs, len, vallen, HS_VALISNULL(es,i));
+			int			vallen = HS_VALLEN(es, i);
+
+			HS_COPYITEM(ed, bufd, ptrd, ptrs, len, vallen, HS_VALISNULL(es, i));
 			++outcount;
 		}
 	}
 
-	HS_FINALIZE(out,outcount,bufd,ptrd);
+	HS_FINALIZE(out, outcount, bufd, ptrd);
 
 	PG_RETURN_POINTER(out);
 }
@@ -283,20 +285,21 @@ hstore_delete_array(PG_FUNCTION_ARGS)
 {
 	HStore	   *hs = PG_GETARG_HS(0);
 	HStore	   *out = palloc(VARSIZE(hs));
-	int         hs_count = HS_COUNT(hs);
+	int			hs_count = HS_COUNT(hs);
 	char	   *ps,
-		       *bufd,
+			   *bufd,
 			   *pd;
 	HEntry	   *es,
 			   *ed;
-	int         i,j;
-	int         outcount = 0;
+	int			i,
+				j;
+	int			outcount = 0;
 	ArrayType  *key_array = PG_GETARG_ARRAYTYPE_P(1);
-	int         nkeys;
-	Pairs      *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
+	int			nkeys;
+	Pairs	   *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
 
 	SET_VARSIZE(out, VARSIZE(hs));
-	HS_SETCOUNT(out, hs_count);		/* temporary! */
+	HS_SETCOUNT(out, hs_count); /* temporary! */
 
 	ps = STRPTR(hs);
 	es = ARRPTR(hs);
@@ -313,22 +316,22 @@ hstore_delete_array(PG_FUNCTION_ARGS)
 	}
 
 	/*
-	 * this is in effect a merge between hs and key_pairs, both of
-	 * which are already sorted by (keylen,key); we take keys from
-	 * hs only
+	 * this is in effect a merge between hs and key_pairs, both of which are
+	 * already sorted by (keylen,key); we take keys from hs only
 	 */
 
-	for (i = j = 0; i < hs_count; )
+	for (i = j = 0; i < hs_count;)
 	{
-		int	difference;
-		
+		int			difference;
+
 		if (j >= nkeys)
 			difference = -1;
 		else
 		{
-			int skeylen = HS_KEYLEN(es,i);
+			int			skeylen = HS_KEYLEN(es, i);
+
 			if (skeylen == key_pairs[j].keylen)
-				difference = strncmp(HS_KEY(es,ps,i),
+				difference = strncmp(HS_KEY(es, ps, i),
 									 key_pairs[j].key,
 									 key_pairs[j].keylen);
 			else
@@ -342,14 +345,14 @@ hstore_delete_array(PG_FUNCTION_ARGS)
 		else
 		{
 			HS_COPYITEM(ed, bufd, pd,
-						HS_KEY(es,ps,i), HS_KEYLEN(es,i),
-						HS_VALLEN(es,i), HS_VALISNULL(es,i));
+						HS_KEY(es, ps, i), HS_KEYLEN(es, i),
+						HS_VALLEN(es, i), HS_VALISNULL(es, i));
 			++outcount;
 			++i;
 		}
 	}
 
-	HS_FINALIZE(out,outcount,bufd,pd);
+	HS_FINALIZE(out, outcount, bufd, pd);
 
 	PG_RETURN_POINTER(out);
 }
@@ -363,20 +366,21 @@ hstore_delete_hstore(PG_FUNCTION_ARGS)
 	HStore	   *hs = PG_GETARG_HS(0);
 	HStore	   *hs2 = PG_GETARG_HS(1);
 	HStore	   *out = palloc(VARSIZE(hs));
-	int         hs_count = HS_COUNT(hs);
-	int         hs2_count = HS_COUNT(hs2);
+	int			hs_count = HS_COUNT(hs);
+	int			hs2_count = HS_COUNT(hs2);
 	char	   *ps,
-		       *ps2,
-		       *bufd,
+			   *ps2,
+			   *bufd,
 			   *pd;
 	HEntry	   *es,
-		       *es2,
+			   *es2,
 			   *ed;
-	int         i,j;
-	int         outcount = 0;
+	int			i,
+				j;
+	int			outcount = 0;
 
 	SET_VARSIZE(out, VARSIZE(hs));
-	HS_SETCOUNT(out, hs_count);		/* temporary! */
+	HS_SETCOUNT(out, hs_count); /* temporary! */
 
 	ps = STRPTR(hs);
 	es = ARRPTR(hs);
@@ -395,25 +399,25 @@ hstore_delete_hstore(PG_FUNCTION_ARGS)
 	}
 
 	/*
-	 * this is in effect a merge between hs and hs2, both of
-	 * which are already sorted by (keylen,key); we take keys from
-	 * hs only; for equal keys, we take the value from hs unless the
-	 * values are equal
+	 * this is in effect a merge between hs and hs2, both of which are already
+	 * sorted by (keylen,key); we take keys from hs only; for equal keys, we
+	 * take the value from hs unless the values are equal
 	 */
 
-	for (i = j = 0; i < hs_count; )
+	for (i = j = 0; i < hs_count;)
 	{
-		int	difference;
-		
+		int			difference;
+
 		if (j >= hs2_count)
 			difference = -1;
 		else
 		{
-			int skeylen = HS_KEYLEN(es,i);
-			int s2keylen = HS_KEYLEN(es2,j);
+			int			skeylen = HS_KEYLEN(es, i);
+			int			s2keylen = HS_KEYLEN(es2, j);
+
 			if (skeylen == s2keylen)
-				difference = strncmp(HS_KEY(es,ps,i),
-									 HS_KEY(es2,ps2,j),
+				difference = strncmp(HS_KEY(es, ps, i),
+									 HS_KEY(es2, ps2, j),
 									 skeylen);
 			else
 				difference = (skeylen > s2keylen) ? 1 : -1;
@@ -423,15 +427,16 @@ hstore_delete_hstore(PG_FUNCTION_ARGS)
 			++j;
 		else if (difference == 0)
 		{
-			int svallen = HS_VALLEN(es,i);
-			int snullval = HS_VALISNULL(es,i);
-			if (snullval != HS_VALISNULL(es2,j)
+			int			svallen = HS_VALLEN(es, i);
+			int			snullval = HS_VALISNULL(es, i);
+
+			if (snullval != HS_VALISNULL(es2, j)
 				|| (!snullval
-					&& (svallen != HS_VALLEN(es2,j)
-						|| strncmp(HS_VAL(es,ps,i), HS_VAL(es2,ps2,j), svallen) != 0)))
+					&& (svallen != HS_VALLEN(es2, j)
+						|| strncmp(HS_VAL(es, ps, i), HS_VAL(es2, ps2, j), svallen) != 0)))
 			{
 				HS_COPYITEM(ed, bufd, pd,
-							HS_KEY(es,ps,i), HS_KEYLEN(es,i),
+							HS_KEY(es, ps, i), HS_KEYLEN(es, i),
 							svallen, snullval);
 				++outcount;
 			}
@@ -440,14 +445,14 @@ hstore_delete_hstore(PG_FUNCTION_ARGS)
 		else
 		{
 			HS_COPYITEM(ed, bufd, pd,
-						HS_KEY(es,ps,i), HS_KEYLEN(es,i),
-						HS_VALLEN(es,i), HS_VALISNULL(es,i));
+						HS_KEY(es, ps, i), HS_KEYLEN(es, i),
+						HS_VALLEN(es, i), HS_VALISNULL(es, i));
 			++outcount;
 			++i;
 		}
 	}
 
-	HS_FINALIZE(out,outcount,bufd,pd);
+	HS_FINALIZE(out, outcount, bufd, pd);
 
 	PG_RETURN_POINTER(out);
 }
@@ -463,16 +468,16 @@ hstore_concat(PG_FUNCTION_ARGS)
 	HStore	   *out = palloc(VARSIZE(s1) + VARSIZE(s2));
 	char	   *ps1,
 			   *ps2,
-		       *bufd,
+			   *bufd,
 			   *pd;
 	HEntry	   *es1,
 			   *es2,
 			   *ed;
-	int         s1idx;
-	int         s2idx;
-	int         s1count = HS_COUNT(s1);
-	int         s2count = HS_COUNT(s2);
-	int         outcount = 0;
+	int			s1idx;
+	int			s2idx;
+	int			s1count = HS_COUNT(s1);
+	int			s2count = HS_COUNT(s2);
+	int			outcount = 0;
 
 	SET_VARSIZE(out, VARSIZE(s1) + VARSIZE(s2) - HSHRDSIZE);
 	HS_SETCOUNT(out, s1count + s2count);
@@ -503,25 +508,26 @@ hstore_concat(PG_FUNCTION_ARGS)
 	ed = ARRPTR(out);
 
 	/*
-	 * this is in effect a merge between s1 and s2, both of which
-	 * are already sorted by (keylen,key); we take s2 for equal keys
+	 * this is in effect a merge between s1 and s2, both of which are already
+	 * sorted by (keylen,key); we take s2 for equal keys
 	 */
 
 	for (s1idx = s2idx = 0; s1idx < s1count || s2idx < s2count; ++outcount)
 	{
-		int	difference;
-		
+		int			difference;
+
 		if (s1idx >= s1count)
 			difference = 1;
 		else if (s2idx >= s2count)
 			difference = -1;
 		else
 		{
-			int s1keylen = HS_KEYLEN(es1,s1idx);
-			int s2keylen = HS_KEYLEN(es2,s2idx);
+			int			s1keylen = HS_KEYLEN(es1, s1idx);
+			int			s2keylen = HS_KEYLEN(es2, s2idx);
+
 			if (s1keylen == s2keylen)
-				difference = strncmp(HS_KEY(es1,ps1,s1idx),
-									 HS_KEY(es2,ps2,s2idx),
+				difference = strncmp(HS_KEY(es1, ps1, s1idx),
+									 HS_KEY(es2, ps2, s2idx),
 									 s1keylen);
 			else
 				difference = (s1keylen > s2keylen) ? 1 : -1;
@@ -530,8 +536,8 @@ hstore_concat(PG_FUNCTION_ARGS)
 		if (difference >= 0)
 		{
 			HS_COPYITEM(ed, bufd, pd,
-						HS_KEY(es2,ps2,s2idx), HS_KEYLEN(es2,s2idx),
-						HS_VALLEN(es2,s2idx), HS_VALISNULL(es2,s2idx));
+						HS_KEY(es2, ps2, s2idx), HS_KEYLEN(es2, s2idx),
+						HS_VALLEN(es2, s2idx), HS_VALISNULL(es2, s2idx));
 			++s2idx;
 			if (difference == 0)
 				++s1idx;
@@ -539,13 +545,13 @@ hstore_concat(PG_FUNCTION_ARGS)
 		else
 		{
 			HS_COPYITEM(ed, bufd, pd,
-						HS_KEY(es1,ps1,s1idx), HS_KEYLEN(es1,s1idx),
-						HS_VALLEN(es1,s1idx), HS_VALISNULL(es1,s1idx));
+						HS_KEY(es1, ps1, s1idx), HS_KEYLEN(es1, s1idx),
+						HS_VALLEN(es1, s1idx), HS_VALISNULL(es1, s1idx));
 			++s1idx;
 		}
 	}
 
-	HS_FINALIZE(out,outcount,bufd,pd);
+	HS_FINALIZE(out, outcount, bufd, pd);
 
 	PG_RETURN_POINTER(out);
 }
@@ -558,15 +564,15 @@ hstore_slice_to_array(PG_FUNCTION_ARGS)
 {
 	HStore	   *hs = PG_GETARG_HS(0);
 	HEntry	   *entries = ARRPTR(hs);
-	char       *ptr = STRPTR(hs);
+	char	   *ptr = STRPTR(hs);
 	ArrayType  *key_array = PG_GETARG_ARRAYTYPE_P(1);
 	ArrayType  *aout;
-	Datum      *key_datums;
-	bool       *key_nulls;
-	Datum      *out_datums;
-	bool       *out_nulls;
-    int         key_count;
-	int         i;
+	Datum	   *key_datums;
+	bool	   *key_nulls;
+	Datum	   *out_datums;
+	bool	   *out_nulls;
+	int			key_count;
+	int			i;
 
 	deconstruct_array(key_array,
 					  TEXTOID, -1, false, 'i',
@@ -583,15 +589,15 @@ hstore_slice_to_array(PG_FUNCTION_ARGS)
 
 	for (i = 0; i < key_count; ++i)
 	{
-		text       *key = (text*) DatumGetPointer(key_datums[i]);
-		int        idx;
+		text	   *key = (text *) DatumGetPointer(key_datums[i]);
+		int			idx;
 
 		if (key_nulls[i])
 			idx = -1;
 		else
 			idx = hstoreFindKey(hs, NULL, VARDATA(key), VARSIZE(key) - VARHDRSZ);
 
-		if (idx < 0 || HS_VALISNULL(entries,idx))
+		if (idx < 0 || HS_VALISNULL(entries, idx))
 		{
 			out_nulls[i] = true;
 			out_datums[i] = (Datum) 0;
@@ -599,8 +605,8 @@ hstore_slice_to_array(PG_FUNCTION_ARGS)
 		else
 		{
 			out_datums[i] = PointerGetDatum(
-				cstring_to_text_with_len(HS_VAL(entries,ptr,idx),
-										 HS_VALLEN(entries,idx)));
+						  cstring_to_text_with_len(HS_VAL(entries, ptr, idx),
+												   HS_VALLEN(entries, idx)));
 			out_nulls[i] = false;
 		}
 	}
@@ -609,7 +615,7 @@ hstore_slice_to_array(PG_FUNCTION_ARGS)
 							  ARR_NDIM(key_array),
 							  ARR_DIMS(key_array),
 							  ARR_LBOUND(key_array),
-							  TEXTOID, -1, false,	'i');
+							  TEXTOID, -1, false, 'i');
 
 	PG_RETURN_POINTER(aout);
 }
@@ -622,16 +628,16 @@ hstore_slice_to_hstore(PG_FUNCTION_ARGS)
 {
 	HStore	   *hs = PG_GETARG_HS(0);
 	HEntry	   *entries = ARRPTR(hs);
-	char       *ptr = STRPTR(hs);
+	char	   *ptr = STRPTR(hs);
 	ArrayType  *key_array = PG_GETARG_ARRAYTYPE_P(1);
-	HStore     *out;
-	int         nkeys;
-	Pairs      *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
-	Pairs      *out_pairs;
-	int         bufsiz;
-	int         lastidx = 0;
-	int         i;
-	int         out_count = 0;
+	HStore	   *out;
+	int			nkeys;
+	Pairs	   *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
+	Pairs	   *out_pairs;
+	int			bufsiz;
+	int			lastidx = 0;
+	int			i;
+	int			out_count = 0;
 
 	if (nkeys == 0)
 	{
@@ -643,32 +649,32 @@ hstore_slice_to_hstore(PG_FUNCTION_ARGS)
 	bufsiz = 0;
 
 	/*
-	 * we exploit the fact that the pairs list is already sorted into
-	 * strictly increasing order to narrow the hstoreFindKey search;
-	 * each search can start one entry past the previous "found"
-	 * entry, or at the lower bound of the last search.
+	 * we exploit the fact that the pairs list is already sorted into strictly
+	 * increasing order to narrow the hstoreFindKey search; each search can
+	 * start one entry past the previous "found" entry, or at the lower bound
+	 * of the last search.
 	 */
 
 	for (i = 0; i < nkeys; ++i)
 	{
-		int idx = hstoreFindKey(hs, &lastidx,
-								key_pairs[i].key, key_pairs[i].keylen);
+		int			idx = hstoreFindKey(hs, &lastidx,
+									  key_pairs[i].key, key_pairs[i].keylen);
 
 		if (idx >= 0)
 		{
 			out_pairs[out_count].key = key_pairs[i].key;
 			bufsiz += (out_pairs[out_count].keylen = key_pairs[i].keylen);
-			out_pairs[out_count].val = HS_VAL(entries,ptr,idx);
-			bufsiz += (out_pairs[out_count].vallen = HS_VALLEN(entries,idx));
-			out_pairs[out_count].isnull = HS_VALISNULL(entries,idx);
+			out_pairs[out_count].val = HS_VAL(entries, ptr, idx);
+			bufsiz += (out_pairs[out_count].vallen = HS_VALLEN(entries, idx));
+			out_pairs[out_count].isnull = HS_VALISNULL(entries, idx);
 			out_pairs[out_count].needfree = false;
 			++out_count;
 		}
 	}
 
 	/*
-	 * we don't use uniquePairs here because we know that the
-	 * pairs list is already sorted and uniq'ed.
+	 * we don't use uniquePairs here because we know that the pairs list is
+	 * already sorted and uniq'ed.
 	 */
 
 	out = hstorePairs(out_pairs, out_count, bufsiz);
@@ -687,8 +693,8 @@ hstore_akeys(PG_FUNCTION_ARGS)
 	ArrayType  *a;
 	HEntry	   *entries = ARRPTR(hs);
 	char	   *base = STRPTR(hs);
-	int         count = HS_COUNT(hs);
-	int         i;
+	int			count = HS_COUNT(hs);
+	int			i;
 
 	if (count == 0)
 	{
@@ -700,13 +706,14 @@ hstore_akeys(PG_FUNCTION_ARGS)
 
 	for (i = 0; i < count; ++i)
 	{
-		text *item = cstring_to_text_with_len(HS_KEY(entries,base,i),
-											  HS_KEYLEN(entries,i));
+		text	   *item = cstring_to_text_with_len(HS_KEY(entries, base, i),
+													HS_KEYLEN(entries, i));
+
 		d[i] = PointerGetDatum(item);
 	}
 
 	a = construct_array(d, count,
-						TEXTOID, -1, false,	'i');
+						TEXTOID, -1, false, 'i');
 
 	PG_RETURN_POINTER(a);
 }
@@ -719,13 +726,13 @@ hstore_avals(PG_FUNCTION_ARGS)
 {
 	HStore	   *hs = PG_GETARG_HS(0);
 	Datum	   *d;
-	bool       *nulls;
+	bool	   *nulls;
 	ArrayType  *a;
 	HEntry	   *entries = ARRPTR(hs);
 	char	   *base = STRPTR(hs);
-	int         count = HS_COUNT(hs);
-	int         lb = 1;
-	int         i;
+	int			count = HS_COUNT(hs);
+	int			lb = 1;
+	int			i;
 
 	if (count == 0)
 	{
@@ -738,22 +745,23 @@ hstore_avals(PG_FUNCTION_ARGS)
 
 	for (i = 0; i < count; ++i)
 	{
-		if (HS_VALISNULL(entries,i))
+		if (HS_VALISNULL(entries, i))
 		{
 			d[i] = (Datum) 0;
 			nulls[i] = true;
 		}
 		else
 		{
-			text *item = cstring_to_text_with_len(HS_VAL(entries,base,i),
-												  HS_VALLEN(entries,i));
+			text	   *item = cstring_to_text_with_len(HS_VAL(entries, base, i),
+													  HS_VALLEN(entries, i));
+
 			d[i] = PointerGetDatum(item);
 			nulls[i] = false;
 		}
 	}
 
 	a = construct_md_array(d, nulls, 1, &count, &lb,
-						   TEXTOID, -1, false,	'i');
+						   TEXTOID, -1, false, 'i');
 
 	PG_RETURN_POINTER(a);
 }
@@ -764,12 +772,12 @@ hstore_to_array_internal(HStore *hs, int ndims)
 {
 	HEntry	   *entries = ARRPTR(hs);
 	char	   *base = STRPTR(hs);
-	int         count = HS_COUNT(hs);
-	int         out_size[2] = { 0, 2 };
-	int         lb[2] = { 1, 1 };
+	int			count = HS_COUNT(hs);
+	int			out_size[2] = {0, 2};
+	int			lb[2] = {1, 1};
 	Datum	   *out_datums;
 	bool	   *out_nulls;
-	int         i;
+	int			i;
 
 	Assert(ndims < 3);
 
@@ -782,22 +790,24 @@ hstore_to_array_internal(HStore *hs, int ndims)
 
 	for (i = 0; i < count; ++i)
 	{
-		text *key = cstring_to_text_with_len(HS_KEY(entries,base,i),
-											 HS_KEYLEN(entries,i));
-		out_datums[i*2] = PointerGetDatum(key);
-		out_nulls[i*2] = false;
+		text	   *key = cstring_to_text_with_len(HS_KEY(entries, base, i),
+												   HS_KEYLEN(entries, i));
+
+		out_datums[i * 2] = PointerGetDatum(key);
+		out_nulls[i * 2] = false;
 
-		if (HS_VALISNULL(entries,i))
+		if (HS_VALISNULL(entries, i))
 		{
-			out_datums[i*2+1] = (Datum) 0;
-			out_nulls[i*2+1] = true;
+			out_datums[i * 2 + 1] = (Datum) 0;
+			out_nulls[i * 2 + 1] = true;
 		}
 		else
 		{
-			text *item = cstring_to_text_with_len(HS_VAL(entries,base,i),
-												  HS_VALLEN(entries,i));
-			out_datums[i*2+1] = PointerGetDatum(item);
-			out_nulls[i*2+1] = false;
+			text	   *item = cstring_to_text_with_len(HS_VAL(entries, base, i),
+													  HS_VALLEN(entries, i));
+
+			out_datums[i * 2 + 1] = PointerGetDatum(item);
+			out_nulls[i * 2 + 1] = false;
 		}
 	}
 
@@ -811,7 +821,7 @@ Datum		hstore_to_array(PG_FUNCTION_ARGS);
 Datum
 hstore_to_array(PG_FUNCTION_ARGS)
 {
-	HStore     *hs = PG_GETARG_HS(0);
+	HStore	   *hs = PG_GETARG_HS(0);
 	ArrayType  *out = hstore_to_array_internal(hs, 1);
 
 	PG_RETURN_POINTER(out);
@@ -822,7 +832,7 @@ Datum		hstore_to_matrix(PG_FUNCTION_ARGS);
 Datum
 hstore_to_matrix(PG_FUNCTION_ARGS)
 {
-	HStore     *hs = PG_GETARG_HS(0);
+	HStore	   *hs = PG_GETARG_HS(0);
 	ArrayType  *out = hstore_to_array_internal(hs, 2);
 
 	PG_RETURN_POINTER(out);
@@ -838,11 +848,11 @@ hstore_to_matrix(PG_FUNCTION_ARGS)
  */
 
 static void
-setup_firstcall(FuncCallContext *funcctx, HStore * hs,
+setup_firstcall(FuncCallContext *funcctx, HStore *hs,
 				FunctionCallInfoData *fcinfo)
 {
 	MemoryContext oldcontext;
-	HStore     *st;
+	HStore	   *st;
 
 	oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
 
@@ -858,7 +868,7 @@ setup_firstcall(FuncCallContext *funcctx, HStore * hs,
 		/* Build a tuple descriptor for our result type */
 		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
 			elog(ERROR, "return type must be a row type");
-		
+
 		funcctx->tuple_desc = BlessTupleDesc(tupdesc);
 	}
 
@@ -872,8 +882,8 @@ Datum
 hstore_skeys(PG_FUNCTION_ARGS)
 {
 	FuncCallContext *funcctx;
-	HStore    *hs;
-	int        i;
+	HStore	   *hs;
+	int			i;
 
 	if (SRF_IS_FIRSTCALL())
 	{
@@ -888,11 +898,11 @@ hstore_skeys(PG_FUNCTION_ARGS)
 
 	if (i < HS_COUNT(hs))
 	{
-		HEntry     *entries = ARRPTR(hs);
+		HEntry	   *entries = ARRPTR(hs);
 		text	   *item;
 
-		item = cstring_to_text_with_len(HS_KEY(entries,STRPTR(hs),i),
-										HS_KEYLEN(entries,i));
+		item = cstring_to_text_with_len(HS_KEY(entries, STRPTR(hs), i),
+										HS_KEYLEN(entries, i));
 
 		SRF_RETURN_NEXT(funcctx, PointerGetDatum(item));
 	}
@@ -907,8 +917,8 @@ Datum
 hstore_svals(PG_FUNCTION_ARGS)
 {
 	FuncCallContext *funcctx;
-	HStore    *hs;
-	int        i;
+	HStore	   *hs;
+	int			i;
 
 	if (SRF_IS_FIRSTCALL())
 	{
@@ -923,9 +933,9 @@ hstore_svals(PG_FUNCTION_ARGS)
 
 	if (i < HS_COUNT(hs))
 	{
-		HEntry     *entries = ARRPTR(hs);
+		HEntry	   *entries = ARRPTR(hs);
 
-		if (HS_VALISNULL(entries,i))
+		if (HS_VALISNULL(entries, i))
 		{
 			ReturnSetInfo *rsi;
 
@@ -939,8 +949,8 @@ hstore_svals(PG_FUNCTION_ARGS)
 		{
 			text	   *item;
 
-			item = cstring_to_text_with_len(HS_VAL(entries,STRPTR(hs),i),
-											HS_VALLEN(entries,i));
+			item = cstring_to_text_with_len(HS_VAL(entries, STRPTR(hs), i),
+											HS_VALLEN(entries, i));
 
 			SRF_RETURN_NEXT(funcctx, PointerGetDatum(item));
 		}
@@ -962,31 +972,31 @@ hstore_contains(PG_FUNCTION_ARGS)
 	char	   *tstr = STRPTR(tmpl);
 	HEntry	   *ve = ARRPTR(val);
 	char	   *vstr = STRPTR(val);
-	int         tcount = HS_COUNT(tmpl);
-	int         lastidx = 0;
-	int         i;
+	int			tcount = HS_COUNT(tmpl);
+	int			lastidx = 0;
+	int			i;
 
 	/*
-	 * we exploit the fact that keys in "tmpl" are in strictly
-	 * increasing order to narrow the hstoreFindKey search; each search
-	 * can start one entry past the previous "found" entry, or at the
-	 * lower bound of the search
+	 * we exploit the fact that keys in "tmpl" are in strictly increasing
+	 * order to narrow the hstoreFindKey search; each search can start one
+	 * entry past the previous "found" entry, or at the lower bound of the
+	 * search
 	 */
 
 	for (i = 0; res && i < tcount; ++i)
 	{
-		int idx = hstoreFindKey(val, &lastidx,
-								HS_KEY(te,tstr,i), HS_KEYLEN(te,i));
+		int			idx = hstoreFindKey(val, &lastidx,
+									  HS_KEY(te, tstr, i), HS_KEYLEN(te, i));
 
 		if (idx >= 0)
 		{
-			bool nullval = HS_VALISNULL(te,i);
-			int  vallen = HS_VALLEN(te,i);
+			bool		nullval = HS_VALISNULL(te, i);
+			int			vallen = HS_VALLEN(te, i);
 
-			if (nullval != HS_VALISNULL(ve,idx)
+			if (nullval != HS_VALISNULL(ve, idx)
 				|| (!nullval
-					&& (vallen != HS_VALLEN(ve,idx)
-						|| strncmp(HS_VAL(te,tstr,i), HS_VAL(ve,vstr,idx), vallen))))
+					&& (vallen != HS_VALLEN(ve, idx)
+			|| strncmp(HS_VAL(te, tstr, i), HS_VAL(ve, vstr, idx), vallen))))
 				res = false;
 		}
 		else
@@ -1015,8 +1025,8 @@ Datum
 hstore_each(PG_FUNCTION_ARGS)
 {
 	FuncCallContext *funcctx;
-	HStore     *hs;
-	int         i;
+	HStore	   *hs;
+	int			i;
 
 	if (SRF_IS_FIRSTCALL())
 	{
@@ -1032,26 +1042,26 @@ hstore_each(PG_FUNCTION_ARGS)
 	if (i < HS_COUNT(hs))
 	{
 		HEntry	   *entries = ARRPTR(hs);
-		char       *ptr = STRPTR(hs);
+		char	   *ptr = STRPTR(hs);
 		Datum		res,
 					dvalues[2];
 		bool		nulls[2] = {false, false};
 		text	   *item;
 		HeapTuple	tuple;
 
-		item = cstring_to_text_with_len(HS_KEY(entries,ptr,i),
-										HS_KEYLEN(entries,i));
+		item = cstring_to_text_with_len(HS_KEY(entries, ptr, i),
+										HS_KEYLEN(entries, i));
 		dvalues[0] = PointerGetDatum(item);
 
-		if (HS_VALISNULL(entries,i))
+		if (HS_VALISNULL(entries, i))
 		{
 			dvalues[1] = (Datum) 0;
 			nulls[1] = true;
 		}
 		else
 		{
-			item = cstring_to_text_with_len(HS_VAL(entries,ptr,i),
-											HS_VALLEN(entries,i));
+			item = cstring_to_text_with_len(HS_VAL(entries, ptr, i),
+											HS_VALLEN(entries, i));
 			dvalues[1] = PointerGetDatum(item);
 		}
 
@@ -1078,15 +1088,15 @@ hstore_cmp(PG_FUNCTION_ARGS)
 {
 	HStore	   *hs1 = PG_GETARG_HS(0);
 	HStore	   *hs2 = PG_GETARG_HS(1);
-	int         hcount1 = HS_COUNT(hs1);
-	int         hcount2 = HS_COUNT(hs2);
-	int         res = 0;
+	int			hcount1 = HS_COUNT(hs1);
+	int			hcount2 = HS_COUNT(hs2);
+	int			res = 0;
 
 	if (hcount1 == 0 || hcount2 == 0)
 	{
 		/*
-		 * if either operand is empty, and the other is nonempty, the
-		 * nonempty one is larger. If both are empty they are equal.
+		 * if either operand is empty, and the other is nonempty, the nonempty
+		 * one is larger. If both are empty they are equal.
 		 */
 		if (hcount1 > 0)
 			res = 1;
@@ -1096,14 +1106,14 @@ hstore_cmp(PG_FUNCTION_ARGS)
 	else
 	{
 		/* here we know both operands are nonempty */
-		char       *str1 = STRPTR(hs1);
-		char       *str2 = STRPTR(hs2);
-		HEntry     *ent1 = ARRPTR(hs1);
-		HEntry     *ent2 = ARRPTR(hs2);
-		size_t      len1 = HSE_ENDPOS(ent1[2*hcount1 - 1]);
-		size_t      len2 = HSE_ENDPOS(ent2[2*hcount2 - 1]);
+		char	   *str1 = STRPTR(hs1);
+		char	   *str2 = STRPTR(hs2);
+		HEntry	   *ent1 = ARRPTR(hs1);
+		HEntry	   *ent2 = ARRPTR(hs2);
+		size_t		len1 = HSE_ENDPOS(ent1[2 * hcount1 - 1]);
+		size_t		len2 = HSE_ENDPOS(ent2[2 * hcount2 - 1]);
 
-		res = memcmp(str1, str2, Min(len1,len2));
+		res = memcmp(str1, str2, Min(len1, len2));
 
 		if (res == 0)
 		{
@@ -1117,8 +1127,8 @@ hstore_cmp(PG_FUNCTION_ARGS)
 				res = -1;
 			else
 			{
-				int count = hcount1 * 2;
-				int i;
+				int			count = hcount1 * 2;
+				int			i;
 
 				for (i = 0; i < count; ++i)
 					if (HSE_ENDPOS(ent1[i]) != HSE_ENDPOS(ent2[i]) ||
@@ -1144,11 +1154,11 @@ hstore_cmp(PG_FUNCTION_ARGS)
 	}
 
 	/*
-	 * this is a btree support function; this is one of the few
-	 * places where memory needs to be explicitly freed.
+	 * this is a btree support function; this is one of the few places where
+	 * memory needs to be explicitly freed.
 	 */
-	PG_FREE_IF_COPY(hs1,0);
-	PG_FREE_IF_COPY(hs2,1);
+	PG_FREE_IF_COPY(hs1, 0);
+	PG_FREE_IF_COPY(hs2, 1);
 	PG_RETURN_INT32(res);
 }
 
@@ -1158,9 +1168,10 @@ Datum		hstore_eq(PG_FUNCTION_ARGS);
 Datum
 hstore_eq(PG_FUNCTION_ARGS)
 {
-	int     res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
-													PG_GETARG_DATUM(0),
-													PG_GETARG_DATUM(1)));
+	int			res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+														PG_GETARG_DATUM(0),
+														PG_GETARG_DATUM(1)));
+
 	PG_RETURN_BOOL(res == 0);
 }
 
@@ -1169,9 +1180,10 @@ Datum		hstore_ne(PG_FUNCTION_ARGS);
 Datum
 hstore_ne(PG_FUNCTION_ARGS)
 {
-	int     res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
-													PG_GETARG_DATUM(0),
-													PG_GETARG_DATUM(1)));
+	int			res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+														PG_GETARG_DATUM(0),
+														PG_GETARG_DATUM(1)));
+
 	PG_RETURN_BOOL(res != 0);
 }
 
@@ -1180,9 +1192,10 @@ Datum		hstore_gt(PG_FUNCTION_ARGS);
 Datum
 hstore_gt(PG_FUNCTION_ARGS)
 {
-	int     res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
-													PG_GETARG_DATUM(0),
-													PG_GETARG_DATUM(1)));
+	int			res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+														PG_GETARG_DATUM(0),
+														PG_GETARG_DATUM(1)));
+
 	PG_RETURN_BOOL(res > 0);
 }
 
@@ -1191,9 +1204,10 @@ Datum		hstore_ge(PG_FUNCTION_ARGS);
 Datum
 hstore_ge(PG_FUNCTION_ARGS)
 {
-	int     res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
-													PG_GETARG_DATUM(0),
-													PG_GETARG_DATUM(1)));
+	int			res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+														PG_GETARG_DATUM(0),
+														PG_GETARG_DATUM(1)));
+
 	PG_RETURN_BOOL(res >= 0);
 }
 
@@ -1202,9 +1216,10 @@ Datum		hstore_lt(PG_FUNCTION_ARGS);
 Datum
 hstore_lt(PG_FUNCTION_ARGS)
 {
-	int     res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
-													PG_GETARG_DATUM(0),
-													PG_GETARG_DATUM(1)));
+	int			res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+														PG_GETARG_DATUM(0),
+														PG_GETARG_DATUM(1)));
+
 	PG_RETURN_BOOL(res < 0);
 }
 
@@ -1213,9 +1228,10 @@ Datum		hstore_le(PG_FUNCTION_ARGS);
 Datum
 hstore_le(PG_FUNCTION_ARGS)
 {
-	int     res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
-													PG_GETARG_DATUM(0),
-													PG_GETARG_DATUM(1)));
+	int			res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+														PG_GETARG_DATUM(0),
+														PG_GETARG_DATUM(1)));
+
 	PG_RETURN_BOOL(res <= 0);
 }
 
@@ -1226,21 +1242,20 @@ Datum
 hstore_hash(PG_FUNCTION_ARGS)
 {
 	HStore	   *hs = PG_GETARG_HS(0);
-	Datum       hval = hash_any((unsigned char *)VARDATA(hs),
+	Datum		hval = hash_any((unsigned char *) VARDATA(hs),
 								VARSIZE(hs) - VARHDRSZ);
 
 	/*
-	 * this is the only place in the code that cares whether the
-	 * overall varlena size exactly matches the true data size;
-	 * this assertion should be maintained by all the other code,
-	 * but we make it explicit here.
+	 * this is the only place in the code that cares whether the overall
+	 * varlena size exactly matches the true data size; this assertion should
+	 * be maintained by all the other code, but we make it explicit here.
 	 */
 	Assert(VARSIZE(hs) ==
 		   (HS_COUNT(hs) != 0 ?
 			CALCDATASIZE(HS_COUNT(hs),
-						 HSE_ENDPOS(ARRPTR(hs)[2*HS_COUNT(hs) - 1])) :
+						 HSE_ENDPOS(ARRPTR(hs)[2 * HS_COUNT(hs) - 1])) :
 			HSHRDSIZE));
 
-	PG_FREE_IF_COPY(hs,0);
+	PG_FREE_IF_COPY(hs, 0);
 	PG_RETURN_DATUM(hval);
 }
diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c
index 11cd53a3907fc8526cfd4531adc442ce6abd07df..dac760b1114846fbdc513f6effe1bf0792ccf839 100644
--- a/contrib/isn/isn.c
+++ b/contrib/isn/isn.c
@@ -3,11 +3,11 @@
  * isn.c
  *	  PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC)
  *
- * Author:  German Mendez Bravo (Kronuz)
+ * Author:	German Mendez Bravo (Kronuz)
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/contrib/isn/isn.c,v 1.13 2010/02/05 04:34:51 momjian Exp $
+ *	  $PostgreSQL: pgsql/contrib/isn/isn.c,v 1.14 2010/02/26 02:00:32 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
diff --git a/contrib/isn/isn.h b/contrib/isn/isn.h
index 628fdab91b967d24049daba355ddb356439af8d4..fdc72d9b53949cba62b2da14db6f1f4478db5aec 100644
--- a/contrib/isn/isn.h
+++ b/contrib/isn/isn.h
@@ -3,11 +3,11 @@
  * isn.h
  *	  PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC)
  *
- * Author:  German Mendez Bravo (Kronuz)
+ * Author:	German Mendez Bravo (Kronuz)
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/contrib/isn/isn.h,v 1.9 2010/02/05 04:34:51 momjian Exp $
+ *	  $PostgreSQL: pgsql/contrib/isn/isn.h,v 1.10 2010/02/26 02:00:32 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
diff --git a/contrib/oid2name/oid2name.c b/contrib/oid2name/oid2name.c
index 52d6fafeaf4a0def133db9705ed323e858ac6b42..ff824278aacaff92f6b09d93176e62db33b1170e 100644
--- a/contrib/oid2name/oid2name.c
+++ b/contrib/oid2name/oid2name.c
@@ -5,7 +5,7 @@
  * Originally by
  * B. Palmer, bpalmer@crimelabs.net 1-17-2001
  *
- * $PostgreSQL: pgsql/contrib/oid2name/oid2name.c,v 1.37 2010/02/07 20:48:08 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/oid2name/oid2name.c,v 1.38 2010/02/26 02:00:32 momjian Exp $
  */
 #include "postgres_fe.h"
 
@@ -440,7 +440,7 @@ sql_exec_dumpalldbs(PGconn *conn, struct options * opts)
 	/* get the oid and database name from the system pg_database table */
 	snprintf(todo, sizeof(todo),
 			 "SELECT d.oid AS \"Oid\", datname AS \"Database Name\", "
-	  "spcname AS \"Tablespace\" FROM pg_catalog.pg_database d JOIN pg_catalog.pg_tablespace t ON "
+			 "spcname AS \"Tablespace\" FROM pg_catalog.pg_database d JOIN pg_catalog.pg_tablespace t ON "
 			 "(dattablespace = t.oid) ORDER BY 2");
 
 	sql_exec(conn, todo, opts->quiet);
@@ -456,10 +456,10 @@ sql_exec_dumpalltables(PGconn *conn, struct options * opts)
 	char	   *addfields = ",c.oid AS \"Oid\", nspname AS \"Schema\", spcname as \"Tablespace\" ";
 
 	snprintf(todo, sizeof(todo),
-		  "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s "
+			 "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s "
 			 "FROM pg_class c "
 		   "	LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace "
-	"	LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),"
+			 "	LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),"
 			 "	pg_catalog.pg_tablespace t "
 			 "WHERE relkind IN ('r'%s%s) AND "
 			 "	%s"
@@ -527,7 +527,7 @@ sql_exec_searchtables(PGconn *conn, struct options * opts)
 	/* now build the query */
 	todo = (char *) myalloc(650 + strlen(qualifiers));
 	snprintf(todo, 650 + strlen(qualifiers),
-		 "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
+			 "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
 			 "FROM pg_catalog.pg_class c \n"
 		 "	LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
 			 "	LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
diff --git a/contrib/passwordcheck/passwordcheck.c b/contrib/passwordcheck/passwordcheck.c
index b18b161227c68cff056122ae748971424fb3a5ec..adf417769e7dec676e1b9e9993014396007d9251 100644
--- a/contrib/passwordcheck/passwordcheck.c
+++ b/contrib/passwordcheck/passwordcheck.c
@@ -8,7 +8,7 @@
  * Author: Laurenz Albe <laurenz.albe@wien.gv.at>
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/contrib/passwordcheck/passwordcheck.c,v 1.2 2010/01/02 16:57:32 momjian Exp $
+ *	  $PostgreSQL: pgsql/contrib/passwordcheck/passwordcheck.c,v 1.3 2010/02/26 02:00:32 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -66,12 +66,12 @@ check_password(const char *username,
 	switch (password_type)
 	{
 		case PASSWORD_TYPE_MD5:
+
 			/*
-			 * Unfortunately we cannot perform exhaustive checks on
-			 * encrypted passwords - we are restricted to guessing.
-			 * (Alternatively, we could insist on the password being
-			 * presented non-encrypted, but that has its own security
-			 * disadvantages.)
+			 * Unfortunately we cannot perform exhaustive checks on encrypted
+			 * passwords - we are restricted to guessing. (Alternatively, we
+			 * could insist on the password being presented non-encrypted, but
+			 * that has its own security disadvantages.)
 			 *
 			 * We only check for username = password.
 			 */
@@ -84,6 +84,7 @@ check_password(const char *username,
 			break;
 
 		case PASSWORD_TYPE_PLAINTEXT:
+
 			/*
 			 * For unencrypted passwords we can perform better checks
 			 */
@@ -106,8 +107,8 @@ check_password(const char *username,
 			for (i = 0; i < pwdlen; i++)
 			{
 				/*
-				 * isalpha() does not work for multibyte encodings
-				 * but let's consider non-ASCII characters non-letters
+				 * isalpha() does not work for multibyte encodings but let's
+				 * consider non-ASCII characters non-letters
 				 */
 				if (isalpha((unsigned char) password[i]))
 					pwd_has_letter = true;
@@ -117,7 +118,7 @@ check_password(const char *username,
 			if (!pwd_has_letter || !pwd_has_nonletter)
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-						 errmsg("password must contain both letters and nonletters")));
+				errmsg("password must contain both letters and nonletters")));
 
 #ifdef USE_CRACKLIB
 			/* call cracklib to check password */
diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c
index 7df15a978acabe15a646ce1cd00c2e225b62da88..d1a0d60a2c2e40ced1614f988bbfd8ca04b37572 100644
--- a/contrib/pg_standby/pg_standby.c
+++ b/contrib/pg_standby/pg_standby.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/contrib/pg_standby/pg_standby.c,v 1.27 2009/11/04 12:51:30 heikki Exp $
+ * $PostgreSQL: pgsql/contrib/pg_standby/pg_standby.c,v 1.28 2010/02/26 02:00:32 momjian Exp $
  *
  *
  * pg_standby.c
@@ -576,6 +576,7 @@ main(int argc, char **argv)
 	}
 
 #ifndef WIN32
+
 	/*
 	 * You can send SIGUSR1 to trigger failover.
 	 *
@@ -614,9 +615,10 @@ main(int argc, char **argv)
 				}
 				break;
 			case 'l':			/* Use link */
+
 				/*
-				 * Link feature disabled, possibly permanently. Linking
-				 * causes a problem after recovery ends that is not currently
+				 * Link feature disabled, possibly permanently. Linking causes
+				 * a problem after recovery ends that is not currently
 				 * resolved by PostgreSQL. 25 Jun 2009
 				 */
 #ifdef NOT_USED
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 11dfb6280ad25f98d52a6a3586863a312b8cbf86..8fa249e9b8b2f25e2d9a5b1bc5e201f56a196d92 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -14,7 +14,7 @@
  * Copyright (c) 2008-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/contrib/pg_stat_statements/pg_stat_statements.c,v 1.12 2010/01/08 00:38:19 itagaki Exp $
+ *	  $PostgreSQL: pgsql/contrib/pg_stat_statements/pg_stat_statements.c,v 1.13 2010/02/26 02:00:32 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -76,18 +76,18 @@ typedef struct pgssHashKey
  */
 typedef struct Counters
 {
-	int64		calls;				/* # of times executed */
-	double		total_time;			/* total execution time in seconds */
-	int64		rows;				/* total # of retrieved or affected rows */
+	int64		calls;			/* # of times executed */
+	double		total_time;		/* total execution time in seconds */
+	int64		rows;			/* total # of retrieved or affected rows */
 	int64		shared_blks_hit;	/* # of shared buffer hits */
-	int64		shared_blks_read;	/* # of shared disk blocks read */
-	int64		shared_blks_written;/* # of shared disk blocks written */
-	int64		local_blks_hit;		/* # of local buffer hits */
+	int64		shared_blks_read;		/* # of shared disk blocks read */
+	int64		shared_blks_written;	/* # of shared disk blocks written */
+	int64		local_blks_hit; /* # of local buffer hits */
 	int64		local_blks_read;	/* # of local disk blocks read */
-	int64		local_blks_written;	/* # of local disk blocks written */
-	int64		temp_blks_read;		/* # of temp blocks read */
-	int64		temp_blks_written;	/* # of temp blocks written */
-	double		usage;				/* usage factor */
+	int64		local_blks_written;		/* # of local disk blocks written */
+	int64		temp_blks_read; /* # of temp blocks read */
+	int64		temp_blks_written;		/* # of temp blocks written */
+	double		usage;			/* usage factor */
 } Counters;
 
 /*
@@ -148,7 +148,7 @@ static const struct config_enum_entry track_options[] =
 
 static int	pgss_max;			/* max # statements to track */
 static int	pgss_track;			/* tracking level */
-static bool pgss_track_utility;	/* whether to track utility commands */
+static bool pgss_track_utility; /* whether to track utility commands */
 static bool pgss_save;			/* whether to save stats across shutdown */
 
 
@@ -175,12 +175,12 @@ static void pgss_ExecutorRun(QueryDesc *queryDesc,
 				 long count);
 static void pgss_ExecutorEnd(QueryDesc *queryDesc);
 static void pgss_ProcessUtility(Node *parsetree,
-			   const char *queryString, ParamListInfo params, bool isTopLevel,
-			   DestReceiver *dest, char *completionTag);
+			  const char *queryString, ParamListInfo params, bool isTopLevel,
+					DestReceiver *dest, char *completionTag);
 static uint32 pgss_hash_fn(const void *key, Size keysize);
 static int	pgss_match_fn(const void *key1, const void *key2, Size keysize);
 static void pgss_store(const char *query, double total_time, uint64 rows,
-					   const BufferUsage *bufusage);
+		   const BufferUsage *bufusage);
 static Size pgss_memsize(void);
 static pgssEntry *entry_alloc(pgssHashKey *key);
 static void entry_dealloc(void);
@@ -231,7 +231,7 @@ _PG_init(void)
 							 NULL);
 
 	DefineCustomBoolVariable("pg_stat_statements.track_utility",
-			   "Selects whether utility commands are tracked by pg_stat_statements.",
+	   "Selects whether utility commands are tracked by pg_stat_statements.",
 							 NULL,
 							 &pgss_track_utility,
 							 true,
@@ -356,8 +356,8 @@ pgss_shmem_startup(void)
 		on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);
 
 	/*
-	 * Attempt to load old statistics from the dump file, if this is the
-	 * first time through and we weren't told not to.
+	 * Attempt to load old statistics from the dump file, if this is the first
+	 * time through and we weren't told not to.
 	 */
 	if (found || !pgss_save)
 		return;
@@ -592,7 +592,7 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString,
 		instr_time	start;
 		instr_time	duration;
 		uint64		rows = 0;
-		BufferUsage	bufusage;
+		BufferUsage bufusage;
 
 		bufusage = pgBufferUsage;
 		INSTR_TIME_SET_CURRENT(start);
diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c
index b38086490a45a4e5f26640fd8e1ebec365584b06..b290b7477b1db005fd3f37185cff6caf198b2e57 100644
--- a/contrib/pgbench/pgbench.c
+++ b/contrib/pgbench/pgbench.c
@@ -4,7 +4,7 @@
  * A simple benchmark program for PostgreSQL
  * Originally written by Tatsuo Ishii and enhanced by many contributors.
  *
- * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.96 2010/01/06 01:30:03 itagaki Exp $
+ * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.97 2010/02/26 02:00:32 momjian Exp $
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  * ALL RIGHTS RESERVED;
  *
@@ -28,7 +28,7 @@
  */
 
 #ifdef WIN32
-#define FD_SETSIZE 1024		/* set before winsock2.h is included */
+#define FD_SETSIZE 1024			/* set before winsock2.h is included */
 #endif   /* ! WIN32 */
 
 #include "postgres_fe.h"
@@ -66,16 +66,14 @@
 
 #ifdef WIN32
 /* Use native win32 threads on Windows */
-typedef struct win32_pthread   *pthread_t;
-typedef int						pthread_attr_t;
-
-static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void * (*start_routine)(void *), void *arg);
-static int pthread_join(pthread_t th, void **thread_return);
+typedef struct win32_pthread *pthread_t;
+typedef int pthread_attr_t;
 
+static int	pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
+static int	pthread_join(pthread_t th, void **thread_return);
 #elif defined(ENABLE_THREAD_SAFETY)
 /* Use platform-dependent pthread capability */
 #include <pthread.h>
-
 #else
 /* Use emulation with fork. Rename pthread identifiers to avoid conflicts */
 
@@ -86,12 +84,11 @@ static int pthread_join(pthread_t th, void **thread_return);
 #define pthread_create			pg_pthread_create
 #define pthread_join			pg_pthread_join
 
-typedef struct fork_pthread	   *pthread_t;
-typedef int						pthread_attr_t;
-
-static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void * (*start_routine)(void *), void *arg);
-static int pthread_join(pthread_t th, void **thread_return);
+typedef struct fork_pthread *pthread_t;
+typedef int pthread_attr_t;
 
+static int	pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
+static int	pthread_join(pthread_t th, void **thread_return);
 #endif
 
 extern char *optarg;
@@ -129,7 +126,8 @@ int			fillfactor = 100;
  * end of configurable parameters
  *********************************************************************/
 
-#define nbranches	1  /* Makes little sense to change this.  Change -s instead */
+#define nbranches	1			/* Makes little sense to change this.  Change
+								 * -s instead */
 #define ntellers	10
 #define naccounts	100000
 
@@ -156,7 +154,7 @@ typedef struct
 } Variable;
 
 #define MAX_FILES		128		/* max number of SQL script files allowed */
-#define SHELL_COMMAND_SIZE	256	/* maximum size allowed for shell command */
+#define SHELL_COMMAND_SIZE	256 /* maximum size allowed for shell command */
 
 /*
  * structures used in custom query mode
@@ -185,18 +183,18 @@ typedef struct
  */
 typedef struct
 {
-	pthread_t		thread;		/* thread handle */
-	CState		   *state;		/* array of CState */
-	int				nstate;		/* length of state[] */
-	instr_time		start_time;	/* thread start time */
+	pthread_t	thread;			/* thread handle */
+	CState	   *state;			/* array of CState */
+	int			nstate;			/* length of state[] */
+	instr_time	start_time;		/* thread start time */
 } TState;
 
 #define INVALID_THREAD		((pthread_t) 0)
 
 typedef struct
 {
-	instr_time		conn_time;
-	int				xacts;
+	instr_time	conn_time;
+	int			xacts;
 } TResult;
 
 /*
@@ -224,9 +222,9 @@ typedef struct
 	char	   *argv[MAX_ARGS]; /* command list */
 } Command;
 
-static Command	  **sql_files[MAX_FILES];	/* SQL script files */
-static int			num_files;				/* number of script files */
-static int			debug = 0;				/* debug flag */
+static Command **sql_files[MAX_FILES];	/* SQL script files */
+static int	num_files;			/* number of script files */
+static int	debug = 0;			/* debug flag */
 
 /* default scenario */
 static char *tpc_b = {
@@ -271,7 +269,7 @@ static char *select_only = {
 
 /* Function prototypes */
 static void setalarm(int seconds);
-static void* threadRun(void *arg);
+static void *threadRun(void *arg);
 
 static void
 usage(const char *progname)
@@ -432,7 +430,7 @@ getVariable(CState *st, char *name)
 static bool
 isLegalVariableName(const char *name)
 {
-	int		i;
+	int			i;
 
 	for (i = 0; name[i] != '\0'; i++)
 	{
@@ -624,29 +622,28 @@ getQueryParams(CState *st, const Command *command, const char **params)
 static bool
 runShellCommand(CState *st, char *variable, char **argv, int argc)
 {
-	char	command[SHELL_COMMAND_SIZE];
-	int		i,
-			len = 0;
-	FILE   *fp;
-	char	res[64];
-	char   *endptr;
-	int		retval;
+	char		command[SHELL_COMMAND_SIZE];
+	int			i,
+				len = 0;
+	FILE	   *fp;
+	char		res[64];
+	char	   *endptr;
+	int			retval;
 
 	/*
 	 * Join arguments with whilespace separaters. Arguments starting with
-	 * exactly one colon are treated as variables:
-	 *	name - append a string "name"
-	 *	:var - append a variable named 'var'.
-	 *	::name - append a string ":name"
+	 * exactly one colon are treated as variables: name - append a string
+	 * "name" :var - append a variable named 'var'. ::name - append a string
+	 * ":name"
 	 */
 	for (i = 0; i < argc; i++)
 	{
-		char   *arg;
-		int		arglen;
+		char	   *arg;
+		int			arglen;
 
 		if (argv[i][0] != ':')
 		{
-			arg = argv[i];	/* a string literal */
+			arg = argv[i];		/* a string literal */
 		}
 		else if (argv[i][1] == ':')
 		{
@@ -732,14 +729,14 @@ preparedStatementName(char *buffer, int file, int state)
 static bool
 clientDone(CState *st, bool ok)
 {
-	(void) ok;	/* unused */
+	(void) ok;					/* unused */
 
 	if (st->con != NULL)
 	{
 		PQfinish(st->con);
 		st->con = NULL;
 	}
-	return false;	/* always false */
+	return false;				/* always false */
 }
 
 /* return false iff client should be disconnected */
@@ -811,10 +808,10 @@ top:
 			{
 				case PGRES_COMMAND_OK:
 				case PGRES_TUPLES_OK:
-					break;	/* OK */
+					break;		/* OK */
 				default:
 					fprintf(stderr, "Client %d aborted in state %d: %s",
-						st->id, st->state, PQerrorMessage(st->con));
+							st->id, st->state, PQerrorMessage(st->con));
 					PQclear(res);
 					return clientDone(st, false);
 			}
@@ -847,7 +844,8 @@ top:
 
 	if (st->con == NULL)
 	{
-		instr_time	start, end;
+		instr_time	start,
+					end;
 
 		INSTR_TIME_SET_CURRENT(start);
 		if ((st->con = doConnect()) == NULL)
@@ -1091,7 +1089,7 @@ top:
 		{
 			char	   *var;
 			int			usec;
-			instr_time now;
+			instr_time	now;
 
 			if (*argv[1] == ':')
 			{
@@ -1124,9 +1122,9 @@ top:
 		}
 		else if (pg_strcasecmp(argv[0], "setshell") == 0)
 		{
-			bool	ret = runShellCommand(st, argv[1], argv + 2, argc - 2);
+			bool		ret = runShellCommand(st, argv[1], argv + 2, argc - 2);
 
-			if (timer_exceeded)	/* timeout */
+			if (timer_exceeded) /* timeout */
 				return clientDone(st, true);
 			else if (!ret)		/* on error */
 			{
@@ -1138,9 +1136,9 @@ top:
 		}
 		else if (pg_strcasecmp(argv[0], "shell") == 0)
 		{
-			bool	ret = runShellCommand(st, NULL, argv + 1, argc - 1);
+			bool		ret = runShellCommand(st, NULL, argv + 1, argc - 1);
 
-			if (timer_exceeded)	/* timeout */
+			if (timer_exceeded) /* timeout */
 				return clientDone(st, true);
 			else if (!ret)		/* on error */
 			{
@@ -1442,7 +1440,7 @@ process_commands(char *buf)
 			 */
 			if (my_commands->argv[1][0] != ':')
 			{
-				char	*c = my_commands->argv[1];
+				char	   *c = my_commands->argv[1];
 
 				while (isdigit((unsigned char) *c))
 					c++;
@@ -1667,7 +1665,7 @@ printResults(int ttype, int normal_xacts, int nclients, int nthreads,
 	time_include = INSTR_TIME_GET_DOUBLE(total_time);
 	tps_include = normal_xacts / time_include;
 	tps_exclude = normal_xacts / (time_include -
-		(INSTR_TIME_GET_DOUBLE(conn_total_time) / nthreads));
+						(INSTR_TIME_GET_DOUBLE(conn_total_time) / nthreads));
 
 	if (ttype == 0)
 		s = "TPC-B (sort of)";
@@ -1704,8 +1702,8 @@ int
 main(int argc, char **argv)
 {
 	int			c;
-	int			nclients = 1;		/* default number of simulated clients */
-	int			nthreads = 1;		/* default number of threads */
+	int			nclients = 1;	/* default number of simulated clients */
+	int			nthreads = 1;	/* default number of threads */
 	int			is_init_mode = 0;		/* initialize mode? */
 	int			is_no_vacuum = 0;		/* no vacuum at all before testing? */
 	int			do_vacuum_accounts = 0; /* do vacuum accounts before testing? */
@@ -1826,7 +1824,7 @@ main(int argc, char **argv)
 				}
 #endif   /* HAVE_GETRLIMIT */
 				break;
-			case 'j':	/* jobs */
+			case 'j':			/* jobs */
 				nthreads = atoi(optarg);
 				if (nthreads <= 0)
 				{
@@ -2120,7 +2118,8 @@ main(int argc, char **argv)
 		/* the first thread (i = 0) is executed by main thread */
 		if (i > 0)
 		{
-			int err = pthread_create(&threads[i].thread, NULL, threadRun, &threads[i]);
+			int			err = pthread_create(&threads[i].thread, NULL, threadRun, &threads[i]);
+
 			if (err != 0 || threads[i].thread == INVALID_THREAD)
 			{
 				fprintf(stderr, "cannot create thread: %s\n", strerror(err));
@@ -2138,7 +2137,7 @@ main(int argc, char **argv)
 	INSTR_TIME_SET_ZERO(conn_total_time);
 	for (i = 0; i < nthreads; i++)
 	{
-		void *ret = NULL;
+		void	   *ret = NULL;
 
 		if (threads[i].thread == INVALID_THREAD)
 			ret = threadRun(&threads[i]);
@@ -2147,7 +2146,8 @@ main(int argc, char **argv)
 
 		if (ret != NULL)
 		{
-			TResult *r = (TResult *) ret;
+			TResult    *r = (TResult *) ret;
+
 			total_xacts += r->xacts;
 			INSTR_TIME_ADD(conn_total_time, r->conn_time);
 			free(ret);
@@ -2170,10 +2170,11 @@ threadRun(void *arg)
 {
 	TState	   *thread = (TState *) arg;
 	CState	   *state = thread->state;
-	TResult	   *result;
-	instr_time	start, end;
+	TResult    *result;
+	instr_time	start,
+				end;
 	int			nstate = thread->nstate;
-	int			remains = nstate;	/* number of remaining clients */
+	int			remains = nstate;		/* number of remaining clients */
 	int			i;
 
 	result = malloc(sizeof(TResult));
@@ -2202,7 +2203,7 @@ threadRun(void *arg)
 
 		st->use_file = getrand(0, num_files - 1);
 		if (!doCustom(st, &result->conn_time))
-			remains--;		/* I've aborted */
+			remains--;			/* I've aborted */
 
 		if (st->ecnt > prev_ecnt && commands[st->state]->type == META_COMMAND)
 		{
@@ -2215,10 +2216,10 @@ threadRun(void *arg)
 
 	while (remains > 0)
 	{
-		fd_set			input_mask;
-		int				maxsock;		/* max socket number to be waited */
-		int64			now_usec = 0;
-		int64			min_usec;
+		fd_set		input_mask;
+		int			maxsock;	/* max socket number to be waited */
+		int64		now_usec = 0;
+		int64		min_usec;
 
 		FD_ZERO(&input_mask);
 
@@ -2237,6 +2238,7 @@ threadRun(void *arg)
 				if (min_usec == INT64_MAX)
 				{
 					instr_time	now;
+
 					INSTR_TIME_SET_CURRENT(now);
 					now_usec = INSTR_TIME_GET_MICROSEC(now);
 				}
@@ -2262,18 +2264,20 @@ threadRun(void *arg)
 				goto done;
 			}
 
-			FD_SET(sock, &input_mask);
+			FD_SET		(sock, &input_mask);
+
 			if (maxsock < sock)
 				maxsock = sock;
 		}
 
 		if (min_usec > 0 && maxsock != -1)
 		{
-			int		nsocks;			/* return from select(2) */
+			int			nsocks; /* return from select(2) */
 
 			if (min_usec != INT64_MAX)
 			{
-				struct timeval	timeout;
+				struct timeval timeout;
+
 				timeout.tv_sec = min_usec / 1000000;
 				timeout.tv_usec = min_usec % 1000000;
 				nsocks = select(maxsock + 1, &input_mask, NULL, NULL, &timeout);
@@ -2298,10 +2302,10 @@ threadRun(void *arg)
 			int			prev_ecnt = st->ecnt;
 
 			if (st->con && (FD_ISSET(PQsocket(st->con), &input_mask)
-						  || commands[st->state]->type == META_COMMAND))
+							|| commands[st->state]->type == META_COMMAND))
 			{
 				if (!doCustom(st, &result->conn_time))
-					remains--;		/* I've aborted */
+					remains--;	/* I've aborted */
 			}
 
 			if (st->ecnt > prev_ecnt && commands[st->state]->type == META_COMMAND)
@@ -2353,30 +2357,30 @@ setalarm(int seconds)
 
 typedef struct fork_pthread
 {
-	pid_t	pid;
-	int		pipes[2];
-} fork_pthread;
+	pid_t		pid;
+	int			pipes[2];
+}	fork_pthread;
 
 static int
 pthread_create(pthread_t *thread,
 			   pthread_attr_t *attr,
-			   void * (*start_routine)(void *),
+			   void *(*start_routine) (void *),
 			   void *arg)
 {
-	fork_pthread   *th;
-	void		   *ret;
-	instr_time		start_time;
+	fork_pthread *th;
+	void	   *ret;
+	instr_time	start_time;
 
 	th = (fork_pthread *) malloc(sizeof(fork_pthread));
 	pipe(th->pipes);
 
 	th->pid = fork();
-	if (th->pid == -1)	/* error */
+	if (th->pid == -1)			/* error */
 	{
 		free(th);
 		return errno;
 	}
-	if (th->pid != 0)	/* in parent process */
+	if (th->pid != 0)			/* in parent process */
 	{
 		close(th->pipes[1]);
 		*thread = th;
@@ -2391,11 +2395,11 @@ pthread_create(pthread_t *thread,
 		setalarm(duration);
 
 	/*
-	 * Set a different random seed in each child process.  Otherwise they
-	 * all inherit the parent's state and generate the same "random"
-	 * sequence.  (In the threaded case, the different threads will obtain
-	 * subsets of the output of a single random() sequence, which should be
-	 * okay for our purposes.)
+	 * Set a different random seed in each child process.  Otherwise they all
+	 * inherit the parent's state and generate the same "random" sequence.
+	 * (In the threaded case, the different threads will obtain subsets of the
+	 * output of a single random() sequence, which should be okay for our
+	 * purposes.)
 	 */
 	INSTR_TIME_SET_CURRENT(start_time);
 	srandom(((unsigned int) INSTR_TIME_GET_MICROSEC(start_time)) +
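The comment rewrapped just above is about more than style: with the fork()-based pthread emulation, every child starts from a copy of the parent's random() state, so each child must reseed itself. A minimal stand-alone illustration of that idea follows; the seed expression here is illustrative, not pgbench's exact one.

/*
 * Sketch of per-child reseeding after fork(): each child mixes the current
 * microseconds with something unique to it (here its PID) so the children
 * do not all replay the parent's random() sequence.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	int			i;

	for (i = 0; i < 3; i++)
	{
		pid_t		pid = fork();

		if (pid == 0)			/* child */
		{
			struct timeval now;

			gettimeofday(&now, NULL);
			srandom((unsigned int) now.tv_usec + (unsigned int) getpid());
			printf("child %d drew %ld\n", i, random());
			_exit(0);
		}
	}
	while (wait(NULL) > 0)		/* reap all children */
		;
	return 0;
}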
@@ -2411,7 +2415,7 @@ pthread_create(pthread_t *thread,
 static int
 pthread_join(pthread_t th, void **thread_return)
 {
-	int		status;
+	int			status;
 
 	while (waitpid(th->pid, &status, 0) != th->pid)
 	{
@@ -2434,9 +2438,7 @@ pthread_join(pthread_t th, void **thread_return)
 	free(th);
 	return 0;
 }
-
 #endif
-
 #else							/* WIN32 */
 
 static VOID CALLBACK
@@ -2468,7 +2470,7 @@ setalarm(int seconds)
 typedef struct win32_pthread
 {
 	HANDLE		handle;
-	void	   *(*routine)(void *);
+	void	   *(*routine) (void *);
 	void	   *arg;
 	void	   *result;
 } win32_pthread;
@@ -2486,11 +2488,11 @@ win32_pthread_run(void *arg)
 static int
 pthread_create(pthread_t *thread,
 			   pthread_attr_t *attr,
-			   void * (*start_routine)(void *),
+			   void *(*start_routine) (void *),
 			   void *arg)
 {
-	int				save_errno;
-	win32_pthread   *th;
+	int			save_errno;
+	win32_pthread *th;
 
 	th = (win32_pthread *) malloc(sizeof(win32_pthread));
 	th->routine = start_routine;
diff --git a/contrib/unaccent/unaccent.c b/contrib/unaccent/unaccent.c
index 99a2ed50dc2bd74468130ea016e84edd40531349..8e012ac17257572c1bef3a1b4b5e552399629d94 100644
--- a/contrib/unaccent/unaccent.c
+++ b/contrib/unaccent/unaccent.c
@@ -1,12 +1,12 @@
 /*-------------------------------------------------------------------------
  *
  * unaccent.c
- *    Text search unaccent dictionary
+ *	  Text search unaccent dictionary
  *
  * Copyright (c) 2009-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/contrib/unaccent/unaccent.c,v 1.4 2010/01/02 16:57:33 momjian Exp $
+ *	  $PostgreSQL: pgsql/contrib/unaccent/unaccent.c,v 1.5 2010/02/26 02:00:32 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -25,26 +25,27 @@
 PG_MODULE_MAGIC;
 
 /*
- * Unaccent dictionary uses uncompressed suffix tree to find a 
- * character to replace. Each node of tree is an array of 
+ * Unaccent dictionary uses an uncompressed suffix tree to find a
+ * character to replace. Each node of the tree is an array of
  * SuffixChar struct with length = 256 (n-th element of array
  * corresponds to byte)
  */
-typedef struct SuffixChar {
-	struct SuffixChar	*nextChar;
-	char				*replaceTo;
-	int					replacelen;
+typedef struct SuffixChar
+{
+	struct SuffixChar *nextChar;
+	char	   *replaceTo;
+	int			replacelen;
 } SuffixChar;
 
 /*
  * placeChar - put str into tree's structure, byte by byte.
  */
-static SuffixChar*
+static SuffixChar *
 placeChar(SuffixChar *node, unsigned char *str, int lenstr, char *replaceTo, int replacelen)
 {
-	SuffixChar	*curnode;
+	SuffixChar *curnode;
 
-	if ( !node )
+	if (!node)
 	{
 		node = palloc(sizeof(SuffixChar) * 256);
 		memset(node, 0, sizeof(SuffixChar) * 256);
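Since this hunk only re-indents the trie code, a compact stand-alone sketch may help readers see the data structure it describes: each node is a 256-entry array indexed by the next byte of a possibly multibyte character, and the entry for the final byte carries the replacement string. The names below are hypothetical user-space stand-ins (malloc instead of palloc), not the module's own API.

/* Stand-alone sketch of the byte-indexed trie used by the unaccent dictionary. */
#include <stdio.h>
#include <stdlib.h>

typedef struct TrieNode
{
	struct TrieNode *next;		/* 256-entry child array, or NULL */
	const char *replaceTo;		/* replacement string, or NULL */
} TrieNode;

static TrieNode *
trie_put(TrieNode *node, const unsigned char *key, int len, const char *to)
{
	TrieNode   *cur;

	if (node == NULL)
		node = calloc(256, sizeof(TrieNode));
	cur = node + *key;
	if (len == 1)
		cur->replaceTo = to;
	else
		cur->next = trie_put(cur->next, key + 1, len - 1, to);
	return node;
}

static const char *
trie_get(TrieNode *node, const unsigned char *key, int len)
{
	while (node)
	{
		node = node + *key;
		if (len == 1)
			return node->replaceTo;
		key++;
		len--;
		node = node->next;
	}
	return NULL;
}

int
main(void)
{
	TrieNode   *root = NULL;
	const char *r;

	/* U+00E9 LATIN SMALL LETTER E WITH ACUTE is 0xC3 0xA9 in UTF-8 */
	root = trie_put(root, (const unsigned char *) "\xc3\xa9", 2, "e");
	r = trie_get(root, (const unsigned char *) "\xc3\xa9", 2);
	printf("%s\n", r ? r : "(no match)");	/* prints "e" */
	return 0;
}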
@@ -52,20 +53,20 @@ placeChar(SuffixChar *node, unsigned char *str, int lenstr, char *replaceTo, int
 
 	curnode = node + *str;
 
-	if ( lenstr == 1 )
+	if (lenstr == 1)
 	{
-		if ( curnode->replaceTo )
+		if (curnode->replaceTo)
 			elog(WARNING, "duplicate TO argument, use first one");
 		else
 		{
 			curnode->replacelen = replacelen;
-			curnode->replaceTo = palloc( replacelen );
+			curnode->replaceTo = palloc(replacelen);
 			memcpy(curnode->replaceTo, replaceTo, replacelen);
 		}
 	}
 	else
 	{
-		curnode->nextChar = placeChar( curnode->nextChar, str+1, lenstr-1, replaceTo, replacelen);
+		curnode->nextChar = placeChar(curnode->nextChar, str + 1, lenstr - 1, replaceTo, replacelen);
 	}
 
 	return node;
@@ -75,13 +76,13 @@ placeChar(SuffixChar *node, unsigned char *str, int lenstr, char *replaceTo, int
  * initSuffixTree  - create suffix tree from file. Function converts
  * UTF8-encoded file into current encoding.
  */
-static SuffixChar*
-initSuffixTree(char *filename) 
+static SuffixChar *
+initSuffixTree(char *filename)
 {
-	SuffixChar * volatile rootSuffixTree = NULL;
+	SuffixChar *volatile rootSuffixTree = NULL;
 	MemoryContext ccxt = CurrentMemoryContext;
-	tsearch_readline_state	trst;
-	volatile bool	skip;
+	tsearch_readline_state trst;
+	volatile bool skip;
 
 	filename = get_tsearch_config_filename(filename, "rules");
 	if (!tsearch_readline_begin(&trst, filename))
@@ -90,34 +91,34 @@ initSuffixTree(char *filename)
 				 errmsg("could not open unaccent file \"%s\": %m",
 						filename)));
 
-	do	
+	do
 	{
-		char	src[4096];
-		char	trg[4096];
-		int		srclen;
-		int		trglen;
-		char   *line = NULL;
+		char		src[4096];
+		char		trg[4096];
+		int			srclen;
+		int			trglen;
+		char	   *line = NULL;
 
 		skip = true;
 
 		PG_TRY();
 		{
 			/*
-			 * pg_do_encoding_conversion() (called by tsearch_readline())
-			 * will emit exception if it finds untranslatable characters in current locale.
-			 * We just skip such characters.
+			 * pg_do_encoding_conversion() (called by tsearch_readline()) will
+			 * emit an exception if it finds untranslatable characters in the
+			 * current locale.  We just skip such characters.
 			 */
 			while ((line = tsearch_readline(&trst)) != NULL)
 			{
-				if ( sscanf(line, "%s\t%s\n", src, trg)!=2 )
+				if (sscanf(line, "%s\t%s\n", src, trg) != 2)
 					continue;
 
 				srclen = strlen(src);
 				trglen = strlen(trg);
 
-				rootSuffixTree = placeChar(rootSuffixTree, 
-											(unsigned char*)src, srclen, 
-											trg, trglen);
+				rootSuffixTree = placeChar(rootSuffixTree,
+										   (unsigned char *) src, srclen,
+										   trg, trglen);
 				skip = false;
 				pfree(line);
 			}
@@ -141,7 +142,7 @@ initSuffixTree(char *filename)
 		}
 		PG_END_TRY();
 	}
-	while(skip);
+	while (skip);
 
 	tsearch_readline_end(&trst);
 
@@ -151,13 +152,13 @@ initSuffixTree(char *filename)
 /*
  * findReplaceTo - find multibyte character in tree
  */
-static SuffixChar * 
-findReplaceTo( SuffixChar *node, unsigned char *src, int srclen )
+static SuffixChar *
+findReplaceTo(SuffixChar *node, unsigned char *src, int srclen)
 {
-	while( node ) 
+	while (node)
 	{
 		node = node + *src;
-		if ( srclen == 1 )
+		if (srclen == 1)
 			return node;
 
 		src++;
@@ -169,13 +170,13 @@ findReplaceTo( SuffixChar *node, unsigned char *src, int srclen )
 }
 
 PG_FUNCTION_INFO_V1(unaccent_init);
-Datum       unaccent_init(PG_FUNCTION_ARGS);
+Datum		unaccent_init(PG_FUNCTION_ARGS);
 Datum
 unaccent_init(PG_FUNCTION_ARGS)
 {
-	List       *dictoptions = (List *) PG_GETARG_POINTER(0);
+	List	   *dictoptions = (List *) PG_GETARG_POINTER(0);
 	SuffixChar *rootSuffixTree = NULL;
-	bool        fileloaded = false;
+	bool		fileloaded = false;
 	ListCell   *l;
 
 	foreach(l, dictoptions)
@@ -188,8 +189,8 @@ unaccent_init(PG_FUNCTION_ARGS)
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						 errmsg("multiple Rules parameters")));
-				rootSuffixTree = initSuffixTree(defGetString(defel));
-				fileloaded = true;
+			rootSuffixTree = initSuffixTree(defGetString(defel));
+			fileloaded = true;
 		}
 		else
 		{
@@ -211,51 +212,52 @@ unaccent_init(PG_FUNCTION_ARGS)
 }
 
 PG_FUNCTION_INFO_V1(unaccent_lexize);
-Datum       unaccent_lexize(PG_FUNCTION_ARGS);
+Datum		unaccent_lexize(PG_FUNCTION_ARGS);
 Datum
 unaccent_lexize(PG_FUNCTION_ARGS)
 {
-	SuffixChar *rootSuffixTree = (SuffixChar*)PG_GETARG_POINTER(0);
-	char       *srcchar = (char *) PG_GETARG_POINTER(1);
+	SuffixChar *rootSuffixTree = (SuffixChar *) PG_GETARG_POINTER(0);
+	char	   *srcchar = (char *) PG_GETARG_POINTER(1);
 	int32		len = PG_GETARG_INT32(2);
-	char	   *srcstart, *trgchar = NULL;
+	char	   *srcstart,
+			   *trgchar = NULL;
 	int			charlen;
 	TSLexeme   *res = NULL;
 	SuffixChar *node;
 
 	srcstart = srcchar;
-	while( srcchar - srcstart < len )
+	while (srcchar - srcstart < len)
 	{
 		charlen = pg_mblen(srcchar);
 
-		node = findReplaceTo( rootSuffixTree, (unsigned char *) srcchar, charlen );
-		if ( node  && node->replaceTo )
+		node = findReplaceTo(rootSuffixTree, (unsigned char *) srcchar, charlen);
+		if (node && node->replaceTo)
 		{
-			if ( !res )
+			if (!res)
 			{
 				/* allocate res only if it's needed */
 				res = palloc0(sizeof(TSLexeme) * 2);
-				res->lexeme = trgchar = palloc( len * pg_database_encoding_max_length() + 1 /* \0 */ );
+				res->lexeme = trgchar = palloc(len * pg_database_encoding_max_length() + 1 /* \0 */ );
 				res->flags = TSL_FILTER;
-				if ( srcchar != srcstart )
+				if (srcchar != srcstart)
 				{
 					memcpy(trgchar, srcstart, srcchar - srcstart);
 					trgchar += (srcchar - srcstart);
 				}
 			}
-			memcpy( trgchar, node->replaceTo, node->replacelen );
-			trgchar += node->replacelen; 
+			memcpy(trgchar, node->replaceTo, node->replacelen);
+			trgchar += node->replacelen;
 		}
-		else if ( res )
+		else if (res)
 		{
-			memcpy( trgchar, srcchar, charlen );
+			memcpy(trgchar, srcchar, charlen);
 			trgchar += charlen;
 		}
 
 		srcchar += charlen;
 	}
 
-	if ( res )
+	if (res)
 		*trgchar = '\0';
 
 	PG_RETURN_POINTER(res);
@@ -265,15 +267,15 @@ unaccent_lexize(PG_FUNCTION_ARGS)
  * Function-like wrapper for dictionary
  */
 PG_FUNCTION_INFO_V1(unaccent_dict);
-Datum       unaccent_dict(PG_FUNCTION_ARGS);
+Datum		unaccent_dict(PG_FUNCTION_ARGS);
 Datum
 unaccent_dict(PG_FUNCTION_ARGS)
 {
-	text	*str;
-	int		strArg;
-	Oid		dictOid;
-	TSDictionaryCacheEntry	*dict;
-	TSLexeme *res;
+	text	   *str;
+	int			strArg;
+	Oid			dictOid;
+	TSDictionaryCacheEntry *dict;
+	TSLexeme   *res;
 
 	if (PG_NARGS() == 1)
 	{
@@ -290,25 +292,25 @@ unaccent_dict(PG_FUNCTION_ARGS)
 	dict = lookup_ts_dictionary_cache(dictOid);
 
 	res = (TSLexeme *) DatumGetPointer(FunctionCall4(&(dict->lexize),
-													 PointerGetDatum(dict->dictData),
-													 PointerGetDatum(VARDATA(str)),
-													 Int32GetDatum(VARSIZE(str) - VARHDRSZ),
+											 PointerGetDatum(dict->dictData),
+											   PointerGetDatum(VARDATA(str)),
+									  Int32GetDatum(VARSIZE(str) - VARHDRSZ),
 													 PointerGetDatum(NULL)));
 
 	PG_FREE_IF_COPY(str, strArg);
 
-	if ( res == NULL )
+	if (res == NULL)
 	{
 		PG_RETURN_TEXT_P(PG_GETARG_TEXT_P_COPY(strArg));
 	}
-	else if ( res->lexeme == NULL )
+	else if (res->lexeme == NULL)
 	{
 		pfree(res);
 		PG_RETURN_TEXT_P(PG_GETARG_TEXT_P_COPY(strArg));
 	}
 	else
 	{
-		text *txt = cstring_to_text(res->lexeme);
+		text	   *txt = cstring_to_text(res->lexeme);
 
 		pfree(res->lexeme);
 		pfree(res);
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 65328a9f288830add2a117b0745d27e3c452017d..052982145d9b6c791da8e6e9037a105f256c1c03 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.32 2010/01/22 16:40:18 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.33 2010/02/26 02:00:32 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1210,7 +1210,7 @@ bytea *
 attribute_reloptions(Datum reloptions, bool validate)
 {
 	relopt_value *options;
-	AttributeOpts  *aopts;
+	AttributeOpts *aopts;
 	int			numoptions;
 	static const relopt_parse_elt tab[] = {
 		{"n_distinct", RELOPT_TYPE_REAL, offsetof(AttributeOpts, n_distinct)},
@@ -1241,7 +1241,7 @@ bytea *
 tablespace_reloptions(Datum reloptions, bool validate)
 {
 	relopt_value *options;
-	TableSpaceOpts	*tsopts;
+	TableSpaceOpts *tsopts;
 	int			numoptions;
 	static const relopt_parse_elt tab[] = {
 		{"random_page_cost", RELOPT_TYPE_REAL, offsetof(TableSpaceOpts, random_page_cost)},
diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c
index 3d7de339dd2a294aacb9c31bb60f875b347570de..fec3b3ef583f574987cd6c9f1c8fc06243e9bf90 100644
--- a/src/backend/access/common/tupconvert.c
+++ b/src/backend/access/common/tupconvert.c
@@ -5,7 +5,7 @@
  *
  * These functions provide conversion between rowtypes that are logically
  * equivalent but might have columns in a different order or different sets
- * of dropped columns.  There is some overlap of functionality with the
+ * of dropped columns.	There is some overlap of functionality with the
  * executor's "junkfilter" routines, but these functions work on bare
  * HeapTuples rather than TupleTableSlots.
  *
@@ -14,7 +14,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/common/tupconvert.c,v 1.3 2010/01/02 16:57:33 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/common/tupconvert.c,v 1.4 2010/02/26 02:00:33 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -88,7 +88,7 @@ convert_tuples_by_position(TupleDesc indesc,
 		int32		atttypmod;
 
 		if (att->attisdropped)
-			continue;		/* attrMap[i] is already 0 */
+			continue;			/* attrMap[i] is already 0 */
 		noutcols++;
 		atttypid = att->atttypid;
 		atttypmod = att->atttypmod;
@@ -137,22 +137,22 @@ convert_tuples_by_position(TupleDesc indesc,
 						   nincols, noutcols)));
 
 	/*
-	 * Check to see if the map is one-to-one and the tuple types are the
-	 * same.  (We check the latter because if they're not, we want to do
-	 * conversion to inject the right OID into the tuple datum.)
+	 * Check to see if the map is one-to-one and the tuple types are the same.
+	 * (We check the latter because if they're not, we want to do conversion
+	 * to inject the right OID into the tuple datum.)
 	 */
 	if (indesc->natts == outdesc->natts &&
 		indesc->tdtypeid == outdesc->tdtypeid)
 	{
 		for (i = 0; i < n; i++)
 		{
-			if (attrMap[i] == (i+1))
+			if (attrMap[i] == (i + 1))
 				continue;
 
 			/*
-			 * If it's a dropped column and the corresponding input
-			 * column is also dropped, we needn't convert.  However,
-			 * attlen and attalign must agree.
+			 * If it's a dropped column and the corresponding input column is
+			 * also dropped, we needn't convert.  However, attlen and attalign
+			 * must agree.
 			 */
 			if (attrMap[i] == 0 &&
 				indesc->attrs[i]->attisdropped &&
@@ -182,10 +182,10 @@ convert_tuples_by_position(TupleDesc indesc,
 	/* preallocate workspace for Datum arrays */
 	map->outvalues = (Datum *) palloc(n * sizeof(Datum));
 	map->outisnull = (bool *) palloc(n * sizeof(bool));
-	n = indesc->natts + 1;						/* +1 for NULL */
+	n = indesc->natts + 1;		/* +1 for NULL */
 	map->invalues = (Datum *) palloc(n * sizeof(Datum));
 	map->inisnull = (bool *) palloc(n * sizeof(bool));
-	map->invalues[0] = (Datum) 0;				/* set up the NULL entry */
+	map->invalues[0] = (Datum) 0;		/* set up the NULL entry */
 	map->inisnull[0] = true;
 
 	return map;
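For readers following the attrMap convention used in this file (attrMap[i] names the 1-based input column that feeds output column i+1, and 0 points at the preallocated NULL entry mentioned in the "+1 for NULL" comment), here is a minimal sketch of how such a map drives a per-row conversion, using plain string arrays instead of Datums. The helper is hypothetical, not the backend's routine.

/* Sketch: outvalues[i] is taken from invalues[attrMap[i]]; slot 0 is a NULL entry. */
#include <stdio.h>

static void
apply_map(const int *attrMap, const char **invalues,
		  const char **outvalues, int natts)
{
	int			i;

	for (i = 0; i < natts; i++)
		outvalues[i] = invalues[attrMap[i]];	/* attrMap[i] == 0 => NULL slot */
}

int
main(void)
{
	/* invalues[0] is the NULL entry; input columns are 1-based after that */
	const char *invalues[] = {NULL, "a", "b", "c"};
	const int	attrMap[] = {3, 0, 1};	/* out 1 <- in 3, out 2 dropped, out 3 <- in 1 */
	const char *outvalues[3];
	int			i;

	apply_map(attrMap, invalues, outvalues, 3);
	for (i = 0; i < 3; i++)
		printf("out[%d] = %s\n", i, outvalues[i] ? outvalues[i] : "NULL");
	return 0;
}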
@@ -193,7 +193,7 @@ convert_tuples_by_position(TupleDesc indesc,
 
 /*
  * Set up for tuple conversion, matching input and output columns by name.
- * (Dropped columns are ignored in both input and output.)  This is intended
+ * (Dropped columns are ignored in both input and output.)	This is intended
  * for use when the rowtypes are related by inheritance, so we expect an exact
  * match of both type and typmod.  The error messages will be a bit unhelpful
  * unless both rowtypes are named composite types.
@@ -221,7 +221,7 @@ convert_tuples_by_name(TupleDesc indesc,
 		int			j;
 
 		if (att->attisdropped)
-			continue;		/* attrMap[i] is already 0 */
+			continue;			/* attrMap[i] is already 0 */
 		attname = NameStr(att->attname);
 		atttypid = att->atttypid;
 		atttypmod = att->atttypmod;
@@ -256,9 +256,9 @@ convert_tuples_by_name(TupleDesc indesc,
 	}
 
 	/*
-	 * Check to see if the map is one-to-one and the tuple types are the
-	 * same.  (We check the latter because if they're not, we want to do
-	 * conversion to inject the right OID into the tuple datum.)
+	 * Check to see if the map is one-to-one and the tuple types are the same.
+	 * (We check the latter because if they're not, we want to do conversion
+	 * to inject the right OID into the tuple datum.)
 	 */
 	if (indesc->natts == outdesc->natts &&
 		indesc->tdtypeid == outdesc->tdtypeid)
@@ -266,13 +266,13 @@ convert_tuples_by_name(TupleDesc indesc,
 		same = true;
 		for (i = 0; i < n; i++)
 		{
-			if (attrMap[i] == (i+1))
+			if (attrMap[i] == (i + 1))
 				continue;
 
 			/*
-			 * If it's a dropped column and the corresponding input
-			 * column is also dropped, we needn't convert.  However,
-			 * attlen and attalign must agree.
+			 * If it's a dropped column and the corresponding input column is
+			 * also dropped, we needn't convert.  However, attlen and attalign
+			 * must agree.
 			 */
 			if (attrMap[i] == 0 &&
 				indesc->attrs[i]->attisdropped &&
@@ -302,10 +302,10 @@ convert_tuples_by_name(TupleDesc indesc,
 	/* preallocate workspace for Datum arrays */
 	map->outvalues = (Datum *) palloc(n * sizeof(Datum));
 	map->outisnull = (bool *) palloc(n * sizeof(bool));
-	n = indesc->natts + 1;						/* +1 for NULL */
+	n = indesc->natts + 1;		/* +1 for NULL */
 	map->invalues = (Datum *) palloc(n * sizeof(Datum));
 	map->inisnull = (bool *) palloc(n * sizeof(bool));
-	map->invalues[0] = (Datum) 0;				/* set up the NULL entry */
+	map->invalues[0] = (Datum) 0;		/* set up the NULL entry */
 	map->inisnull[0] = true;
 
 	return map;
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index accd6640375f1cc7065ee33be5563baba756bf57..bb726e69f4cddefb9534592ffeda468552e12f64 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *			$PostgreSQL: pgsql/src/backend/access/gin/ginbulk.c,v 1.18 2010/02/11 14:29:50 teodor Exp $
+ *			$PostgreSQL: pgsql/src/backend/access/gin/ginbulk.c,v 1.19 2010/02/26 02:00:33 momjian Exp $
  *-------------------------------------------------------------------------
  */
 
@@ -22,20 +22,20 @@
 #define DEF_NENTRY	2048
 #define DEF_NPTR	4
 
-static void*
+static void *
 ginAppendData(void *old, void *new, void *arg)
 {
-	EntryAccumulator	*eo = (EntryAccumulator*)old,
-						*en = (EntryAccumulator*)new;
+	EntryAccumulator *eo = (EntryAccumulator *) old,
+			   *en = (EntryAccumulator *) new;
 
-	BuildAccumulator	*accum = (BuildAccumulator*)arg;
+	BuildAccumulator *accum = (BuildAccumulator *) arg;
 
 	if (eo->number >= eo->length)
 	{
 		accum->allocatedMemory -= GetMemoryChunkSpace(eo->list);
 		eo->length *= 2;
 		eo->list = (ItemPointerData *) repalloc(eo->list,
-									sizeof(ItemPointerData) * eo->length);
+									   sizeof(ItemPointerData) * eo->length);
 		accum->allocatedMemory += GetMemoryChunkSpace(eo->list);
 	}
 
@@ -60,9 +60,9 @@ ginAppendData(void *old, void *new, void *arg)
 static int
 cmpEntryAccumulator(const void *a, const void *b, void *arg)
 {
-	EntryAccumulator	*ea = (EntryAccumulator*)a;
-	EntryAccumulator	*eb = (EntryAccumulator*)b;
-	BuildAccumulator	*accum = (BuildAccumulator*)arg;
+	EntryAccumulator *ea = (EntryAccumulator *) a;
+	EntryAccumulator *eb = (EntryAccumulator *) b;
+	BuildAccumulator *accum = (BuildAccumulator *) arg;
 
 	return compareAttEntries(accum->ginstate, ea->attnum, ea->value,
 							 eb->attnum, eb->value);
@@ -104,13 +104,13 @@ getDatumCopy(BuildAccumulator *accum, OffsetNumber attnum, Datum value)
 static void
 ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum, Datum entry)
 {
-	EntryAccumulator 	*key,
-						*ea;
+	EntryAccumulator *key,
+			   *ea;
 
-	/* 
-	 * Allocate memory by rather big chunk to decrease overhead, we don't
-	 * keep pointer to previously allocated chunks because they will free
-	 * by MemoryContextReset() call.
+	/*
+	 * Allocate memory in rather big chunks to decrease overhead; we don't
+	 * keep pointers to previously allocated chunks because they will be
+	 * freed by the MemoryContextReset() call.
 	 */
 	if (accum->entryallocator == NULL || accum->length >= DEF_NENTRY)
 	{
@@ -125,7 +125,7 @@ ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum
 
 	key->attnum = attnum;
 	key->value = entry;
-	/* To prevent multiple palloc/pfree cycles, we reuse array */ 
+	/* To prevent multiple palloc/pfree cycles, we reuse array */
 	if (accum->tmpList == NULL)
 		accum->tmpList =
 			(ItemPointerData *) palloc(sizeof(ItemPointerData) * DEF_NPTR);
@@ -149,8 +149,8 @@ ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum
 	else
 	{
 		/*
-		 * The key has been appended, so "free" allocated
-		 * key by decrementing chunk's counter.
+		 * The key has been appended, so "free" the allocated key by
+		 * decrementing the chunk's counter.
 		 */
 		accum->length--;
 	}
@@ -162,7 +162,7 @@ ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum
  * Since the entries are being inserted into a balanced binary tree, you
  * might think that the order of insertion wouldn't be critical, but it turns
  * out that inserting the entries in sorted order results in a lot of
- * rebalancing operations and is slow.  To prevent this, we attempt to insert
+ * rebalancing operations and is slow.	To prevent this, we attempt to insert
  * the nodes in an order that will produce a nearly-balanced tree if the input
  * is in fact sorted.
  *
@@ -172,11 +172,11 @@ ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum
  * tree; then, we insert the middles of each half of out virtual array, then
  * middles of quarters, etc.
  */
-	 void
+void
 ginInsertRecordBA(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum,
 				  Datum *entries, int32 nentry)
 {
-	uint32	step = nentry;
+	uint32		step = nentry;
 
 	if (nentry <= 0)
 		return;
@@ -186,21 +186,22 @@ ginInsertRecordBA(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber att
 	/*
 	 * step will contain largest power of 2 and <= nentry
 	 */
-	step |= (step >>  1);
-	step |= (step >>  2);
-	step |= (step >>  4);
-	step |= (step >>  8);
+	step |= (step >> 1);
+	step |= (step >> 2);
+	step |= (step >> 4);
+	step |= (step >> 8);
 	step |= (step >> 16);
 	step >>= 1;
-	step ++;
+	step++;
 
-	while(step > 0) {
-		int i;
+	while (step > 0)
+	{
+		int			i;
 
-		for (i = step - 1; i < nentry && i >= 0; i += step << 1 /* *2 */)
+		for (i = step - 1; i < nentry && i >= 0; i += step << 1 /* *2 */ )
 			ginInsertEntry(accum, heapptr, attnum, entries[i]);
 
-		step >>= 1; /* /2 */
+		step >>= 1;				/* /2 */
 	}
 }
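The reflowed loop above implements the nearly-balanced insertion order described in the function's header comment: round step down to the largest power of two not exceeding nentry, insert the "middles" first, and halve step on each pass. A stand-alone sketch that just prints the visiting order makes the effect easy to check.

/*
 * Sketch of the insertion order: for n = 7, step becomes 4 and the program
 * prints 3 1 5 0 2 4 6, i.e. a nearly-balanced order for sorted input.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t	n = 7;
	uint32_t	step = n;
	uint32_t	i;

	/* smear the high bit rightward, then keep only the largest power of two <= n */
	step |= (step >> 1);
	step |= (step >> 2);
	step |= (step >> 4);
	step |= (step >> 8);
	step |= (step >> 16);
	step >>= 1;
	step++;

	while (step > 0)
	{
		for (i = step - 1; i < n; i += step << 1)
			printf("%u ", i);
		step >>= 1;
	}
	printf("\n");
	return 0;
}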
 
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index 8913b437cf6898e82e23513ff72984d7ee16d0db..6d307c8d59ad21ace153e058ed358306884038bc 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *			$PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.23 2010/01/02 16:57:33 momjian Exp $
+ *			$PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.24 2010/02/26 02:00:33 momjian Exp $
  *-------------------------------------------------------------------------
  */
 
@@ -104,7 +104,7 @@ GinFormTuple(Relation index, GinState *ginstate,
 		 * Gin tuple without any ItemPointers should be large enough to keep
 		 * one ItemPointer, to prevent inconsistency between
 		 * ginHeapTupleFastCollect and ginEntryInsert called by
-		 * ginHeapTupleInsert.  ginHeapTupleFastCollect forms tuple without
+		 * ginHeapTupleInsert.	ginHeapTupleFastCollect forms tuple without
 		 * extra pointer to heap, but ginEntryInsert (called for pending list
 		 * cleanup during vacuum) will form the same tuple with one
 		 * ItemPointer.
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 967c02b798370c7da11eb0415980987eaac2a231..705d167963b12897c29c139299032cbb341b2943 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *			$PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.29 2010/01/02 16:57:33 momjian Exp $
+ *			$PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.30 2010/02/26 02:00:33 momjian Exp $
  *-------------------------------------------------------------------------
  */
 
@@ -25,11 +25,11 @@
 
 typedef struct pendingPosition
 {
-	Buffer				pendingBuffer;
-	OffsetNumber 		firstOffset;
-	OffsetNumber 		lastOffset;
-	ItemPointerData 	item;
-	bool			   *hasMatchKey;
+	Buffer		pendingBuffer;
+	OffsetNumber firstOffset;
+	OffsetNumber lastOffset;
+	ItemPointerData item;
+	bool	   *hasMatchKey;
 } pendingPosition;
 
 
@@ -877,7 +877,7 @@ matchPartialInPendingList(GinState *ginstate, Page page,
 static bool
 hasAllMatchingKeys(GinScanOpaque so, pendingPosition *pos)
 {
-	int		i;
+	int			i;
 
 	for (i = 0; i < so->nkeys; i++)
 		if (pos->hasMatchKey[i] == false)
@@ -912,7 +912,7 @@ collectDatumForItem(IndexScanDesc scan, pendingPosition *pos)
 
 		memset(key->entryRes, FALSE, key->nentries);
 	}
-	memset(pos->hasMatchKey, FALSE, so->nkeys); 
+	memset(pos->hasMatchKey, FALSE, so->nkeys);
 
 	for (;;)
 	{
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 5cf969a1fdfb176536c4253cbaf1fcd2dd0929e3..216910307a84a519b28d698e67425d14512fbbf3 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.84 2010/01/02 16:57:34 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.85 2010/02/26 02:00:33 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -414,7 +414,8 @@ gistindex_keytest(IndexTuple tuple,
 			/*
 			 * On non-leaf page we can't conclude that child hasn't NULL
 			 * values because of assumption in GiST: union (VAL, NULL) is VAL.
-			 * But if on non-leaf page key IS NULL, then all children are NULL.
+			 * But if on non-leaf page key IS NULL, then all children are
+			 * NULL.
 			 */
 			if (key->sk_flags & SK_SEARCHNULL)
 			{
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index 18ee0259a59e0a9f611531cd7c770d06b6ae3d9c..cb34b26113e140d804f9fdbf02dba4708a9cd88e 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -10,7 +10,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	$PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.20 2010/01/14 16:31:09 teodor Exp $
+ *	$PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.21 2010/02/26 02:00:33 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -889,8 +889,8 @@ gist_point_compress(PG_FUNCTION_ARGS)
 
 	if (entry->leafkey)			/* Point, actually */
 	{
-		BOX	   *box = palloc(sizeof(BOX));
-		Point  *point = DatumGetPointP(entry->key);
+		BOX		   *box = palloc(sizeof(BOX));
+		Point	   *point = DatumGetPointP(entry->key);
 		GISTENTRY  *retval = palloc(sizeof(GISTENTRY));
 
 		box->high = box->low = *point;
@@ -906,9 +906,9 @@ gist_point_compress(PG_FUNCTION_ARGS)
 
 static bool
 gist_point_consistent_internal(StrategyNumber strategy,
-										   bool isLeaf, BOX *key, Point *query)
+							   bool isLeaf, BOX *key, Point *query)
 {
-	bool result = false;
+	bool		result = false;
 
 	switch (strategy)
 	{
@@ -953,10 +953,10 @@ Datum
 gist_point_consistent(PG_FUNCTION_ARGS)
 {
 	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
-	StrategyNumber	strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+	StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
 	bool		result;
 	bool	   *recheck = (bool *) PG_GETARG_POINTER(4);
-	StrategyNumber	strategyGroup = strategy / GeoStrategyNumberOffset;
+	StrategyNumber strategyGroup = strategy / GeoStrategyNumberOffset;
 
 	switch (strategyGroup)
 	{
@@ -969,22 +969,22 @@ gist_point_consistent(PG_FUNCTION_ARGS)
 			break;
 		case BoxStrategyNumberGroup:
 			result = DatumGetBool(DirectFunctionCall5(
-											gist_box_consistent,
-											PointerGetDatum(entry),
-											PG_GETARG_DATUM(1),
-											Int16GetDatum(RTOverlapStrategyNumber),
-											0, PointerGetDatum(recheck)));
+													  gist_box_consistent,
+													  PointerGetDatum(entry),
+													  PG_GETARG_DATUM(1),
+									  Int16GetDatum(RTOverlapStrategyNumber),
+											   0, PointerGetDatum(recheck)));
 			break;
 		case PolygonStrategyNumberGroup:
 			{
 				POLYGON    *query = PG_GETARG_POLYGON_P(1);
 
 				result = DatumGetBool(DirectFunctionCall5(
-												gist_poly_consistent,
-												PointerGetDatum(entry),
-												PolygonPGetDatum(query),
-												Int16GetDatum(RTOverlapStrategyNumber),
-												0, PointerGetDatum(recheck)));
+														gist_poly_consistent,
+													  PointerGetDatum(entry),
+													 PolygonPGetDatum(query),
+									  Int16GetDatum(RTOverlapStrategyNumber),
+											   0, PointerGetDatum(recheck)));
 
 				if (GIST_LEAF(entry) && result)
 				{
@@ -992,13 +992,13 @@ gist_point_consistent(PG_FUNCTION_ARGS)
 					 * We are on leaf page and quick check shows overlapping
 					 * of polygon's bounding box and point
 					 */
-					BOX *box = DatumGetBoxP(entry->key);
+					BOX		   *box = DatumGetBoxP(entry->key);
 
 					Assert(box->high.x == box->low.x
-						&& box->high.y == box->low.y);
+						   && box->high.y == box->low.y);
 					result = DatumGetBool(DirectFunctionCall2(
-												poly_contain_pt,
-												PolygonPGetDatum(query),
+															  poly_contain_pt,
+													 PolygonPGetDatum(query),
 												PointPGetDatum(&box->high)));
 					*recheck = false;
 				}
@@ -1006,14 +1006,14 @@ gist_point_consistent(PG_FUNCTION_ARGS)
 			break;
 		case CircleStrategyNumberGroup:
 			{
-				CIRCLE *query = PG_GETARG_CIRCLE_P(1);
+				CIRCLE	   *query = PG_GETARG_CIRCLE_P(1);
 
 				result = DatumGetBool(DirectFunctionCall5(
-												gist_circle_consistent,
-												PointerGetDatum(entry),
-												CirclePGetDatum(query),
-												Int16GetDatum(RTOverlapStrategyNumber),
-												0, PointerGetDatum(recheck)));
+													  gist_circle_consistent,
+													  PointerGetDatum(entry),
+													  CirclePGetDatum(query),
+									  Int16GetDatum(RTOverlapStrategyNumber),
+											   0, PointerGetDatum(recheck)));
 
 				if (GIST_LEAF(entry) && result)
 				{
@@ -1021,20 +1021,20 @@ gist_point_consistent(PG_FUNCTION_ARGS)
 					 * We are on leaf page and quick check shows overlapping
 					 * of polygon's bounding box and point
 					 */
-					BOX *box = DatumGetBoxP(entry->key);
+					BOX		   *box = DatumGetBoxP(entry->key);
 
 					Assert(box->high.x == box->low.x
-						&& box->high.y == box->low.y);
+						   && box->high.y == box->low.y);
 					result = DatumGetBool(DirectFunctionCall2(
-												circle_contain_pt,
-												CirclePGetDatum(query),
+														   circle_contain_pt,
+													  CirclePGetDatum(query),
 												PointPGetDatum(&box->high)));
 					*recheck = false;
 				}
 			}
 			break;
 		default:
-			result = false;			/* silence compiler warning */
+			result = false;		/* silence compiler warning */
 			elog(ERROR, "unknown strategy number: %d", strategy);
 	}
 
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index 1abdc3e168f88aa0e8d80f80b0ff506d85265833..a53d8cd08733d79d2feeed57d55d03c34fb062df 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.78 2010/01/02 16:57:34 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.79 2010/02/26 02:00:33 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -92,8 +92,8 @@ gistrescan(PG_FUNCTION_ARGS)
 		 * field.
 		 *
 		 * Next, if any of keys is a NULL and that key is not marked with
-		 * SK_SEARCHNULL/SK_SEARCHNOTNULL then nothing can be found (ie,
-		 * we assume all indexable operators are strict).
+		 * SK_SEARCHNULL/SK_SEARCHNOTNULL then nothing can be found (ie, we
+		 * assume all indexable operators are strict).
 		 */
 		for (i = 0; i < scan->numberOfKeys; i++)
 		{
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index cd01b06437f390b873095e7903ebc0d2d3ecc2fa..6474f45940ec27e4fb0f5c83b1d50f4ecdd1cdab 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.116 2010/01/02 16:57:34 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.117 2010/02/26 02:00:33 momjian Exp $
  *
  * NOTES
  *	  This file contains only the public interface routines.
@@ -233,7 +233,7 @@ hashgettuple(PG_FUNCTION_ARGS)
 		/*
 		 * An insertion into the current index page could have happened while
 		 * we didn't have read lock on it.  Re-find our position by looking
-		 * for the TID we previously returned.  (Because we hold share lock on
+		 * for the TID we previously returned.	(Because we hold share lock on
 		 * the bucket, no deletions or splits could have occurred; therefore
 		 * we can expect that the TID still exists in the current index page,
 		 * at an offset >= where we were.)
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 804f3ee934aea16aea6f173e92cb2b953c969c33..3ca8d733ad6b30b81b7c10e56ceeb9dfb1808dd9 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.68 2010/01/02 16:57:34 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.69 2010/02/26 02:00:33 momjian Exp $
  *
  * NOTES
  *	  Overflow pages look like ordinary relation pages.
@@ -717,8 +717,8 @@ _hash_squeezebucket(Relation rel,
 
 		/*
 		 * If we reach here, there are no live tuples on the "read" page ---
-		 * it was empty when we got to it, or we moved them all.  So we
-		 * can just free the page without bothering with deleting tuples
+		 * it was empty when we got to it, or we moved them all.  So we can
+		 * just free the page without bothering with deleting tuples
 		 * individually.  Then advance to the previous "read" page.
 		 *
 		 * Tricky point here: if our read and write pages are adjacent in the
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 3f1d3cda3f3615105816bfcda5db38083862d3d0..77b072c88011ac820c155e5619cc325fa4c3eccf 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.82 2010/01/02 16:57:34 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.83 2010/02/26 02:00:33 momjian Exp $
  *
  * NOTES
  *	  Postgres hash pages look like ordinary relation pages.  The opaque
@@ -799,8 +799,8 @@ _hash_splitbucket(Relation rel,
 	/*
 	 * Partition the tuples in the old bucket between the old bucket and the
 	 * new bucket, advancing along the old bucket's overflow bucket chain and
-	 * adding overflow pages to the new bucket as needed.  Outer loop
-	 * iterates once per page in old bucket.
+	 * adding overflow pages to the new bucket as needed.  Outer loop iterates
+	 * once per page in old bucket.
 	 */
 	for (;;)
 	{
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 4dfb85ac56b8bf73a604633d65bc729db62a13ef..1f26faa2ee0fddc10182369682e38429347d3dcf 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.287 2010/02/14 18:42:12 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.288 2010/02/26 02:00:33 momjian Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -79,8 +79,8 @@ static HeapScanDesc heap_beginscan_internal(Relation relation,
 						bool allow_strat, bool allow_sync,
 						bool is_bitmapscan);
 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
-		   ItemPointerData from, Buffer newbuf, HeapTuple newtup,
-		   bool all_visible_cleared, bool new_all_visible_cleared);
+				ItemPointerData from, Buffer newbuf, HeapTuple newtup,
+				bool all_visible_cleared, bool new_all_visible_cleared);
 static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
 					   HeapTuple oldtup, HeapTuple newtup);
 
@@ -248,8 +248,8 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
 
 	/*
 	 * If the all-visible flag indicates that all tuples on the page are
-	 * visible to everyone, we can skip the per-tuple visibility tests.
-	 * But not in hot standby mode. A tuple that's already visible to all
+	 * visible to everyone, we can skip the per-tuple visibility tests. But
+	 * not in hot standby mode. A tuple that's already visible to all
 	 * transactions in the master might still be invisible to a read-only
 	 * transaction in the standby.
 	 */
@@ -3667,8 +3667,8 @@ recheck_xmax:
 	 * someone setting xmax.  Hence recheck after changing lock, same as for
 	 * xmax itself.
 	 *
-	 * Old-style VACUUM FULL is gone, but we have to keep this code as long
-	 * as we support having MOVED_OFF/MOVED_IN tuples in the database.
+	 * Old-style VACUUM FULL is gone, but we have to keep this code as long as
+	 * we support having MOVED_OFF/MOVED_IN tuples in the database.
 	 */
 recheck_xvac:
 	if (tuple->t_infomask & HEAP_MOVED)
@@ -4099,9 +4099,9 @@ heap_xlog_cleanup_info(XLogRecPtr lsn, XLogRecord *record)
 		ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, xlrec->node);
 
 	/*
-	 * Actual operation is a no-op. Record type exists to provide a means
-	 * for conflict processing to occur before we begin index vacuum actions.
-	 * see vacuumlazy.c and also comments in btvacuumpage()
+	 * Actual operation is a no-op. Record type exists to provide a means for
+	 * conflict processing to occur before we begin index vacuum actions.  See
+	 * vacuumlazy.c and also comments in btvacuumpage().
 	 */
 }
 
@@ -4769,8 +4769,8 @@ heap_redo(XLogRecPtr lsn, XLogRecord *record)
 	uint8		info = record->xl_info & ~XLR_INFO_MASK;
 
 	/*
-	 * These operations don't overwrite MVCC data so no conflict
-	 * processing is required. The ones in heap2 rmgr do.
+	 * These operations don't overwrite MVCC data so no conflict processing is
+	 * required. The ones in heap2 rmgr do.
 	 */
 
 	RestoreBkpBlocks(lsn, record, false);
@@ -4809,8 +4809,8 @@ heap2_redo(XLogRecPtr lsn, XLogRecord *record)
 	uint8		info = record->xl_info & ~XLR_INFO_MASK;
 
 	/*
-	 * Note that RestoreBkpBlocks() is called after conflict processing
-	 * within each record type handling function.
+	 * Note that RestoreBkpBlocks() is called after conflict processing within
+	 * each record type handling function.
 	 */
 
 	switch (info & XLOG_HEAP_OPMASK)
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 9d6a737277bd589cf7a00d96da979a18c92f471c..89607f57be39b726e97bff269cc9f2e576fcc407 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.21 2010/02/08 04:33:53 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.22 2010/02/26 02:00:33 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -29,8 +29,9 @@
 typedef struct
 {
 	TransactionId new_prune_xid;	/* new prune hint value for page */
-	TransactionId latestRemovedXid; /* latest xid to be removed by this prune */
-	int			nredirected;		/* numbers of entries in arrays below */
+	TransactionId latestRemovedXid;		/* latest xid to be removed by this
+										 * prune */
+	int			nredirected;	/* numbers of entries in arrays below */
 	int			ndead;
 	int			nunused;
 	/* arrays that accumulate indexes of items to be changed */
@@ -85,8 +86,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
 
 	/*
 	 * We can't write WAL in recovery mode, so there's no point trying to
-	 * clean the page. The master will likely issue a cleaning WAL record
-	 * soon anyway, so this is no particular loss.
+	 * clean the page. The master will likely issue a cleaning WAL record soon
+	 * anyway, so this is no particular loss.
 	 */
 	if (RecoveryInProgress())
 		return;
@@ -164,8 +165,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
 	 *
 	 * First, initialize the new pd_prune_xid value to zero (indicating no
 	 * prunable tuples).  If we find any tuples which may soon become
-	 * prunable, we will save the lowest relevant XID in new_prune_xid.
-	 * Also initialize the rest of our working state.
+	 * prunable, we will save the lowest relevant XID in new_prune_xid. Also
+	 * initialize the rest of our working state.
 	 */
 	prstate.new_prune_xid = InvalidTransactionId;
 	prstate.latestRemovedXid = InvalidTransactionId;
@@ -370,7 +371,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
 			{
 				heap_prune_record_unused(prstate, rootoffnum);
 				HeapTupleHeaderAdvanceLatestRemovedXid(htup,
-													   &prstate->latestRemovedXid);
+												 &prstate->latestRemovedXid);
 				ndeleted++;
 			}
 
@@ -499,7 +500,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
 		{
 			latestdead = offnum;
 			HeapTupleHeaderAdvanceLatestRemovedXid(htup,
-												   &prstate->latestRemovedXid);
+												 &prstate->latestRemovedXid);
 		}
 		else if (!recent_dead)
 			break;
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 65522f46c147a4b447b45340dcc75139be0c39cd..5c60c1f130da91df6c49f40f36f254371ec3cc07 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -96,7 +96,7 @@
  * Portions Copyright (c) 1994-5, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.20 2010/02/03 10:01:29 heikki Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.21 2010/02/26 02:00:33 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -281,7 +281,8 @@ end_heap_rewrite(RewriteState state)
 	/* Write an XLOG UNLOGGED record if WAL-logging was skipped */
 	if (!state->rs_use_wal && !state->rs_new_rel->rd_istemp)
 	{
-		char reason[NAMEDATALEN + 30];
+		char		reason[NAMEDATALEN + 30];
+
 		snprintf(reason, sizeof(reason), "heap rewrite on \"%s\"",
 				 RelationGetRelationName(state->rs_new_rel));
 		XLogReportUnloggedStatement(reason);
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index dd6218cbfe201434fdb30ef57732e9ce1773c66a..7518db16c81a5e74608f7d8fe6f72194257f3a0b 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.97 2010/02/04 00:09:13 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.98 2010/02/26 02:00:33 momjian Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -796,9 +796,9 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
 	}
 
 	/*
-	 * Finally we store attributes of type 'm' externally.  At this point
-	 * we increase the target tuple size, so that 'm' attributes aren't
-	 * stored externally unless really necessary.
+	 * Finally we store attributes of type 'm' externally.	At this point we
+	 * increase the target tuple size, so that 'm' attributes aren't stored
+	 * externally unless really necessary.
 	 */
 	maxDataLen = TOAST_TUPLE_TARGET_MAIN - hoff;
 
@@ -1190,8 +1190,8 @@ toast_save_datum(Relation rel, Datum value, int options)
 	 *
 	 * Normally this is the actual OID of the target toast table, but during
 	 * table-rewriting operations such as CLUSTER, we have to insert the OID
-	 * of the table's real permanent toast table instead.  rd_toastoid is
-	 * set if we have to substitute such an OID.
+	 * of the table's real permanent toast table instead.  rd_toastoid is set
+	 * if we have to substitute such an OID.
 	 */
 	if (OidIsValid(rel->rd_toastoid))
 		toast_pointer.va_toastrelid = rel->rd_toastoid;
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index dbfb3df2d0d6289e56437e4a396a59d35231f3e7..1ae92e6fe829a2f1fe3c019d2c6f3911243f2ec2 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.8 2010/02/09 21:43:29 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.9 2010/02/26 02:00:33 momjian Exp $
  *
  * INTERFACE ROUTINES
  *		visibilitymap_clear - clear a bit in the visibility map
@@ -19,7 +19,7 @@
  * NOTES
  *
  * The visibility map is a bitmap with one bit per heap page. A set bit means
- * that all tuples on the page are known visible to all transactions, and 
+ * that all tuples on the page are known visible to all transactions, and
  * therefore the page doesn't need to be vacuumed. The map is conservative in
  * the sense that we make sure that whenever a bit is set, we know the
  * condition is true, but if a bit is not set, it might or might not be true.
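Since the NOTES paragraph defines the map as one bit per heap page, a heap block number decomposes into a map page, a byte within that page, and a bit within that byte. A rough sketch of that arithmetic follows; MAPSIZE here is an assumed round number, not the backend's actual per-page figure.

/* Sketch of one-bit-per-heap-page addressing with an assumed usable map size. */
#include <stdio.h>
#include <stdint.h>

#define MAPSIZE				8000	/* assumed usable bytes per map page */
#define BITS_PER_BYTE		8
#define BITS_PER_MAPPAGE	(MAPSIZE * BITS_PER_BYTE)

int
main(void)
{
	uint32_t	heapblk = 123456;
	uint32_t	mapblock = heapblk / BITS_PER_MAPPAGE;
	uint32_t	mapbyte = (heapblk % BITS_PER_MAPPAGE) / BITS_PER_BYTE;
	uint32_t	mapbit = heapblk % BITS_PER_BYTE;

	printf("heap block %u -> map page %u, byte %u, bit %u\n",
		   heapblk, mapblock, mapbyte, mapbit);
	return 0;
}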
@@ -377,11 +377,10 @@ visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
 				 rel->rd_istemp);
 
 	/*
-	 * We might as well update the local smgr_vm_nblocks setting.
-	 * smgrtruncate sent an smgr cache inval message, which will cause
-	 * other backends to invalidate their copy of smgr_vm_nblocks, and
-	 * this one too at the next command boundary.  But this ensures it
-	 * isn't outright wrong until then.
+	 * We might as well update the local smgr_vm_nblocks setting. smgrtruncate
+	 * sent an smgr cache inval message, which will cause other backends to
+	 * invalidate their copy of smgr_vm_nblocks, and this one too at the next
+	 * command boundary.  But this ensures it isn't outright wrong until then.
 	 */
 	if (rel->rd_smgr)
 		rel->rd_smgr->smgr_vm_nblocks = newnblocks;
@@ -411,7 +410,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
 	{
 		if (smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
 			rel->rd_smgr->smgr_vm_nblocks = smgrnblocks(rel->rd_smgr,
-														VISIBILITYMAP_FORKNUM);
+													  VISIBILITYMAP_FORKNUM);
 		else
 			rel->rd_smgr->smgr_vm_nblocks = 0;
 	}
@@ -466,8 +465,8 @@ vm_extend(Relation rel, BlockNumber vm_nblocks)
 	RelationOpenSmgr(rel);
 
 	/*
-	 * Create the file first if it doesn't exist.  If smgr_vm_nblocks
-	 * is positive then it must exist, no need for an smgrexists call.
+	 * Create the file first if it doesn't exist.  If smgr_vm_nblocks is
+	 * positive then it must exist, no need for an smgrexists call.
 	 */
 	if ((rel->rd_smgr->smgr_vm_nblocks == 0 ||
 		 rel->rd_smgr->smgr_vm_nblocks == InvalidBlockNumber) &&
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index bd28036087964a05df163a4c219e19081990a497..d95fd90a424c0d64d397dd1bff76d3f994732576 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.80 2010/02/07 20:48:09 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.81 2010/02/26 02:00:33 momjian Exp $
  *
  * NOTES
  *	  many of the old access method routines have been turned into
@@ -94,13 +94,13 @@ RelationGetIndexScan(Relation indexRelation,
 
 	/*
 	 * During recovery we ignore killed tuples and don't bother to kill them
-	 * either. We do this because the xmin on the primary node could easily
-	 * be later than the xmin on the standby node, so that what the primary
+	 * either. We do this because the xmin on the primary node could easily be
+	 * later than the xmin on the standby node, so that what the primary
 	 * thinks is killed is supposed to be visible on standby. So for correct
 	 * MVCC for queries during recovery we must ignore these hints and check
-	 * all tuples. Do *not* set ignore_killed_tuples to true when running
-	 * in a transaction that was started during recovery.
-	 * xactStartedInRecovery should not be altered by index AMs.
+	 * all tuples. Do *not* set ignore_killed_tuples to true when running in a
+	 * transaction that was started during recovery. xactStartedInRecovery
+	 * should not be altered by index AMs.
 	 */
 	scan->kill_prior_tuple = false;
 	scan->xactStartedInRecovery = TransactionStartedDuringRecovery();
@@ -170,24 +170,24 @@ BuildIndexValueDescription(Relation indexRelation,
 
 	for (i = 0; i < natts; i++)
 	{
-		char   *val;
+		char	   *val;
 
 		if (isnull[i])
 			val = "null";
 		else
 		{
-			Oid		foutoid;
-			bool	typisvarlena;
+			Oid			foutoid;
+			bool		typisvarlena;
 
 			/*
-			 * The provided data is not necessarily of the type stored in
-			 * the index; rather it is of the index opclass's input type.
-			 * So look at rd_opcintype not the index tupdesc.
+			 * The provided data is not necessarily of the type stored in the
+			 * index; rather it is of the index opclass's input type. So look
+			 * at rd_opcintype not the index tupdesc.
 			 *
 			 * Note: this is a bit shaky for opclasses that have pseudotype
-			 * input types such as ANYARRAY or RECORD.  Currently, the
-			 * typoutput functions associated with the pseudotypes will
-			 * work okay, but we might have to try harder in future.
+			 * input types such as ANYARRAY or RECORD.	Currently, the
+			 * typoutput functions associated with the pseudotypes will work
+			 * okay, but we might have to try harder in future.
 			 */
 			getTypeOutputInfo(indexRelation->rd_opcintype[i],
 							  &foutoid, &typisvarlena);
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index f5a369715755035ad6b1b888051b3c2e84f733d2..3e7331ae7b88e57c6144b4f823f314cd17114936 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.117 2010/01/02 16:57:35 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.118 2010/02/26 02:00:34 momjian Exp $
  *
  * INTERFACE ROUTINES
  *		index_open		- open an index relation by relation OID
@@ -455,9 +455,9 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
 
 			/*
 			 * If we scanned a whole HOT chain and found only dead tuples,
-			 * tell index AM to kill its entry for that TID. We do not do
-			 * this when in recovery because it may violate MVCC to do so.
-			 * see comments in RelationGetIndexScan().
+			 * tell index AM to kill its entry for that TID. We do not do this
+			 * when in recovery because it may violate MVCC to do so.  See
+			 * comments in RelationGetIndexScan().
 			 */
 			if (!scan->xactStartedInRecovery)
 				scan->kill_prior_tuple = scan->xs_hot_dead;
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 86c8698f6966a55fa0ed36f1de708b76c4091f32..de9bd95f88f30ed591ce02d8142c77d02295d6c7 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.176 2010/01/02 16:57:35 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.177 2010/02/26 02:00:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -88,7 +88,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer);
  *		and btinsert.  By here, itup is filled in, including the TID.
  *
  *		If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
- *		will allow duplicates.  Otherwise (UNIQUE_CHECK_YES or
+ *		will allow duplicates.	Otherwise (UNIQUE_CHECK_YES or
  *		UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
  *		For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
  *		don't actually insert.
@@ -149,9 +149,9 @@ top:
 	 * If we must wait for another xact, we release the lock while waiting,
 	 * and then must start over completely.
 	 *
-	 * For a partial uniqueness check, we don't wait for the other xact.
-	 * Just let the tuple in and return false for possibly non-unique,
-	 * or true for definitely unique.
+	 * For a partial uniqueness check, we don't wait for the other xact. Just
+	 * let the tuple in and return false for possibly non-unique, or true for
+	 * definitely unique.
 	 */
 	if (checkUnique != UNIQUE_CHECK_NO)
 	{
@@ -281,7 +281,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 
 				/*
 				 * If we are doing a recheck, we expect to find the tuple we
-				 * are rechecking.  It's not a duplicate, but we have to keep
+				 * are rechecking.	It's not a duplicate, but we have to keep
 				 * scanning.
 				 */
 				if (checkUnique == UNIQUE_CHECK_EXISTING &&
@@ -302,10 +302,10 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 
 					/*
 					 * It is a duplicate. If we are only doing a partial
-					 * check, then don't bother checking if the tuple is
-					 * being updated in another transaction. Just return
-					 * the fact that it is a potential conflict and leave
-					 * the full check till later.
+					 * check, then don't bother checking if the tuple is being
+					 * updated in another transaction. Just return the fact
+					 * that it is a potential conflict and leave the full
+					 * check till later.
 					 */
 					if (checkUnique == UNIQUE_CHECK_PARTIAL)
 					{
@@ -362,20 +362,20 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 					}
 
 					/*
-					 * This is a definite conflict.  Break the tuple down
-					 * into datums and report the error.  But first, make
-					 * sure we release the buffer locks we're holding ---
+					 * This is a definite conflict.  Break the tuple down into
+					 * datums and report the error.  But first, make sure we
+					 * release the buffer locks we're holding ---
 					 * BuildIndexValueDescription could make catalog accesses,
-					 * which in the worst case might touch this same index
-					 * and cause deadlocks.
+					 * which in the worst case might touch this same index and
+					 * cause deadlocks.
 					 */
 					if (nbuf != InvalidBuffer)
 						_bt_relbuf(rel, nbuf);
 					_bt_relbuf(rel, buf);
 
 					{
-						Datum	values[INDEX_MAX_KEYS];
-						bool	isnull[INDEX_MAX_KEYS];
+						Datum		values[INDEX_MAX_KEYS];
+						bool		isnull[INDEX_MAX_KEYS];
 
 						index_deform_tuple(itup, RelationGetDescr(rel),
 										   values, isnull);
@@ -385,7 +385,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 										RelationGetRelationName(rel)),
 								 errdetail("Key %s already exists.",
 										   BuildIndexValueDescription(rel,
-															values, isnull))));
+														  values, isnull))));
 					}
 				}
 				else if (all_dead)
@@ -438,16 +438,16 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 	}
 
 	/*
-	 * If we are doing a recheck then we should have found the tuple we
-	 * are checking.  Otherwise there's something very wrong --- probably,
-	 * the index is on a non-immutable expression.
+	 * If we are doing a recheck then we should have found the tuple we are
+	 * checking.  Otherwise there's something very wrong --- probably, the
+	 * index is on a non-immutable expression.
 	 */
 	if (checkUnique == UNIQUE_CHECK_EXISTING && !found)
 		ereport(ERROR,
 				(errcode(ERRCODE_INTERNAL_ERROR),
 				 errmsg("failed to re-find tuple within index \"%s\"",
 						RelationGetRelationName(rel)),
-				 errhint("This may be because of a non-immutable index expression.")));
+		errhint("This may be because of a non-immutable index expression.")));
 
 	if (nbuf != InvalidBuffer)
 		_bt_relbuf(rel, nbuf);
@@ -518,10 +518,10 @@ _bt_findinsertloc(Relation rel,
 	if (itemsz > BTMaxItemSize(page))
 		ereport(ERROR,
 				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
-				 errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
-						(unsigned long) itemsz,
-						(unsigned long) BTMaxItemSize(page),
-						RelationGetRelationName(rel)),
+			errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
+				   (unsigned long) itemsz,
+				   (unsigned long) BTMaxItemSize(page),
+				   RelationGetRelationName(rel)),
 		errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
 				"Consider a function index of an MD5 hash of the value, "
 				"or use full text indexing.")));
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 5df975e4ec53af9edc7c1c52ffa12d02e6eafeb8..c0502e55833258dd2d17f33c68cef0e0c7a612b1 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.119 2010/02/13 00:59:58 sriggs Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.120 2010/02/26 02:00:34 momjian Exp $
  *
  *	NOTES
  *	   Postgres btree pages look like ordinary relation pages.	The opaque
@@ -459,8 +459,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
 	START_CRIT_SECTION();
 
 	/*
-	 * We don't do MarkBufferDirty here because we're about initialise
-	 * the page, and nobody else can see it yet.
+	 * We don't do MarkBufferDirty here because we're about to initialise
+	 * the page, and nobody else can see it yet.
 	 */
 
 	/* XLOG stuff */
@@ -480,8 +480,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
 		recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE, rdata);
 
 		/*
-		 * We don't do PageSetLSN or PageSetTLI here because
-		 * we're about initialise the page, so no need.
+		 * We don't do PageSetLSN or PageSetTLI here because we're about to
+		 * initialise the page, so no need.
 		 */
 	}
 
@@ -552,11 +552,11 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
 			{
 				page = BufferGetPage(buf);
 				if (_bt_page_recyclable(page))
-				{					
+				{
 					/*
-					 * If we are generating WAL for Hot Standby then create
-					 * a WAL record that will allow us to conflict with
-					 * queries running on standby.
+					 * If we are generating WAL for Hot Standby then create a
+					 * WAL record that will allow us to conflict with queries
+					 * running on standby.
 					 */
 					if (XLogStandbyInfoActive())
 					{
@@ -762,6 +762,7 @@ _bt_delitems(Relation rel, Buffer buf,
 		if (isVacuum)
 		{
 			xl_btree_vacuum xlrec_vacuum;
+
 			xlrec_vacuum.node = rel->rd_node;
 			xlrec_vacuum.block = BufferGetBlockNumber(buf);
 
@@ -772,6 +773,7 @@ _bt_delitems(Relation rel, Buffer buf,
 		else
 		{
 			xl_btree_delete xlrec_delete;
+
 			xlrec_delete.node = rel->rd_node;
 			xlrec_delete.block = BufferGetBlockNumber(buf);
 
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index b0acaf257f27856f87b13057c7b46ddaaa5ce1c0..01899cfc1666062b38961f7cb28af703d2c4787c 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.175 2010/02/08 04:33:53 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.176 2010/02/26 02:00:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -57,8 +57,8 @@ typedef struct
 	IndexBulkDeleteCallback callback;
 	void	   *callback_state;
 	BTCycleId	cycleid;
-	BlockNumber lastBlockVacuumed; 	/* last blkno reached by Vacuum scan */
-	BlockNumber lastUsedPage;		/* blkno of last non-recyclable page */
+	BlockNumber lastBlockVacuumed;		/* last blkno reached by Vacuum scan */
+	BlockNumber lastUsedPage;	/* blkno of last non-recyclable page */
 	BlockNumber totFreePages;	/* true total # of free pages */
 	MemoryContext pagedelcontext;
 } BTVacState;
@@ -630,7 +630,7 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	vstate.callback = callback;
 	vstate.callback_state = callback_state;
 	vstate.cycleid = cycleid;
-	vstate.lastBlockVacuumed = BTREE_METAPAGE; /* Initialise at first block */
+	vstate.lastBlockVacuumed = BTREE_METAPAGE;	/* Initialise at first block */
 	vstate.lastUsedPage = BTREE_METAPAGE;
 	vstate.totFreePages = 0;
 
@@ -702,8 +702,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 		/*
 		 * We can't use _bt_getbuf() here because it always applies
 		 * _bt_checkpage(), which will barf on an all-zero page. We want to
-		 * recycle all-zero pages, not fail.  Also, we want to use a nondefault
-		 * buffer access strategy.
+		 * recycle all-zero pages, not fail.  Also, we want to use a
+		 * nondefault buffer access strategy.
 		 */
 		buf = ReadBufferExtended(rel, MAIN_FORKNUM, num_pages - 1, RBM_NORMAL,
 								 info->strategy);
@@ -856,23 +856,25 @@ restart:
 				htup = &(itup->t_tid);
 
 				/*
-				 * During Hot Standby we currently assume that XLOG_BTREE_VACUUM
-				 * records do not produce conflicts. That is only true as long
-				 * as the callback function depends only upon whether the index
-				 * tuple refers to heap tuples removed in the initial heap scan.
-				 * When vacuum starts it derives a value of OldestXmin. Backends
-				 * taking later snapshots could have a RecentGlobalXmin with a
-				 * later xid than the vacuum's OldestXmin, so it is possible that
-				 * row versions deleted after OldestXmin could be marked as killed
-				 * by other backends. The callback function *could* look at the
-				 * index tuple state in isolation and decide to delete the index
-				 * tuple, though currently it does not. If it ever did, we would
-				 * need to reconsider whether XLOG_BTREE_VACUUM records should
-				 * cause conflicts. If they did cause conflicts they would be
-				 * fairly harsh conflicts, since we haven't yet worked out a way
-				 * to pass a useful value for latestRemovedXid on the
-				 * XLOG_BTREE_VACUUM records. This applies to *any* type of index
-				 * that marks index tuples as killed.
+				 * During Hot Standby we currently assume that
+				 * XLOG_BTREE_VACUUM records do not produce conflicts. That is
+				 * only true as long as the callback function depends only
+				 * upon whether the index tuple refers to heap tuples removed
+				 * in the initial heap scan. When vacuum starts it derives a
+				 * value of OldestXmin. Backends taking later snapshots could
+				 * have a RecentGlobalXmin with a later xid than the vacuum's
+				 * OldestXmin, so it is possible that row versions deleted
+				 * after OldestXmin could be marked as killed by other
+				 * backends. The callback function *could* look at the index
+				 * tuple state in isolation and decide to delete the index
+				 * tuple, though currently it does not. If it ever did, we
+				 * would need to reconsider whether XLOG_BTREE_VACUUM records
+				 * should cause conflicts. If they did cause conflicts they
+				 * would be fairly harsh conflicts, since we haven't yet
+				 * worked out a way to pass a useful value for
+				 * latestRemovedXid on the XLOG_BTREE_VACUUM records. This
+				 * applies to *any* type of index that marks index tuples as
+				 * killed.
 				 */
 				if (callback(htup, callback_state))
 					deletable[ndeletable++] = offnum;
@@ -885,13 +887,13 @@ restart:
 		 */
 		if (ndeletable > 0)
 		{
-			BlockNumber	lastBlockVacuumed = BufferGetBlockNumber(buf);
+			BlockNumber lastBlockVacuumed = BufferGetBlockNumber(buf);
 
 			_bt_delitems(rel, buf, deletable, ndeletable, true, vstate->lastBlockVacuumed);
 
 			/*
-			 * Keep track of the block number of the lastBlockVacuumed, so
-			 * we can scan those blocks as well during WAL replay. This then
+			 * Keep track of the block number of the lastBlockVacuumed, so we
+			 * can scan those blocks as well during WAL replay. This then
 			 * provides concurrency protection and allows btrees to be used
 			 * while in recovery.
 			 */
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 772215c1810f6913ddf028a7a4208650c7d0a8db..84540b7353046f927fd11b2b33d6379265f17a73 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -59,7 +59,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.123 2010/01/20 19:43:40 heikki Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.124 2010/02/26 02:00:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -216,12 +216,13 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
 	wstate.btws_use_wal = XLogIsNeeded() && !wstate.index->rd_istemp;
 
 	/*
-	 * Write an XLOG UNLOGGED record if WAL-logging was skipped because
-	 * WAL archiving is not enabled.
+	 * Write an XLOG UNLOGGED record if WAL-logging was skipped because WAL
+	 * archiving is not enabled.
 	 */
 	if (!wstate.btws_use_wal && !wstate.index->rd_istemp)
 	{
-		char reason[NAMEDATALEN + 20];
+		char		reason[NAMEDATALEN + 20];
+
 		snprintf(reason, sizeof(reason), "b-tree build on \"%s\"",
 				 RelationGetRelationName(wstate.index));
 		XLogReportUnloggedStatement(reason);
@@ -492,10 +493,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
 	if (itupsz > BTMaxItemSize(npage))
 		ereport(ERROR,
 				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
-				 errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
-						(unsigned long) itupsz,
-						(unsigned long) BTMaxItemSize(npage),
-						RelationGetRelationName(wstate->index)),
+			errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
+				   (unsigned long) itupsz,
+				   (unsigned long) BTMaxItemSize(npage),
+				   RelationGetRelationName(wstate->index)),
 		errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
 				"Consider a function index of an MD5 hash of the value, "
 				"or use full text indexing.")));
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index a7a3d7a12d6b441fe9596c6870cf34cbf73c7b3d..6b399d34a66b229cd75fd8617e4476e746be5df3 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.97 2010/01/03 05:39:08 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.98 2010/02/26 02:00:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -515,7 +515,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
 	StrategyNumber strat;
 
 	/*
-	 * First, deal with cases where one or both args are NULL.  This should
+	 * First, deal with cases where one or both args are NULL.	This should
 	 * only happen when the scankeys represent IS NULL/NOT NULL conditions.
 	 */
 	if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL)
@@ -566,7 +566,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
 				break;
 			default:
 				elog(ERROR, "unrecognized StrategyNumber: %d", (int) strat);
-				*result = false;		/* keep compiler quiet */
+				*result = false;	/* keep compiler quiet */
 				break;
 		}
 		return true;
@@ -612,8 +612,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
 	 * indexscan initiated by syscache lookup will use cross-data-type
 	 * operators.)
 	 *
-	 * If the sk_strategy was flipped by _bt_fix_scankey_strategy, we
-	 * have to un-flip it to get the correct opfamily member.
+	 * If the sk_strategy was flipped by _bt_fix_scankey_strategy, we have to
+	 * un-flip it to get the correct opfamily member.
 	 */
 	strat = op->sk_strategy;
 	if (op->sk_flags & SK_BT_DESC)
@@ -653,7 +653,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
  *
  * Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
  * NULL comparison value.  Since all btree operators are assumed strict,
- * a NULL means that the qual cannot be satisfied.  We return TRUE if the
+ * a NULL means that the qual cannot be satisfied.	We return TRUE if the
  * comparison value isn't NULL, or FALSE if the scan should be abandoned.
  *
  * This function is applied to the *input* scankey structure; therefore
@@ -682,7 +682,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
 	 * --- we can treat IS NULL as an equality operator for purposes of search
 	 * strategy.
 	 *
-	 * Likewise, "x IS NOT NULL" is supported.  We treat that as either "less
+	 * Likewise, "x IS NOT NULL" is supported.	We treat that as either "less
 	 * than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS
 	 * FIRST index.
 	 */
@@ -910,13 +910,13 @@ _bt_checkkeys(IndexScanDesc scan,
 			if (key->sk_flags & SK_SEARCHNULL)
 			{
 				if (isNull)
-					continue;		/* tuple satisfies this qual */
+					continue;	/* tuple satisfies this qual */
 			}
 			else
 			{
 				Assert(key->sk_flags & SK_SEARCHNOTNULL);
 				if (!isNull)
-					continue;		/* tuple satisfies this qual */
+					continue;	/* tuple satisfies this qual */
 			}
 
 			/*
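
The nbtutils.c comments touched above describe how scankeys with NULL semantics are evaluated: SK_SEARCHNULL and SK_SEARCHNOTNULL quals are satisfied by the appropriate null-ness of the column, while an ordinary qual with a NULL comparison value can never be satisfied because all btree operators are assumed strict. A compilable toy model of that rule (all names below are made up, not the backend's):

/* nullqual_sketch.c -- illustrative model only, not part of this patch */
#include <stdbool.h>
#include <stdio.h>

typedef enum { QUAL_SEARCHNULL, QUAL_SEARCHNOTNULL, QUAL_ORDINARY } QualKind;

/*
 * Returns whether one qual is satisfied by a column value that may be null.
 * For ordinary quals the operator is assumed strict, so a null argument on
 * either side means the qual cannot be satisfied.
 */
static bool
qual_satisfied(QualKind kind, bool value_is_null, bool cmp_value_is_null,
			   bool ordinary_result)
{
	switch (kind)
	{
		case QUAL_SEARCHNULL:
			return value_is_null;
		case QUAL_SEARCHNOTNULL:
			return !value_is_null;
		case QUAL_ORDINARY:
			if (value_is_null || cmp_value_is_null)
				return false;	/* strict operator with a null input */
			return ordinary_result;
	}
	return false;
}

int
main(void)
{
	printf("IS NULL on null column:  %d\n",
		   (int) qual_satisfied(QUAL_SEARCHNULL, true, false, false));
	printf("x < 5 on null column:    %d\n",
		   (int) qual_satisfied(QUAL_ORDINARY, true, false, true));
	return 0;
}
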
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index f5320fb10396ae29f7c38419ca1a6938d092154a..07416d599b561e468f1c0f16dc140b2a31c13445 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.61 2010/02/13 00:59:58 sriggs Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.62 2010/02/26 02:00:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -473,10 +473,10 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
 	xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
 
 	/*
-	 * If queries might be active then we need to ensure every block is unpinned
-	 * between the lastBlockVacuumed and the current block, if there are any.
-	 * This ensures that every block in the index is touched during VACUUM as
-	 * required to ensure scans work correctly.
+	 * If queries might be active then we need to ensure every block is
+	 * unpinned between the lastBlockVacuumed and the current block, if there
+	 * are any. This ensures that every block in the index is touched during
+	 * VACUUM as required to ensure scans work correctly.
 	 */
 	if (standbyState == STANDBY_SNAPSHOT_READY &&
 		(xlrec->lastBlockVacuumed + 1) != xlrec->block)
@@ -486,10 +486,10 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
 		for (; blkno < xlrec->block; blkno++)
 		{
 			/*
-			 * XXX we don't actually need to read the block, we
-			 * just need to confirm it is unpinned. If we had a special call
-			 * into the buffer manager we could optimise this so that
-			 * if the block is not in shared_buffers we confirm it as unpinned.
+			 * XXX we don't actually need to read the block, we just need to
+			 * confirm it is unpinned. If we had a special call into the
+			 * buffer manager we could optimise this so that if the block is
+			 * not in shared_buffers we confirm it as unpinned.
 			 *
 			 * Another simple optimization would be to check if there's any
 			 * backends running; if not, we could just skip this.
@@ -505,9 +505,9 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
 
 	/*
 	 * If the block was restored from a full page image, nothing more to do.
-	 * The RestoreBkpBlocks() call already pinned and took cleanup lock on
-	 * it. XXX: Perhaps we should call RestoreBkpBlocks() *after* the loop
-	 * above, to make the disk access more sequential.
+	 * The RestoreBkpBlocks() call already pinned and took cleanup lock on it.
+	 * XXX: Perhaps we should call RestoreBkpBlocks() *after* the loop above,
+	 * to make the disk access more sequential.
 	 */
 	if (record->xl_info & XLR_BKP_BLOCK_1)
 		return;
@@ -567,8 +567,8 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
 	xlrec = (xl_btree_delete *) XLogRecGetData(record);
 
 	/*
-	 * We don't need to take a cleanup lock to apply these changes.
-	 * See nbtree/README for details.
+	 * We don't need to take a cleanup lock to apply these changes. See
+	 * nbtree/README for details.
 	 */
 	buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
 	if (!BufferIsValid(buffer))
@@ -819,13 +819,15 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
 		switch (info)
 		{
 			case XLOG_BTREE_DELETE:
+
 				/*
-				 * Btree delete records can conflict with standby queries. You might
-				 * think that vacuum records would conflict as well, but we've handled
-				 * that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
-				 * cleaned by the vacuum of the heap and so we can resolve any conflicts
-				 * just once when that arrives. After that any we know that no conflicts
-				 * exist from individual btree vacuum records on that index.
+				 * Btree delete records can conflict with standby queries. You
+				 * might think that vacuum records would conflict as well, but
+				 * we've handled that already. XLOG_HEAP2_CLEANUP_INFO records
+				 * provide the highest xid cleaned by the vacuum of the heap
+				 * and so we can resolve any conflicts just once when that
+				 * arrives. After that we know that no conflicts exist
+				 * from individual btree vacuum records on that index.
 				 */
 				{
 					xl_btree_delete *xlrec = (xl_btree_delete *) XLogRecGetData(record);
@@ -842,9 +844,11 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
 				break;
 
 			case XLOG_BTREE_REUSE_PAGE:
+
 				/*
-				 * Btree reuse page records exist to provide a conflict point when we
-				 * reuse pages in the index via the FSM. That's all it does though.
+				 * Btree reuse page records exist to provide a conflict point
+				 * when we reuse pages in the index via the FSM. That's all they
+				 * do, though.
 				 */
 				{
 					xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) XLogRecGetData(record);
@@ -859,8 +863,8 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
 	}
 
 	/*
-	 * Vacuum needs to pin and take cleanup lock on every leaf page,
-	 * a regular exclusive lock is enough for all other purposes.
+	 * Vacuum needs to pin and take cleanup lock on every leaf page, a regular
+	 * exclusive lock is enough for all other purposes.
 	 */
 	RestoreBkpBlocks(lsn, record, (info == XLOG_BTREE_VACUUM));
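
The btree_xlog_vacuum comments reformatted above explain that, during Hot Standby replay, every block between lastBlockVacuumed and the block named in the WAL record must be visited, so that no standby backend keeps a pin the primary's VACUUM would have waited out. A minimal standalone sketch of that loop, with invented names (the real code reads each buffer with a cleanup lock rather than printing):

/* replay_touch_sketch.c -- illustrative model only, not part of this patch */
#include <stdio.h>

typedef unsigned int BlockNumber;

/* Stand-in for pinning the block with a cleanup lock and releasing it */
static void
touch_block(BlockNumber blkno)
{
	printf("pin + cleanup-lock block %u, then release\n", blkno);
}

/*
 * Model of the replay-side loop: visit every block the vacuum scan passed
 * over since the previous vacuum record, up to (but excluding) this one.
 */
static void
replay_vacuum_record(BlockNumber lastBlockVacuumed, BlockNumber block)
{
	for (BlockNumber blkno = lastBlockVacuumed + 1; blkno < block; blkno++)
		touch_block(blkno);
}

int
main(void)
{
	replay_vacuum_record(3, 7);	/* touches blocks 4, 5 and 6 */
	return 0;
}

As the comments already note, a special buffer-manager entry point could skip blocks that are not resident in shared_buffers at all; the sketch ignores that optimisation.
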
 
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 92e1aeb3fd2ebb7ce5d4ea9d0fcfb9722334b56c..3f3bdc03353a819b5242664f0d9a638a23241ff2 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -42,7 +42,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.34 2010/01/02 16:57:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.35 2010/02/26 02:00:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1298,11 +1298,11 @@ PostPrepare_MultiXact(TransactionId xid)
 	myOldestMember = OldestMemberMXactId[MyBackendId];
 	if (MultiXactIdIsValid(myOldestMember))
 	{
-		BackendId dummyBackendId = TwoPhaseGetDummyBackendId(xid);
+		BackendId	dummyBackendId = TwoPhaseGetDummyBackendId(xid);
 
 		/*
-		 * Even though storing MultiXactId is atomic, acquire lock to make sure
-		 * others see both changes, not just the reset of the slot of the
+		 * Even though storing MultiXactId is atomic, acquire lock to make
+		 * sure others see both changes, not just the reset of the slot of the
 		 * current backend. Using a volatile pointer might suffice, but this
 		 * isn't a hot spot.
 		 */
@@ -1316,8 +1316,8 @@ PostPrepare_MultiXact(TransactionId xid)
 
 	/*
 	 * We don't need to transfer OldestVisibleMXactId value, because the
-	 * transaction is not going to be looking at any more multixacts once
-	 * it's prepared.
+	 * transaction is not going to be looking at any more multixacts once it's
+	 * prepared.
 	 *
 	 * We assume that storing a MultiXactId is atomic and so we need not take
 	 * MultiXactGenLock to do this.
@@ -1340,14 +1340,14 @@ multixact_twophase_recover(TransactionId xid, uint16 info,
 						   void *recdata, uint32 len)
 {
 	BackendId	dummyBackendId = TwoPhaseGetDummyBackendId(xid);
-	MultiXactId	oldestMember;
+	MultiXactId oldestMember;
 
 	/*
-	 * Get the oldest member XID from the state file record, and set it in
-	 * the OldestMemberMXactId slot reserved for this prepared transaction.
+	 * Get the oldest member XID from the state file record, and set it in the
+	 * OldestMemberMXactId slot reserved for this prepared transaction.
 	 */
 	Assert(len == sizeof(MultiXactId));
-	oldestMember = *((MultiXactId *)recdata);
+	oldestMember = *((MultiXactId *) recdata);
 
 	OldestMemberMXactId[dummyBackendId] = oldestMember;
 }
@@ -1373,7 +1373,7 @@ multixact_twophase_postcommit(TransactionId xid, uint16 info,
  */
 void
 multixact_twophase_postabort(TransactionId xid, uint16 info,
-						void *recdata, uint32 len)
+							 void *recdata, uint32 len)
 {
 	multixact_twophase_postcommit(xid, info, recdata, len);
 }
@@ -2031,9 +2031,10 @@ multixact_redo(XLogRecPtr lsn, XLogRecord *record)
 				max_xid = xids[i];
 		}
 
-		/* We don't expect anyone else to modify nextXid, hence startup process
-		 * doesn't need to hold a lock while checking this. We still acquire
-		 * the lock to modify it, though.
+		/*
+		 * We don't expect anyone else to modify nextXid, hence startup
+		 * process doesn't need to hold a lock while checking this. We still
+		 * acquire the lock to modify it, though.
 		 */
 		if (TransactionIdFollowsOrEquals(max_xid,
 										 ShmemVariableCache->nextXid))
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index eac83a7b5382ec6733592f00df6fca9111484dcb..4ccb0c239b0af0b4f404dd926fd4878c4af904b4 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -22,7 +22,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.26 2010/01/02 16:57:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.27 2010/02/26 02:00:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -89,7 +89,7 @@ SubTransSetParent(TransactionId xid, TransactionId parent, bool overwriteOK)
 
 	/* Current state should be 0 */
 	Assert(*ptr == InvalidTransactionId ||
-			(*ptr == parent && overwriteOK));
+		   (*ptr == parent && overwriteOK));
 
 	*ptr = parent;
 
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index ee9da91f68aa41fe2004809c507328cdff0f6b90..b1bf2c4f26052ade81df863a947700e459783a74 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *		$PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.58 2010/01/02 16:57:35 momjian Exp $
+ *		$PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.59 2010/02/26 02:00:34 momjian Exp $
  *
  * NOTES
  *		Each global transaction is associated with a global transaction
@@ -110,7 +110,7 @@ int			max_prepared_xacts = 0;
 typedef struct GlobalTransactionData
 {
 	PGPROC		proc;			/* dummy proc */
-	BackendId	dummyBackendId;	/* similar to backend id for backends */
+	BackendId	dummyBackendId; /* similar to backend id for backends */
 	TimestampTz prepared_at;	/* time of preparation */
 	XLogRecPtr	prepare_lsn;	/* XLOG offset of prepare record */
 	Oid			owner;			/* ID of user that executed the xact */
@@ -209,14 +209,14 @@ TwoPhaseShmemInit(void)
 			/*
 			 * Assign a unique ID for each dummy proc, so that the range of
 			 * dummy backend IDs immediately follows the range of normal
-			 * backend IDs. We don't dare to assign a real backend ID to
-			 * dummy procs, because prepared transactions don't take part in
-			 * cache invalidation like a real backend ID would imply, but
-			 * having a unique ID for them is nevertheless handy. This
-			 * arrangement allows you to allocate an array of size
-			 * (MaxBackends + max_prepared_xacts + 1), and have a slot for
-			 * every backend and prepared transaction. Currently multixact.c
-			 * uses that technique.
+			 * backend IDs. We don't dare to assign a real backend ID to dummy
+			 * procs, because prepared transactions don't take part in cache
+			 * invalidation like a real backend ID would imply, but having a
+			 * unique ID for them is nevertheless handy. This arrangement
+			 * allows you to allocate an array of size (MaxBackends +
+			 * max_prepared_xacts + 1), and have a slot for every backend and
+			 * prepared transaction. Currently multixact.c uses that
+			 * technique.
 			 */
 			gxacts[i].dummyBackendId = MaxBackends + 1 + i;
 		}
@@ -677,7 +677,7 @@ pg_prepared_xact(PG_FUNCTION_ARGS)
 BackendId
 TwoPhaseGetDummyBackendId(TransactionId xid)
 {
-	PGPROC *proc = TwoPhaseGetDummyProc(xid);
+	PGPROC	   *proc = TwoPhaseGetDummyProc(xid);
 
 	return ((GlobalTransaction) proc)->dummyBackendId;
 }
@@ -874,8 +874,8 @@ StartPrepare(GlobalTransaction gxact)
 	save_state_data(&hdr, sizeof(TwoPhaseFileHeader));
 
 	/*
-	 * Add the additional info about subxacts, deletable files and
-	 * cache invalidation messages.
+	 * Add the additional info about subxacts, deletable files and cache
+	 * invalidation messages.
 	 */
 	if (hdr.nsubxacts > 0)
 	{
@@ -1331,8 +1331,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
 	/*
 	 * Handle cache invalidation messages.
 	 *
-	 * Relcache init file invalidation requires processing both
-	 * before and after we send the SI messages. See AtEOXact_Inval()
+	 * Relcache init file invalidation requires processing both before and
+	 * after we send the SI messages. See AtEOXact_Inval()
 	 */
 	if (hdr->initfileinval)
 		RelationCacheInitFileInvalidate(true);
@@ -1786,8 +1786,8 @@ RecoverPreparedTransactions(void)
 			bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
 
 			/*
-			 * It's possible that SubTransSetParent has been set before, if the
-			 * prepared transaction generated xid assignment records. Test
+			 * It's possible that SubTransSetParent has been set before, if
+			 * the prepared transaction generated xid assignment records. Test
 			 * here must match one used in AssignTransactionId().
 			 */
 			if (InHotStandby && hdr->nsubxacts >= PGPROC_MAX_CACHED_SUBXIDS)
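
The TwoPhaseShmemInit comment rewrapped above spells out the dummy-backend-ID layout: prepared-transaction procs get IDs MaxBackends + 1 + i, so a single array of MaxBackends + max_prepared_xacts + 1 slots has room for every backend and every prepared transaction. A toy illustration with assumed settings (nothing below is PostgreSQL code):

/* slots_sketch.c -- illustrative model only, not part of this patch */
#include <stdio.h>

int
main(void)
{
	/* assumed settings, purely for illustration */
	int			MaxBackends = 4;
	int			max_prepared_xacts = 2;

	/* real backends use IDs 1..MaxBackends */
	for (int backend = 1; backend <= MaxBackends; backend++)
		printf("backend %d -> slot %d\n", backend, backend);

	/* dummy procs for prepared xacts continue the range: MaxBackends + 1 + i */
	for (int i = 0; i < max_prepared_xacts; i++)
		printf("prepared xact %d -> slot %d\n", i, MaxBackends + 1 + i);

	/* one array of this size therefore has a slot for everyone */
	printf("array size needed: %d\n", MaxBackends + max_prepared_xacts + 1);
	return 0;
}
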
diff --git a/src/backend/access/transam/twophase_rmgr.c b/src/backend/access/transam/twophase_rmgr.c
index 86a1d12f93d302d126917a2162da0cd130498993..d8f7fb6a0352d43d094f11a6a08eb19e25f3c560 100644
--- a/src/backend/access/transam/twophase_rmgr.c
+++ b/src/backend/access/transam/twophase_rmgr.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.13 2010/02/16 22:34:43 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.14 2010/02/26 02:00:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -32,8 +32,8 @@ const TwoPhaseCallback twophase_postcommit_callbacks[TWOPHASE_RM_MAX_ID + 1] =
 {
 	NULL,						/* END ID */
 	lock_twophase_postcommit,	/* Lock */
-	pgstat_twophase_postcommit,	/* pgstat */
-	multixact_twophase_postcommit /* MultiXact */
+	pgstat_twophase_postcommit, /* pgstat */
+	multixact_twophase_postcommit		/* MultiXact */
 };
 
 const TwoPhaseCallback twophase_postabort_callbacks[TWOPHASE_RM_MAX_ID + 1] =
@@ -41,7 +41,7 @@ const TwoPhaseCallback twophase_postabort_callbacks[TWOPHASE_RM_MAX_ID + 1] =
 	NULL,						/* END ID */
 	lock_twophase_postabort,	/* Lock */
 	pgstat_twophase_postabort,	/* pgstat */
-	multixact_twophase_postabort /* MultiXact */
+	multixact_twophase_postabort	/* MultiXact */
 };
 
 const TwoPhaseCallback twophase_standby_recover_callbacks[TWOPHASE_RM_MAX_ID + 1] =
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 60b5d3bd514b5e6ff610e23fd6e47b2a84a584e5..4f3c0ae4524e9a2cbec83a9a5c6c4efd6d3b2dda 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.90 2010/02/20 21:24:01 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.91 2010/02/26 02:00:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -39,7 +39,7 @@ VariableCache ShmemVariableCache = NULL;
  *
  * Note: when this is called, we are actually already inside a valid
  * transaction, since XIDs are now not allocated until the transaction
- * does something.  So it is safe to do a database lookup if we want to
+ * does something.	So it is safe to do a database lookup if we want to
  * issue a warning about XID wrap.
  */
 TransactionId
@@ -83,13 +83,13 @@ GetNewTransactionId(bool isSubXact)
 		 * For safety's sake, we release XidGenLock while sending signals,
 		 * warnings, etc.  This is not so much because we care about
 		 * preserving concurrency in this situation, as to avoid any
-		 * possibility of deadlock while doing get_database_name().
-		 * First, copy all the shared values we'll need in this path.
+		 * possibility of deadlock while doing get_database_name(). First,
+		 * copy all the shared values we'll need in this path.
 		 */
 		TransactionId xidWarnLimit = ShmemVariableCache->xidWarnLimit;
 		TransactionId xidStopLimit = ShmemVariableCache->xidStopLimit;
 		TransactionId xidWrapLimit = ShmemVariableCache->xidWrapLimit;
-		Oid		oldest_datoid = ShmemVariableCache->oldestXidDB;
+		Oid			oldest_datoid = ShmemVariableCache->oldestXidDB;
 
 		LWLockRelease(XidGenLock);
 
@@ -104,7 +104,7 @@ GetNewTransactionId(bool isSubXact)
 		if (IsUnderPostmaster &&
 			TransactionIdFollowsOrEquals(xid, xidStopLimit))
 		{
-			char   *oldest_datname = get_database_name(oldest_datoid);
+			char	   *oldest_datname = get_database_name(oldest_datoid);
 
 			/* complain even if that DB has disappeared */
 			if (oldest_datname)
@@ -124,7 +124,7 @@ GetNewTransactionId(bool isSubXact)
 		}
 		else if (TransactionIdFollowsOrEquals(xid, xidWarnLimit))
 		{
-			char   *oldest_datname = get_database_name(oldest_datoid);
+			char	   *oldest_datname = get_database_name(oldest_datoid);
 
 			/* complain even if that DB has disappeared */
 			if (oldest_datname)
@@ -329,8 +329,8 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
 
 	/* Log the info */
 	ereport(DEBUG1,
-	   (errmsg("transaction ID wrap limit is %u, limited by database with OID %u",
-			   xidWrapLimit, oldest_datoid)));
+			(errmsg("transaction ID wrap limit is %u, limited by database with OID %u",
+					xidWrapLimit, oldest_datoid)));
 
 	/*
 	 * If past the autovacuum force point, immediately signal an autovac
@@ -346,7 +346,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
 	/* Give an immediate warning if past the wrap warn point */
 	if (TransactionIdFollowsOrEquals(curXid, xidWarnLimit) && !InRecovery)
 	{
-		char   *oldest_datname = get_database_name(oldest_datoid);
+		char	   *oldest_datname = get_database_name(oldest_datoid);
 
 		/*
 		 * Note: it's possible that get_database_name fails and returns NULL,
@@ -355,11 +355,11 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
 		 */
 		if (oldest_datname)
 			ereport(WARNING,
-					(errmsg("database \"%s\" must be vacuumed within %u transactions",
-							oldest_datname,
-							xidWrapLimit - curXid),
-					 errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n"
-							 "You might also need to commit or roll back old prepared transactions.")));
+			(errmsg("database \"%s\" must be vacuumed within %u transactions",
+					oldest_datname,
+					xidWrapLimit - curXid),
+			 errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n"
+					 "You might also need to commit or roll back old prepared transactions.")));
 		else
 			ereport(WARNING,
 					(errmsg("database with OID %u must be vacuumed within %u transactions",
@@ -377,7 +377,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
  * We primarily check whether oldestXidDB is valid.  The cases we have in
  * mind are that that database was dropped, or the field was reset to zero
  * by pg_resetxlog.  In either case we should force recalculation of the
- * wrap limit.  Also do it if oldestXid is old enough to be forcing
+ * wrap limit.	Also do it if oldestXid is old enough to be forcing
  * autovacuums or other actions; this ensures we update our state as soon
  * as possible once extra overhead is being incurred.
  */
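
The GetNewTransactionId hunks above reflow the comments around the wraparound defences: past xidWarnLimit the backend warns, and past xidStopLimit it refuses to hand out new XIDs until a database-wide VACUUM has run. A toy model of those comparisons under assumed, non-wrapping limits (the real TransactionIdFollowsOrEquals uses modulo-2^31 arithmetic; everything below is invented for illustration):

/* xidlimits_sketch.c -- illustrative model only, not part of this patch */
#include <stdio.h>

typedef unsigned int TransactionId;

/* stand-in for TransactionIdFollowsOrEquals on non-wrapping toy values */
static int
follows_or_equals(TransactionId a, TransactionId b)
{
	return a >= b;
}

int
main(void)
{
	/* assumed limits, purely illustrative */
	TransactionId xidWarnLimit = 1000;
	TransactionId xidStopLimit = 2000;

	for (TransactionId xid = 900; xid <= 2100; xid += 600)
	{
		if (follows_or_equals(xid, xidStopLimit))
			printf("xid %u: refuse new XIDs until a database-wide VACUUM\n", xid);
		else if (follows_or_equals(xid, xidWarnLimit))
			printf("xid %u: warn that wraparound is approaching\n", xid);
		else
			printf("xid %u: ok\n", xid);
	}
	return 0;
}
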
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 044afd582dd464ffd807663832c729591a4cd287..43966d5ab6f9c0efab9f2dda9cecfcad92f023e5 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -10,7 +10,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.288 2010/02/20 21:24:01 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.289 2010/02/26 02:00:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -139,9 +139,9 @@ typedef struct TransactionStateData
 	int			nChildXids;		/* # of subcommitted child XIDs */
 	int			maxChildXids;	/* allocated size of childXids[] */
 	Oid			prevUser;		/* previous CurrentUserId setting */
-	int			prevSecContext;	/* previous SecurityRestrictionContext */
+	int			prevSecContext; /* previous SecurityRestrictionContext */
 	bool		prevXactReadOnly;		/* entry-time xact r/o state */
-	bool		startedInRecovery;	/* did we start in recovery? */
+	bool		startedInRecovery;		/* did we start in recovery? */
 	struct TransactionStateData *parent;		/* back link to parent */
 } TransactionStateData;
 
@@ -178,7 +178,7 @@ static TransactionStateData TopTransactionStateData = {
  * unreportedXids holds XIDs of all subtransactions that have not yet been
  * reported in a XLOG_XACT_ASSIGNMENT record.
  */
-static int nUnreportedXids;
+static int	nUnreportedXids;
 static TransactionId unreportedXids[PGPROC_MAX_CACHED_SUBXIDS];
 
 static TransactionState CurrentTransactionState = &TopTransactionStateData;
@@ -452,25 +452,28 @@ AssignTransactionId(TransactionState s)
 	 * include the top-level xid and all the subxids that have not yet been
 	 * reported using XLOG_XACT_ASSIGNMENT records.
 	 *
-	 * This is required to limit the amount of shared memory required in a
-	 * hot standby server to keep track of in-progress XIDs. See notes for
+	 * This is required to limit the amount of shared memory required in a hot
+	 * standby server to keep track of in-progress XIDs. See notes for
 	 * RecordKnownAssignedTransactionIds().
 	 *
-	 * We don't keep track of the immediate parent of each subxid,
-	 * only the top-level transaction that each subxact belongs to. This
-	 * is correct in recovery only because aborted subtransactions are
-	 * separately WAL logged.
+	 * We don't keep track of the immediate parent of each subxid, only the
+	 * top-level transaction that each subxact belongs to. This is correct in
+	 * recovery only because aborted subtransactions are separately WAL
+	 * logged.
 	 */
 	if (isSubXact && XLogStandbyInfoActive())
 	{
 		unreportedXids[nUnreportedXids] = s->transactionId;
 		nUnreportedXids++;
 
-		/* ensure this test matches similar one in RecoverPreparedTransactions() */
+		/*
+		 * ensure this test matches similar one in
+		 * RecoverPreparedTransactions()
+		 */
 		if (nUnreportedXids >= PGPROC_MAX_CACHED_SUBXIDS)
 		{
 			XLogRecData rdata[2];
-			xl_xact_assignment	xlrec;
+			xl_xact_assignment xlrec;
 
 			/*
 			 * xtop is always set by now because we recurse up transaction
@@ -899,6 +902,7 @@ RecordTransactionCommit(void)
 	nchildren = xactGetCommittedChildren(&children);
 	nmsgs = xactGetCommittedInvalidationMessages(&invalMessages,
 												 &RelcacheInitFileInval);
+
 	/*
 	 * If we haven't been assigned an XID yet, we neither can, nor do we want
 	 * to write a COMMIT record.
@@ -1098,10 +1102,9 @@ static void
 AtCCI_LocalCache(void)
 {
 	/*
-	 * Make any pending relation map changes visible.  We must do this
-	 * before processing local sinval messages, so that the map changes
-	 * will get reflected into the relcache when relcache invals are
-	 * processed.
+	 * Make any pending relation map changes visible.  We must do this before
+	 * processing local sinval messages, so that the map changes will get
+	 * reflected into the relcache when relcache invals are processed.
 	 */
 	AtCCI_RelationMap();
 
@@ -1227,9 +1230,9 @@ AtSubCommit_childXids(void)
 	 *
 	 * Note: We rely on the fact that the XID of a child always follows that
 	 * of its parent.  By copying the XID of this subtransaction before the
-	 * XIDs of its children, we ensure that the array stays ordered.
-	 * Likewise, all XIDs already in the array belong to subtransactions
-	 * started and subcommitted before us, so their XIDs must precede ours.
+	 * XIDs of its children, we ensure that the array stays ordered. Likewise,
+	 * all XIDs already in the array belong to subtransactions started and
+	 * subcommitted before us, so their XIDs must precede ours.
 	 */
 	s->parent->childXids[s->parent->nChildXids] = s->transactionId;
 
@@ -1457,10 +1460,10 @@ AtSubAbort_childXids(void)
 	s->maxChildXids = 0;
 
 	/*
-	 * We could prune the unreportedXids array here. But we don't bother.
-	 * That would potentially reduce number of XLOG_XACT_ASSIGNMENT records
-	 * but it would likely introduce more CPU time into the more common
-	 * paths, so we choose not to do that.
+	 * We could prune the unreportedXids array here. But we don't bother. That
+	 * would potentially reduce number of XLOG_XACT_ASSIGNMENT records but it
+	 * would likely introduce more CPU time into the more common paths, so we
+	 * choose not to do that.
 	 */
 }
 
@@ -2162,7 +2165,7 @@ AbortTransaction(void)
 	/*
 	 * do abort processing
 	 */
-	AfterTriggerEndXact(false);			/* 'false' means it's abort */
+	AfterTriggerEndXact(false); /* 'false' means it's abort */
 	AtAbort_Portals();
 	AtEOXact_LargeObject(false);
 	AtAbort_Notify();
@@ -4362,9 +4365,9 @@ xact_redo_commit(xl_xact_commit *xlrec, TransactionId xid, XLogRecPtr lsn)
 	/*
 	 * Make sure nextXid is beyond any XID mentioned in the record.
 	 *
-	 * We don't expect anyone else to modify nextXid, hence we
-	 * don't need to hold a lock while checking this. We still acquire
-	 * the lock to modify it, though.
+	 * We don't expect anyone else to modify nextXid, hence we don't need to
+	 * hold a lock while checking this. We still acquire the lock to modify
+	 * it, though.
 	 */
 	if (TransactionIdFollowsOrEquals(max_xid,
 									 ShmemVariableCache->nextXid))
@@ -4400,8 +4403,8 @@ xact_redo_commit(xl_xact_commit *xlrec, TransactionId xid, XLogRecPtr lsn)
 		 * protocol during recovery to provide information on database
 		 * consistency for when users try to set hint bits. It is important
 		 * that we do not set hint bits until the minRecoveryPoint is past
-		 * this commit record. This ensures that if we crash we don't see
-		 * hint bits set on changes made by transactions that haven't yet
+		 * this commit record. This ensures that if we crash we don't see hint
+		 * bits set on changes made by transactions that haven't yet
 		 * recovered. It's unlikely but it's good to be safe.
 		 */
 		TransactionIdAsyncCommitTree(xid, xlrec->nsubxacts, sub_xids, lsn);
@@ -4413,17 +4416,17 @@ xact_redo_commit(xl_xact_commit *xlrec, TransactionId xid, XLogRecPtr lsn)
 
 		/*
 		 * Send any cache invalidations attached to the commit. We must
-		 * maintain the same order of invalidation then release locks
-		 * as occurs in 	.
+		 * maintain the same order of invalidation then release locks as
+		 * occurs in	 .
 		 */
 		ProcessCommittedInvalidationMessages(inval_msgs, xlrec->nmsgs,
-									XactCompletionRelcacheInitFileInval(xlrec),
-									xlrec->dbId, xlrec->tsId);
+								  XactCompletionRelcacheInitFileInval(xlrec),
+											 xlrec->dbId, xlrec->tsId);
 
 		/*
-		 * Release locks, if any. We do this for both two phase and normal
-		 * one phase transactions. In effect we are ignoring the prepare
-		 * phase and just going straight to lock release.
+		 * Release locks, if any. We do this for both two phase and normal one
+		 * phase transactions. In effect we are ignoring the prepare phase and
+		 * just going straight to lock release.
 		 */
 		StandbyReleaseLockTree(xid, xlrec->nsubxacts, sub_xids);
 	}
@@ -4446,15 +4449,16 @@ xact_redo_commit(xl_xact_commit *xlrec, TransactionId xid, XLogRecPtr lsn)
 	}
 
 	/*
-	 * We issue an XLogFlush() for the same reason we emit ForceSyncCommit() in
-	 * normal operation. For example, in DROP DATABASE, we delete all the files
-	 * belonging to the database, and then commit the transaction. If we crash
-	 * after all the files have been deleted but before the commit, you have an
-	 * entry in pg_database without any files. To minimize the window for that,
-	 * we use ForceSyncCommit() to rush the commit record to disk as quick as
-	 * possible. We have the same window during recovery, and forcing an
-	 * XLogFlush() (which updates minRecoveryPoint during recovery) helps
-	 * to reduce that problem window, for any user that requested ForceSyncCommit().
+	 * We issue an XLogFlush() for the same reason we emit ForceSyncCommit()
+	 * in normal operation. For example, in DROP DATABASE, we delete all the
+	 * files belonging to the database, and then commit the transaction. If we
+	 * crash after all the files have been deleted but before the commit, you
+	 * have an entry in pg_database without any files. To minimize the window
+	 * for that, we use ForceSyncCommit() to rush the commit record to disk as
+	 * quick as possible. We have the same window during recovery, and forcing
+	 * an XLogFlush() (which updates minRecoveryPoint during recovery) helps
+	 * to reduce that problem window, for any user that requested
+	 * ForceSyncCommit().
 	 */
 	if (XactCompletionForceSyncCommit(xlrec))
 		XLogFlush(lsn);
@@ -4480,9 +4484,11 @@ xact_redo_abort(xl_xact_abort *xlrec, TransactionId xid)
 	max_xid = TransactionIdLatest(xid, xlrec->nsubxacts, sub_xids);
 
 	/* Make sure nextXid is beyond any XID mentioned in the record */
-	/* We don't expect anyone else to modify nextXid, hence we
-	 * don't need to hold a lock while checking this. We still acquire
-	 * the lock to modify it, though.
+
+	/*
+	 * We don't expect anyone else to modify nextXid, hence we don't need to
+	 * hold a lock while checking this. We still acquire the lock to modify
+	 * it, though.
 	 */
 	if (TransactionIdFollowsOrEquals(max_xid,
 									 ShmemVariableCache->nextXid))
@@ -4496,12 +4502,13 @@ xact_redo_abort(xl_xact_abort *xlrec, TransactionId xid)
 	if (InHotStandby)
 	{
 		/*
-		 * If a transaction completion record arrives that has as-yet unobserved
-		 * subtransactions then this will not have been fully handled by the call
-		 * to RecordKnownAssignedTransactionIds() in the main recovery loop in
-		 * xlog.c. So we need to do bookkeeping again to cover that case. This is
-		 * confusing and it is easy to think this call is irrelevant, which has
-		 * happened three times in development already. Leave it in.
+		 * If a transaction completion record arrives that has as-yet
+		 * unobserved subtransactions then this will not have been fully
+		 * handled by the call to RecordKnownAssignedTransactionIds() in the
+		 * main recovery loop in xlog.c. So we need to do bookkeeping again to
+		 * cover that case. This is confusing and it is easy to think this
+		 * call is irrelevant, which has happened three times in development
+		 * already. Leave it in.
 		 */
 		RecordKnownAssignedTransactionIds(max_xid);
 	}
@@ -4631,8 +4638,8 @@ xact_desc_commit(StringInfo buf, xl_xact_commit *xlrec)
 		msgs = (SharedInvalidationMessage *) &xacts[xlrec->nsubxacts];
 
 		if (XactCompletionRelcacheInitFileInval(xlrec))
-			appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u", 
-										xlrec->dbId, xlrec->tsId);
+			appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u",
+							 xlrec->dbId, xlrec->tsId);
 
 		appendStringInfo(buf, "; inval msgs:");
 		for (i = 0; i < xlrec->nmsgs; i++)
@@ -4738,8 +4745,8 @@ xact_desc(StringInfo buf, uint8 xl_info, char *rec)
 
 		/*
 		 * Note that we ignore the WAL record's xid, since we're more
-		 * interested in the top-level xid that issued the record
-		 * and which xids are being reported here.
+		 * interested in the top-level xid that issued the record and which
+		 * xids are being reported here.
 		 */
 		appendStringInfo(buf, "xid assignment xtop %u: ", xlrec->xtop);
 		xact_desc_assignment(buf, xlrec);
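
The AssignTransactionId comments reformatted above describe buffering unreported subtransaction XIDs and emitting an XLOG_XACT_ASSIGNMENT record once PGPROC_MAX_CACHED_SUBXIDS of them have accumulated, which bounds the shared memory a hot standby needs to track in-progress XIDs. A minimal buffer-and-flush sketch of that idea (hypothetical names and a tiny buffer size, purely for illustration):

/* assign_sketch.c -- illustrative model only, not part of this patch */
#include <stdio.h>

#define MAX_CACHED_SUBXIDS 4	/* stand-in for PGPROC_MAX_CACHED_SUBXIDS */

typedef unsigned int TransactionId;

static TransactionId unreported[MAX_CACHED_SUBXIDS];
static int	nUnreported = 0;

/* Model of emitting one assignment-style record and clearing the buffer */
static void
flush_assignment_record(TransactionId top_xid)
{
	printf("assignment record: top %u covers %d subxids (first buffered %u)\n",
		   top_xid, nUnreported, unreported[0]);
	nUnreported = 0;
}

/* Each new subtransaction XID is buffered; a full buffer forces a record */
static void
report_subxid(TransactionId top_xid, TransactionId subxid)
{
	unreported[nUnreported++] = subxid;
	if (nUnreported >= MAX_CACHED_SUBXIDS)
		flush_assignment_record(top_xid);
}

int
main(void)
{
	/* any XIDs still buffered at the end would be reported later */
	for (TransactionId xid = 101; xid <= 110; xid++)
		report_subxid(100, xid);
	return 0;
}
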
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 40916571ac0335483fd8d3f3d11d86cc5e8c7488..d753a31cb3559ee7abcf4c691c160f1ae553fbfe 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.378 2010/02/25 02:17:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.379 2010/02/26 02:00:35 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -70,7 +70,7 @@ int			XLOGbuffers = 8;
 int			XLogArchiveTimeout = 0;
 bool		XLogArchiveMode = false;
 char	   *XLogArchiveCommand = NULL;
-bool 		XLogRequestRecoveryConnections = true;
+bool		XLogRequestRecoveryConnections = true;
 int			MaxStandbyDelay = 30;
 bool		fullPageWrites = true;
 bool		log_checkpoints = false;
@@ -140,9 +140,9 @@ TimeLineID	ThisTimeLineID = 0;
 bool		InRecovery = false;
 
 /* Are we in Hot Standby mode? Only valid in startup process, see xlog.h */
-HotStandbyState		standbyState = STANDBY_DISABLED;
+HotStandbyState standbyState = STANDBY_DISABLED;
 
-static 	XLogRecPtr	LastRec;
+static XLogRecPtr LastRec;
 
 /*
  * Local copy of SharedRecoveryInProgress variable. True actually means "not
@@ -156,7 +156,7 @@ static bool LocalRecoveryInProgress = true;
  *		0: unconditionally not allowed to insert XLOG
  *		-1: must check RecoveryInProgress(); disallow until it is false
  * Most processes start with -1 and transition to 1 after seeing that recovery
- * is not in progress.  But we can also force the value for special cases.
+ * is not in progress.	But we can also force the value for special cases.
  * The coding in XLogInsertAllowed() depends on the first two of these states
  * being numerically the same as bool true and false.
  */
@@ -181,7 +181,7 @@ static TimestampTz recoveryLastXTime = 0;
 /* options taken from recovery.conf for XLOG streaming */
 static bool StandbyMode = false;
 static char *PrimaryConnInfo = NULL;
-char *TriggerFile = NULL;
+char	   *TriggerFile = NULL;
 
 /* if recoveryStopsHere returns true, it saves actual stop xid/time here */
 static TransactionId recoveryStopXid;
@@ -389,7 +389,7 @@ typedef struct XLogCtlData
 	/* end+1 of the last record replayed (or being replayed) */
 	XLogRecPtr	replayEndRecPtr;
 	/* timestamp of last record replayed (or being replayed) */
-	TimestampTz	recoveryLastXTime;
+	TimestampTz recoveryLastXTime;
 	/* end+1 of the last record replayed */
 	XLogRecPtr	recoveryLastRecPtr;
 
@@ -456,6 +456,7 @@ static uint32 readId = 0;
 static uint32 readSeg = 0;
 static uint32 readOff = 0;
 static uint32 readLen = 0;
+
 /* Is the currently open segment being streamed from primary? */
 static bool readStreamed = false;
 
@@ -510,9 +511,9 @@ static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch);
 static bool InstallXLogFileSegment(uint32 *log, uint32 *seg, char *tmppath,
 					   bool find_free, int *max_advance,
 					   bool use_lock);
-static int	XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
+static int XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
 			 bool fromArchive, bool notexistOk);
-static int	XLogFileReadAnyTLI(uint32 log, uint32 seg, int emode,
+static int XLogFileReadAnyTLI(uint32 log, uint32 seg, int emode,
 				   bool fromArchive);
 static bool XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
 			 bool randAccess);
@@ -1867,10 +1868,10 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
 		 * to not need a valid 'lsn' value.
 		 *
 		 * Another important reason for doing it this way is that the passed
-		 * 'lsn' value could be bogus, i.e., past the end of available WAL,
-		 * if the caller got it from a corrupted heap page.  Accepting such
-		 * a value as the min recovery point would prevent us from coming up
-		 * at all.  Instead, we just log a warning and continue with recovery.
+		 * 'lsn' value could be bogus, i.e., past the end of available WAL, if
+		 * the caller got it from a corrupted heap page.  Accepting such a
+		 * value as the min recovery point would prevent us from coming up at
+		 * all.  Instead, we just log a warning and continue with recovery.
 		 * (See also the comments about corrupt LSNs in XLogFlush.)
 		 */
 		SpinLockAcquire(&xlogctl->info_lck);
@@ -1879,7 +1880,7 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
 
 		if (!force && XLByteLT(newMinRecoveryPoint, lsn))
 			elog(WARNING,
-				 "xlog min recovery request %X/%X is past current point %X/%X",
+			   "xlog min recovery request %X/%X is past current point %X/%X",
 				 lsn.xlogid, lsn.xrecoff,
 				 newMinRecoveryPoint.xlogid, newMinRecoveryPoint.xrecoff);
 
@@ -1912,10 +1913,10 @@ XLogFlush(XLogRecPtr record)
 
 	/*
 	 * During REDO, we are reading not writing WAL.  Therefore, instead of
-	 * trying to flush the WAL, we should update minRecoveryPoint instead.
-	 * We test XLogInsertAllowed(), not InRecovery, because we need the
-	 * bgwriter to act this way too, and because when the bgwriter tries
-	 * to write the end-of-recovery checkpoint, it should indeed flush.
+	 * trying to flush the WAL, we should update minRecoveryPoint instead. We
+	 * test XLogInsertAllowed(), not InRecovery, because we need the bgwriter
+	 * to act this way too, and because when the bgwriter tries to write the
+	 * end-of-recovery checkpoint, it should indeed flush.
 	 */
 	if (!XLogInsertAllowed())
 	{
@@ -2007,16 +2008,16 @@ XLogFlush(XLogRecPtr record)
 	 * the whole system due to corruption on one data page.  In particular, if
 	 * the bad page is encountered again during recovery then we would be
 	 * unable to restart the database at all!  (This scenario actually
-	 * happened in the field several times with 7.1 releases.)  As of 8.4,
-	 * bad LSNs encountered during recovery are UpdateMinRecoveryPoint's
-	 * problem; the only time we can reach here during recovery is while
-	 * flushing the end-of-recovery checkpoint record, and we don't expect
-	 * that to have a bad LSN.
+	 * happened in the field several times with 7.1 releases.)	As of 8.4, bad
+	 * LSNs encountered during recovery are UpdateMinRecoveryPoint's problem;
+	 * the only time we can reach here during recovery is while flushing the
+	 * end-of-recovery checkpoint record, and we don't expect that to have a
+	 * bad LSN.
 	 *
-	 * Note that for calls from xact.c, the ERROR will
-	 * be promoted to PANIC since xact.c calls this routine inside a critical
-	 * section.  However, calls from bufmgr.c are not within critical sections
-	 * and so we will not force a restart for a bad LSN on a data page.
+	 * Note that for calls from xact.c, the ERROR will be promoted to PANIC
+	 * since xact.c calls this routine inside a critical section.  However,
+	 * calls from bufmgr.c are not within critical sections and so we will not
+	 * force a restart for a bad LSN on a data page.
 	 */
 	if (XLByteLT(LogwrtResult.Flush, record))
 		elog(ERROR,
@@ -2136,9 +2137,10 @@ XLogNeedsFlush(XLogRecPtr record)
 		LWLockRelease(ControlFileLock);
 
 		/*
-		 * An invalid minRecoveryPoint means that we need to recover all the WAL,
-		 * i.e., we're doing crash recovery.  We never modify the control file's
-		 * value in that case, so we can short-circuit future checks here too.
+		 * An invalid minRecoveryPoint means that we need to recover all the
+		 * WAL, i.e., we're doing crash recovery.  We never modify the control
+		 * file's value in that case, so we can short-circuit future checks
+		 * here too.
 		 */
 		if (minRecoveryPoint.xlogid == 0 && minRecoveryPoint.xrecoff == 0)
 			updateMinRecoveryPoint = false;
@@ -2572,46 +2574,46 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
 	char		path[MAXPGPATH];
 	int			fd;
 
-		XLogFileName(xlogfname, tli, log, seg);
+	XLogFileName(xlogfname, tli, log, seg);
 
-		if (fromArchive)
-		{
-			/* Report recovery progress in PS display */
-			snprintf(activitymsg, sizeof(activitymsg), "waiting for %s",
-					 xlogfname);
-			set_ps_display(activitymsg, false);
-
-			restoredFromArchive = RestoreArchivedFile(path, xlogfname,
-													  "RECOVERYXLOG",
-													  XLogSegSize);
-			if (!restoredFromArchive)
-				return -1;
-		}
-		else
-		{
-			XLogFilePath(path, tli, log, seg);
-			restoredFromArchive = false;
-		}
+	if (fromArchive)
+	{
+		/* Report recovery progress in PS display */
+		snprintf(activitymsg, sizeof(activitymsg), "waiting for %s",
+				 xlogfname);
+		set_ps_display(activitymsg, false);
 
-		fd = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0);
-		if (fd >= 0)
-		{
-			/* Success! */
-			curFileTLI = tli;
+		restoredFromArchive = RestoreArchivedFile(path, xlogfname,
+												  "RECOVERYXLOG",
+												  XLogSegSize);
+		if (!restoredFromArchive)
+			return -1;
+	}
+	else
+	{
+		XLogFilePath(path, tli, log, seg);
+		restoredFromArchive = false;
+	}
 
-			/* Report recovery progress in PS display */
-			snprintf(activitymsg, sizeof(activitymsg), "recovering %s",
-					 xlogfname);
-			set_ps_display(activitymsg, false);
+	fd = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0);
+	if (fd >= 0)
+	{
+		/* Success! */
+		curFileTLI = tli;
 
-			return fd;
-		}
-		if (errno != ENOENT || !notfoundOk)	/* unexpected failure? */
-			ereport(PANIC,
-					(errcode_for_file_access(),
-			errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
-				   path, log, seg)));
-		return -1;
+		/* Report recovery progress in PS display */
+		snprintf(activitymsg, sizeof(activitymsg), "recovering %s",
+				 xlogfname);
+		set_ps_display(activitymsg, false);
+
+		return fd;
+	}
+	if (errno != ENOENT || !notfoundOk) /* unexpected failure? */
+		ereport(PANIC,
+				(errcode_for_file_access(),
+		   errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
+				  path, log, seg)));
+	return -1;
 }
 
 /*
@@ -2653,8 +2655,8 @@ XLogFileReadAnyTLI(uint32 log, uint32 seg, int emode, bool fromArchive)
 		 * If not in StandbyMode, fall back to searching pg_xlog. In
 		 * StandbyMode we're streaming segments from the primary to pg_xlog,
 		 * and we mustn't confuse the (possibly partial) segments in pg_xlog
-		 * with complete segments ready to be applied. We rather wait for
-		 * the records to arrive through streaming.
+		 * with complete segments ready to be applied. We rather wait for the
+		 * records to arrive through streaming.
 		 */
 		if (!StandbyMode && fromArchive)
 		{
@@ -2685,8 +2687,8 @@ XLogFileClose(void)
 	/*
 	 * WAL segment files will not be re-read in normal operation, so we advise
 	 * the OS to release any cached pages.	But do not do so if WAL archiving
-	 * or streaming is active, because archiver and walsender process could use
-	 * the cache to read the WAL segment.
+	 * or streaming is active, because archiver and walsender process could
+	 * use the cache to read the WAL segment.
 	 */
 #if defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
 	if (!XLogIsNeeded())
@@ -2893,7 +2895,7 @@ RestoreArchivedFile(char *path, const char *xlogfname,
 		{
 			if (expectedSize > 0 && stat_buf.st_size != expectedSize)
 			{
-				int elevel;
+				int			elevel;
 
 				/*
 				 * If we find a partial file in standby mode, we assume it's
@@ -2901,11 +2903,11 @@ RestoreArchivedFile(char *path, const char *xlogfname,
 				 * trying.
 				 *
 				 * Otherwise treat a wrong-sized file as FATAL to ensure the
-				 * DBA would notice it, but is that too strong?	We could try
+				 * DBA would notice it, but is that too strong? We could try
 				 * to plow ahead with a local copy of the file ... but the
 				 * problem is that there probably isn't one, and we'd
-				 * incorrectly conclude we've reached the end of WAL and
-				 * we're done recovering ...
+				 * incorrectly conclude we've reached the end of WAL and we're
+				 * done recovering ...
 				 */
 				if (StandbyMode && stat_buf.st_size < expectedSize)
 					elevel = DEBUG1;
@@ -2975,6 +2977,7 @@ RestoreArchivedFile(char *path, const char *xlogfname,
 				xlogfname, rc)));
 
 not_available:
+
 	/*
 	 * if an archived file is not available, there might still be a version of
 	 * this file in XLOGDIR, so return that as the filename to open.
@@ -3141,6 +3144,7 @@ RemoveOldXlogFiles(uint32 log, uint32 seg, XLogRecPtr endptr)
 	struct dirent *xlde;
 	char		lastoff[MAXFNAMELEN];
 	char		path[MAXPGPATH];
+
 #ifdef WIN32
 	char		newpath[MAXPGPATH];
 #endif
@@ -3218,21 +3222,22 @@ RemoveOldXlogFiles(uint32 log, uint32 seg, XLogRecPtr endptr)
 				else
 				{
 					/* No need for any more future segments... */
-					int rc;
+					int			rc;
 
 					ereport(DEBUG2,
 							(errmsg("removing transaction log file \"%s\"",
 									xlde->d_name)));
 
 #ifdef WIN32
+
 					/*
 					 * On Windows, if another process (e.g another backend)
 					 * holds the file open in FILE_SHARE_DELETE mode, unlink
 					 * will succeed, but the file will still show up in
-					 * directory listing until the last handle is closed.
-					 * To avoid confusing the lingering deleted file for a
-					 * live WAL file that needs to be archived, rename it
-					 * before deleting it.
+					 * directory listing until the last handle is closed. To
+					 * avoid confusing the lingering deleted file for a live
+					 * WAL file that needs to be archived, rename it before
+					 * deleting it.
 					 *
 					 * If another process holds the file open without
 					 * FILE_SHARE_DELETE flag, rename will fail. We'll try
@@ -3553,8 +3558,8 @@ ReadRecord(XLogRecPtr *RecPtr, int emode_arg, bool fetching_ckpt)
 		RecPtr = &tmpRecPtr;
 
 		/*
-		 * Align recptr to next page if no more records can fit on the
-		 * current page.
+		 * Align recptr to next page if no more records can fit on the current
+		 * page.
 		 */
 		if (XLOG_BLCKSZ - (RecPtr->xrecoff % XLOG_BLCKSZ) < SizeOfXLogRecord)
 		{
@@ -5093,8 +5098,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
 	UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
 
 	/*
-	 * If the ending log segment is still open, close it (to avoid
-	 * problems on Windows with trying to rename or delete an open file).
+	 * If the ending log segment is still open, close it (to avoid problems on
+	 * Windows with trying to rename or delete an open file).
 	 */
 	if (readFile >= 0)
 	{
@@ -5376,17 +5381,17 @@ CheckRequiredParameterValues(CheckPoint checkPoint)
 {
 	/* We ignore autovacuum_max_workers when we make this test. */
 	RecoveryRequiresIntParameter("max_connections",
-									MaxConnections, checkPoint.MaxConnections);
+								 MaxConnections, checkPoint.MaxConnections);
 
 	RecoveryRequiresIntParameter("max_prepared_xacts",
-									max_prepared_xacts, checkPoint.max_prepared_xacts);
+						  max_prepared_xacts, checkPoint.max_prepared_xacts);
 	RecoveryRequiresIntParameter("max_locks_per_xact",
-									max_locks_per_xact, checkPoint.max_locks_per_xact);
+						  max_locks_per_xact, checkPoint.max_locks_per_xact);
 
 	if (!checkPoint.XLogStandbyInfoMode)
 		ereport(ERROR,
-			(errmsg("recovery connections cannot start because the recovery_connections "
-					"parameter is disabled on the WAL source server")));
+				(errmsg("recovery connections cannot start because the recovery_connections "
+						"parameter is disabled on the WAL source server")));
 }
 
 /*
@@ -5464,12 +5469,12 @@ StartupXLOG(void)
 	ValidateXLOGDirectoryStructure();
 
 	/*
-	 * Clear out any old relcache cache files.  This is *necessary* if we
-	 * do any WAL replay, since that would probably result in the cache files
-	 * being out of sync with database reality.  In theory we could leave
-	 * them in place if the database had been cleanly shut down, but it
-	 * seems safest to just remove them always and let them be rebuilt
-	 * during the first backend startup.
+	 * Clear out any old relcache cache files.	This is *necessary* if we do
+	 * any WAL replay, since that would probably result in the cache files
+	 * being out of sync with database reality.  In theory we could leave them
+	 * in place if the database had been cleanly shut down, but it seems
+	 * safest to just remove them always and let them be rebuilt during the
+	 * first backend startup.
 	 */
 	RelationCacheInitFileRemove();
 
@@ -5648,8 +5653,8 @@ StartupXLOG(void)
 			{
 				if (recoveryTargetExact)
 					ereport(LOG,
-							(errmsg("starting point-in-time recovery to XID %u",
-									recoveryTargetXid)));
+						 (errmsg("starting point-in-time recovery to XID %u",
+								 recoveryTargetXid)));
 				else
 					ereport(LOG,
 							(errmsg("starting point-in-time recovery to %s",
@@ -5676,6 +5681,7 @@ StartupXLOG(void)
 			if (XLByteLT(ControlFile->minRecoveryPoint, checkPoint.redo))
 				ControlFile->minRecoveryPoint = checkPoint.redo;
 		}
+
 		/*
 		 * set backupStartupPoint if we're starting archive recovery from a
 		 * base backup
@@ -5714,14 +5720,14 @@ StartupXLOG(void)
 
 		/*
 		 * Initialize recovery connections, if enabled. We won't let backends
-		 * in yet, not until we've reached the min recovery point specified
-		 * in control file and we've established a recovery snapshot from a
+		 * in yet, not until we've reached the min recovery point specified in
+		 * control file and we've established a recovery snapshot from a
 		 * running-xacts WAL record.
 		 */
 		if (InArchiveRecovery && XLogRequestRecoveryConnections)
 		{
 			TransactionId *xids;
-			int nxids;
+			int			nxids;
 
 			CheckRequiredParameterValues(checkPoint);
 
@@ -5814,7 +5820,7 @@ StartupXLOG(void)
 			{
 #ifdef WAL_DEBUG
 				if (XLOG_DEBUG ||
-					(rmid == RM_XACT_ID && trace_recovery_messages <= DEBUG2) ||
+				 (rmid == RM_XACT_ID && trace_recovery_messages <= DEBUG2) ||
 					(rmid != RM_XACT_ID && trace_recovery_messages <= DEBUG3))
 				{
 					StringInfoData buf;
@@ -5845,14 +5851,14 @@ StartupXLOG(void)
 				{
 					reachedMinRecoveryPoint = true;
 					ereport(LOG,
-							(errmsg("consistent recovery state reached at %X/%X",
-									EndRecPtr.xlogid, EndRecPtr.xrecoff)));
+						(errmsg("consistent recovery state reached at %X/%X",
+								EndRecPtr.xlogid, EndRecPtr.xrecoff)));
 				}
 
 				/*
 				 * Have we got a valid starting snapshot that will allow
-				 * queries to be run? If so, we can tell postmaster that
-				 * the database is consistent now, enabling connections.
+				 * queries to be run? If so, we can tell postmaster that the
+				 * database is consistent now, enabling connections.
 				 */
 				if (standbyState == STANDBY_SNAPSHOT_READY &&
 					!backendsAllowed &&
@@ -5950,9 +5956,8 @@ StartupXLOG(void)
 
 	/*
 	 * We are now done reading the xlog from stream. Turn off streaming
-	 * recovery to force fetching the files (which would be required
-	 * at end of recovery, e.g., timeline history file) from archive or
-	 * pg_xlog.
+	 * recovery to force fetching the files (which would be required at end of
+	 * recovery, e.g., timeline history file) from archive or pg_xlog.
 	 */
 	StandbyMode = false;
 
@@ -6155,8 +6160,8 @@ StartupXLOG(void)
 	TransactionIdRetreat(ShmemVariableCache->latestCompletedXid);
 
 	/*
-	 * Start up the commit log and related stuff, too. In hot standby mode
-	 * we did this already before WAL replay.
+	 * Start up the commit log and related stuff, too. In hot standby mode we
+	 * did this already before WAL replay.
 	 */
 	if (standbyState == STANDBY_DISABLED)
 	{
@@ -6194,7 +6199,7 @@ StartupXLOG(void)
 	}
 
 	/*
-	 * All done.  Allow backends to write WAL.  (Although the bool flag is
+	 * All done.  Allow backends to write WAL.	(Although the bool flag is
 	 * probably atomic in itself, we use the info_lck here to ensure that
 	 * there are no race conditions concerning visibility of other recent
 	 * updates to shared memory.)
@@ -6222,9 +6227,9 @@ bool
 RecoveryInProgress(void)
 {
 	/*
-	 * We check shared state each time only until we leave recovery mode.
-	 * We can't re-enter recovery, so there's no need to keep checking after
-	 * the shared variable has once been seen false.
+	 * We check shared state each time only until we leave recovery mode. We
+	 * can't re-enter recovery, so there's no need to keep checking after the
+	 * shared variable has once been seen false.
 	 */
 	if (!LocalRecoveryInProgress)
 		return false;
@@ -6241,7 +6246,7 @@ RecoveryInProgress(void)
 		/*
 		 * Initialize TimeLineID and RedoRecPtr when we discover that recovery
 		 * is finished. InitPostgres() relies upon this behaviour to ensure
-		 * that InitXLOGAccess() is called at backend startup.  (If you change
+		 * that InitXLOGAccess() is called at backend startup.	(If you change
 		 * this, see also LocalSetXLogInsertAllowed.)
 		 */
 		if (!LocalRecoveryInProgress)
@@ -6262,9 +6267,9 @@ bool
 XLogInsertAllowed(void)
 {
 	/*
-	 * If value is "unconditionally true" or "unconditionally false",
-	 * just return it.  This provides the normal fast path once recovery
-	 * is known done.
+	 * If value is "unconditionally true" or "unconditionally false", just
+	 * return it.  This provides the normal fast path once recovery is known
+	 * done.
 	 */
 	if (LocalXLogInsertAllowed >= 0)
 		return (bool) LocalXLogInsertAllowed;
@@ -6276,8 +6281,8 @@ XLogInsertAllowed(void)
 		return false;
 
 	/*
-	 * On exit from recovery, reset to "unconditionally true", since there
-	 * is no need to keep checking.
+	 * On exit from recovery, reset to "unconditionally true", since there is
+	 * no need to keep checking.
 	 */
 	LocalXLogInsertAllowed = 1;
 	return true;
@@ -6938,9 +6943,9 @@ CreateCheckPoint(int flags)
 	CheckPointGuts(checkPoint.redo, flags);
 
 	/*
-	 * Take a snapshot of running transactions and write this to WAL.
-	 * This allows us to reconstruct the state of running transactions
-	 * during archive recovery, if required. Skip, if this info disabled.
+	 * Take a snapshot of running transactions and write this to WAL. This
+	 * allows us to reconstruct the state of running transactions during
+	 * archive recovery, if required. Skip, if this info disabled.
 	 *
 	 * If we are shutting down, or Startup process is completing crash
 	 * recovery we don't need to write running xact data.
@@ -6948,7 +6953,7 @@ CreateCheckPoint(int flags)
 	 * Update checkPoint.nextXid since we have a later value
 	 */
 	if (!shutdown && XLogStandbyInfoActive())
-		 LogStandbySnapshot(&checkPoint.oldestActiveXid, &checkPoint.nextXid);
+		LogStandbySnapshot(&checkPoint.oldestActiveXid, &checkPoint.nextXid);
 	else
 		checkPoint.oldestActiveXid = InvalidTransactionId;
 
@@ -6970,18 +6975,18 @@ CreateCheckPoint(int flags)
 	XLogFlush(recptr);
 
 	/*
-	 * We mustn't write any new WAL after a shutdown checkpoint, or it will
-	 * be overwritten at next startup.  No-one should even try, this just
-	 * allows sanity-checking.  In the case of an end-of-recovery checkpoint,
-	 * we want to just temporarily disable writing until the system has exited
+	 * We mustn't write any new WAL after a shutdown checkpoint, or it will be
+	 * overwritten at next startup.  No-one should even try, this just allows
+	 * sanity-checking.  In the case of an end-of-recovery checkpoint, we want
+	 * to just temporarily disable writing until the system has exited
 	 * recovery.
 	 */
 	if (shutdown)
 	{
 		if (flags & CHECKPOINT_END_OF_RECOVERY)
-			LocalXLogInsertAllowed = -1;	/* return to "check" state */
+			LocalXLogInsertAllowed = -1;		/* return to "check" state */
 		else
-			LocalXLogInsertAllowed = 0;		/* never again write WAL */
+			LocalXLogInsertAllowed = 0; /* never again write WAL */
 	}
 
 	/*
@@ -7036,18 +7041,17 @@ CreateCheckPoint(int flags)
 	smgrpostckpt();
 
 	/*
-	 * If there's connected standby servers doing XLOG streaming, don't
-	 * delete XLOG files that have not been streamed to all of them yet.
-	 * This does nothing to prevent them from being deleted when the
-	 * standby is disconnected (e.g because of network problems), but at
-	 * least it avoids an open replication connection from failing because
-	 * of that.
+	 * If there's connected standby servers doing XLOG streaming, don't delete
+	 * XLOG files that have not been streamed to all of them yet. This does
+	 * nothing to prevent them from being deleted when the standby is
+	 * disconnected (e.g because of network problems), but at least it avoids
+	 * an open replication connection from failing because of that.
 	 */
 	if ((_logId || _logSeg) && MaxWalSenders > 0)
 	{
-		XLogRecPtr oldest;
-		uint32	log;
-		uint32	seg;
+		XLogRecPtr	oldest;
+		uint32		log;
+		uint32		seg;
 
 		oldest = GetOldestWALSendPointer();
 		if (oldest.xlogid != 0 || oldest.xrecoff != 0)
@@ -7055,15 +7059,15 @@ CreateCheckPoint(int flags)
 			XLByteToSeg(oldest, log, seg);
 			if (log < _logId || (log == _logId && seg < _logSeg))
 			{
-				_logId	= log;
-				_logSeg	= seg;
+				_logId = log;
+				_logSeg = seg;
 			}
 		}
 	}
 
 	/*
-	 * Delete old log files (those no longer needed even for
-	 * previous checkpoint or the standbys in XLOG streaming).
+	 * Delete old log files (those no longer needed even for previous
+	 * checkpoint or the standbys in XLOG streaming).
 	 */
 	if (_logId || _logSeg)
 	{
@@ -7262,8 +7266,8 @@ CreateRestartPoint(int flags)
 	/*
 	 * Update pg_control, using current time.  Check that it still shows
 	 * IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
-	 * this is a quick hack to make sure nothing really bad happens if
-	 * somehow we get here after the end-of-recovery checkpoint.
+	 * this is a quick hack to make sure nothing really bad happens if somehow
+	 * we get here after the end-of-recovery checkpoint.
 	 */
 	LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
 	if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY &&
@@ -7312,9 +7316,9 @@ CreateRestartPoint(int flags)
 		LogCheckpointEnd(true);
 
 	ereport((log_checkpoints ? LOG : DEBUG2),
-			(errmsg("recovery restart point at %X/%X with latest known log time %s",
-					lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff,
-					timestamptz_to_str(GetLatestXLogTime()))));
+	 (errmsg("recovery restart point at %X/%X with latest known log time %s",
+			 lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff,
+			 timestamptz_to_str(GetLatestXLogTime()))));
 
 	LWLockRelease(CheckpointLock);
 	return true;
@@ -7522,6 +7526,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
 	else if (info == XLOG_BACKUP_END)
 	{
 		XLogRecPtr	startpoint;
+
 		memcpy(&startpoint, XLogRecGetData(record), sizeof(startpoint));
 
 		if (XLByteEQ(ControlFile->backupStartPoint, startpoint))
@@ -7550,12 +7555,12 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
 		if (InArchiveRecovery)
 		{
 			/*
-			 * Note: We don't print the reason string from the record,
-			 * because that gets added as a line using xlog_desc()
+			 * Note: We don't print the reason string from the record, because
+			 * that gets added as a line using xlog_desc()
 			 */
 			ereport(WARNING,
-					(errmsg("unlogged operation performed, data may be missing"),
-					 errhint("This can happen if you temporarily disable archive_mode without taking a new base backup.")));
+				(errmsg("unlogged operation performed, data may be missing"),
+				 errhint("This can happen if you temporarily disable archive_mode without taking a new base backup.")));
 		}
 	}
 }
@@ -7601,7 +7606,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
 	}
 	else if (info == XLOG_BACKUP_END)
 	{
-		XLogRecPtr startpoint;
+		XLogRecPtr	startpoint;
 
 		memcpy(&startpoint, rec, sizeof(XLogRecPtr));
 		appendStringInfo(buf, "backup end: %X/%X",
@@ -7609,7 +7614,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
 	}
 	else if (info == XLOG_UNLOGGED)
 	{
-		char *reason = rec;
+		char	   *reason = rec;
 
 		appendStringInfo(buf, "unlogged operation: %s", reason);
 	}
@@ -7649,7 +7654,7 @@ xlog_outrec(StringInfo buf, XLogRecord *record)
 static int
 get_sync_bit(int method)
 {
-	int o_direct_flag = 0;
+	int			o_direct_flag = 0;
 
 	/* If fsync is disabled, never open in sync mode */
 	if (!enableFsync)
@@ -7658,11 +7663,11 @@ get_sync_bit(int method)
 	/*
 	 * Optimize writes by bypassing kernel cache with O_DIRECT when using
 	 * O_SYNC, O_DSYNC or O_FSYNC. But only if archiving and streaming are
-	 * disabled, otherwise the archive command or walsender process will
-	 * read the WAL soon after writing it, which is guaranteed to cause a
-	 * physical read if we bypassed the kernel cache. We also skip the
-	 * posix_fadvise(POSIX_FADV_DONTNEED) call in XLogFileClose() for the
-	 * same reason.
+	 * disabled, otherwise the archive command or walsender process will read
+	 * the WAL soon after writing it, which is guaranteed to cause a physical
+	 * read if we bypassed the kernel cache. We also skip the
+	 * posix_fadvise(POSIX_FADV_DONTNEED) call in XLogFileClose() for the same
+	 * reason.
 	 *
 	 * Never use O_DIRECT in walreceiver process for similar reasons; the WAL
 	 * written by walreceiver is normally read by the startup process soon
@@ -7985,7 +7990,7 @@ pg_stop_backup(PG_FUNCTION_ARGS)
 {
 	XLogRecPtr	startpoint;
 	XLogRecPtr	stoppoint;
-	XLogRecData	rdata;
+	XLogRecData rdata;
 	pg_time_t	stamp_time;
 	char		strfbuf[128];
 	char		histfilepath[MAXPGPATH];
@@ -8132,8 +8137,8 @@ pg_stop_backup(PG_FUNCTION_ARGS)
 	 *
 	 * We wait forever, since archive_command is supposed to work and we
 	 * assume the admin wanted his backup to work completely. If you don't
-	 * wish to wait, you can set statement_timeout.  Also, some notices
-	 * are issued to clue in anyone who might be doing this interactively.
+	 * wish to wait, you can set statement_timeout.  Also, some notices are
+	 * issued to clue in anyone who might be doing this interactively.
 	 */
 	XLByteToPrevSeg(stoppoint, _logId, _logSeg);
 	XLogFileName(lastxlogfilename, ThisTimeLineID, _logId, _logSeg);
@@ -8161,9 +8166,9 @@ pg_stop_backup(PG_FUNCTION_ARGS)
 			ereport(WARNING,
 					(errmsg("pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)",
 							waits),
-					 errhint("Check that your archive_command is executing properly. "
-							 "pg_stop_backup can be cancelled safely, "
-							 "but the database backup will not be usable without all the WAL segments.")));
+			errhint("Check that your archive_command is executing properly. "
+					"pg_stop_backup can be cancelled safely, "
+					"but the database backup will not be usable without all the WAL segments.")));
 		}
 	}
 
@@ -8621,6 +8626,7 @@ HandleStartupProcInterrupts(void)
 		got_SIGHUP = false;
 		ProcessConfigFile(PGC_SIGHUP);
 	}
+
 	/*
 	 * Check if we were requested to exit without finishing recovery.
 	 */
@@ -8653,10 +8659,11 @@ StartupProcessMain(void)
 	 */
 	pqsignal(SIGHUP, StartupProcSigHupHandler); /* reload config file */
 	pqsignal(SIGINT, SIG_IGN);	/* ignore query cancel */
-	pqsignal(SIGTERM, StartupProcShutdownHandler);	/* request shutdown */
-	pqsignal(SIGQUIT, startupproc_quickdie);		/* hard crash time */
+	pqsignal(SIGTERM, StartupProcShutdownHandler);		/* request shutdown */
+	pqsignal(SIGQUIT, startupproc_quickdie);	/* hard crash time */
 	if (XLogRequestRecoveryConnections)
-		pqsignal(SIGALRM, handle_standby_sig_alarm); /* ignored unless InHotStandby */
+		pqsignal(SIGALRM, handle_standby_sig_alarm);	/* ignored unless
+														 * InHotStandby */
 	else
 		pqsignal(SIGALRM, SIG_IGN);
 	pqsignal(SIGPIPE, SIG_IGN);
@@ -8731,20 +8738,20 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
 	{
 		if (StandbyMode)
 		{
-			bool last_restore_failed = false;
+			bool		last_restore_failed = false;
 
 			/*
 			 * In standby mode, wait for the requested record to become
-			 * available, either via restore_command succeeding to restore
-			 * the segment, or via walreceiver having streamed the record.
+			 * available, either via restore_command succeeding to restore the
+			 * segment, or via walreceiver having streamed the record.
 			 */
 			for (;;)
 			{
 				if (WalRcvInProgress())
 				{
 					/*
-					 * While walreceiver is active, wait for new WAL to
-					 * arrive from primary.
+					 * While walreceiver is active, wait for new WAL to arrive
+					 * from primary.
 					 */
 					receivedUpto = GetWalRcvWriteRecPtr();
 					if (XLByteLT(*RecPtr, receivedUpto))
@@ -8798,10 +8805,10 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
 
 					/*
 					 * If we succeeded restoring some segments from archive
-					 * since the last connection attempt (or we haven't
-					 * tried streaming yet, retry immediately. But if we
-					 * haven't, assume the problem is persistent, so be
-					 * less aggressive.
+					 * since the last connection attempt (or we haven't tried
+					 * streaming yet, retry immediately. But if we haven't,
+					 * assume the problem is persistent, so be less
+					 * aggressive.
 					 */
 					if (last_restore_failed)
 					{
@@ -8813,7 +8820,7 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
 						 */
 						if (CheckForStandbyTrigger())
 							goto next_record_is_invalid;
-						pg_usleep(5000000L); /* 5 seconds */
+						pg_usleep(5000000L);	/* 5 seconds */
 					}
 					last_restore_failed = true;
 
@@ -8832,8 +8839,8 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
 				}
 
 				/*
-				 * This possibly-long loop needs to handle interrupts of startup
-				 * process.
+				 * This possibly-long loop needs to handle interrupts of
+				 * startup process.
 				 */
 				HandleStartupProcInterrupts();
 			}
@@ -8857,16 +8864,16 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
 	}
 
 	/*
-	 * At this point, we have the right segment open and we know the
-	 * requested record is in it.
+	 * At this point, we have the right segment open and we know the requested
+	 * record is in it.
 	 */
 	Assert(readFile != -1);
 
 	/*
-	 * If the current segment is being streamed from master, calculate
-	 * how much of the current page we have received already. We know the
-	 * requested record has been received, but this is for the benefit
-	 * of future calls, to allow quick exit at the top of this function.
+	 * If the current segment is being streamed from master, calculate how
+	 * much of the current page we have received already. We know the
+	 * requested record has been received, but this is for the benefit of
+	 * future calls, to allow quick exit at the top of this function.
 	 */
 	if (readStreamed)
 	{
@@ -8909,16 +8916,16 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
 	{
 		ereport(emode,
 				(errcode_for_file_access(),
-				 errmsg("could not seek in log file %u, segment %u to offset %u: %m",
-						readId, readSeg, readOff)));
+		 errmsg("could not seek in log file %u, segment %u to offset %u: %m",
+				readId, readSeg, readOff)));
 		goto next_record_is_invalid;
 	}
 	if (read(readFile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
 	{
 		ereport(emode,
 				(errcode_for_file_access(),
-				 errmsg("could not read from log file %u, segment %u, offset %u: %m",
-						readId, readSeg, readOff)));
+		 errmsg("could not read from log file %u, segment %u, offset %u: %m",
+				readId, readSeg, readOff)));
 		goto next_record_is_invalid;
 	}
 	if (!ValidXLOGHeader((XLogPageHeader) readBuf, emode))
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index d2b7c1e58548b8588bbbc7423f3cefd4199d914a..a9c5d1fd53b631ec9fecaf48900918cf249a0f7c 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.259 2010/02/07 20:48:09 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.260 2010/02/26 02:00:35 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -370,7 +370,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
 #endif
 
 		/*
-		 * Assign the ProcSignalSlot for an auxiliary process.  Since it
+		 * Assign the ProcSignalSlot for an auxiliary process.	Since it
 		 * doesn't have a BackendId, the slot is statically allocated based on
 		 * the auxiliary process type (auxType).  Backends use slots indexed
 		 * in the range from 1 to MaxBackends (inclusive), so we use
@@ -493,8 +493,8 @@ BootstrapModeMain(void)
 	boot_yyparse();
 
 	/*
-	 * We should now know about all mapped relations, so it's okay to
-	 * write out the initial relation mapping files.
+	 * We should now know about all mapped relations, so it's okay to write
+	 * out the initial relation mapping files.
 	 */
 	RelationMapFinishBootstrap();
 
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 1576191737a5a5807233749991ee93cd3a70d507..0488f765965f95fcd4b3a5f780f4e8325e81b4b1 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.162 2010/02/14 18:42:12 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.163 2010/02/26 02:00:35 momjian Exp $
  *
  * NOTES
  *	  See acl.h.
@@ -85,8 +85,8 @@ typedef struct
  */
 typedef struct
 {
-	Oid			roleid;				/* owning role */
-	Oid			nspid;				/* namespace, or InvalidOid if none */
+	Oid			roleid;			/* owning role */
+	Oid			nspid;			/* namespace, or InvalidOid if none */
 	/* remaining fields are same as in InternalGrant: */
 	bool		is_grant;
 	GrantObjectType objtype;
@@ -353,7 +353,7 @@ ExecuteGrantStmt(GrantStmt *stmt)
 		case ACL_TARGET_ALL_IN_SCHEMA:
 			istmt.objects = objectsInSchemaToOids(stmt->objtype, stmt->objects);
 			break;
-		/* ACL_TARGET_DEFAULTS should not be seen here */
+			/* ACL_TARGET_DEFAULTS should not be seen here */
 		default:
 			elog(ERROR, "unrecognized GrantStmt.targtype: %d",
 				 (int) stmt->targtype);
@@ -611,7 +611,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
 		case ACL_OBJECT_LARGEOBJECT:
 			foreach(cell, objnames)
 			{
-				Oid		lobjOid = intVal(lfirst(cell));
+				Oid			lobjOid = intVal(lfirst(cell));
 
 				if (!LargeObjectExists(lobjOid))
 					ereport(ERROR,
@@ -880,8 +880,8 @@ ExecAlterDefaultPrivilegesStmt(AlterDefaultPrivilegesStmt *stmt)
 	}
 
 	/*
-	 * Convert action->privileges, a list of privilege strings,
-	 * into an AclMode bitmask.
+	 * Convert action->privileges, a list of privilege strings, into an
+	 * AclMode bitmask.
 	 */
 	switch (action->objtype)
 	{
@@ -928,7 +928,7 @@ ExecAlterDefaultPrivilegesStmt(AlterDefaultPrivilegesStmt *stmt)
 			if (privnode->cols)
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_GRANT_OPERATION),
-						 errmsg("default privileges cannot be set for columns")));
+					errmsg("default privileges cannot be set for columns")));
 
 			if (privnode->priv_name == NULL)	/* parser mistake? */
 				elog(ERROR, "AccessPriv node must specify privilege");
@@ -962,10 +962,10 @@ ExecAlterDefaultPrivilegesStmt(AlterDefaultPrivilegesStmt *stmt)
 			iacls.roleid = get_roleid_checked(rolename);
 
 			/*
-			 * We insist that calling user be a member of each target role.
-			 * If he has that, he could become that role anyway via SET ROLE,
-			 * so FOR ROLE is just a syntactic convenience and doesn't give
-			 * any special privileges.
+			 * We insist that calling user be a member of each target role. If
+			 * he has that, he could become that role anyway via SET ROLE, so
+			 * FOR ROLE is just a syntactic convenience and doesn't give any
+			 * special privileges.
 			 */
 			check_is_member_of_role(GetUserId(), iacls.roleid);
 
@@ -1050,8 +1050,8 @@ SetDefaultACL(InternalDefaultACL *iacls)
 	rel = heap_open(DefaultAclRelationId, RowExclusiveLock);
 
 	/*
-	 * Convert ACL object type to pg_default_acl object type
-	 * and handle all_privs option
+	 * Convert ACL object type to pg_default_acl object type and handle
+	 * all_privs option
 	 */
 	switch (iacls->objtype)
 	{
@@ -1084,7 +1084,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
 	tuple = SearchSysCache3(DEFACLROLENSPOBJ,
 							ObjectIdGetDatum(iacls->roleid),
 							ObjectIdGetDatum(iacls->nspid),
-					 		CharGetDatum(objtype));
+							CharGetDatum(objtype));
 
 	if (HeapTupleIsValid(tuple))
 	{
@@ -1110,9 +1110,9 @@ SetDefaultACL(InternalDefaultACL *iacls)
 	{
 		/*
 		 * If we are creating a global entry, start with the hard-wired
-		 * defaults and modify as per command.  Otherwise, start with an empty
-		 * ACL and modify that.  This is needed because global entries
-		 * replace the hard-wired defaults, while others do not.
+		 * defaults and modify as per command.	Otherwise, start with an empty
+		 * ACL and modify that.  This is needed because global entries replace
+		 * the hard-wired defaults, while others do not.
 		 */
 		if (!OidIsValid(iacls->nspid))
 			old_acl = acldefault(iacls->objtype, iacls->roleid);
@@ -1128,8 +1128,8 @@ SetDefaultACL(InternalDefaultACL *iacls)
 	noldmembers = aclmembers(old_acl, &oldmembers);
 
 	/*
-	 * Generate new ACL.  Grantor of rights is always the same as the
-	 * target role.
+	 * Generate new ACL.  Grantor of rights is always the same as the target
+	 * role.
 	 */
 	new_acl = merge_acl_with_grant(old_acl,
 								   iacls->is_grant,
@@ -1180,7 +1180,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
 		if (OidIsValid(iacls->nspid))
 		{
 			ObjectAddress myself,
-						  referenced;
+						referenced;
 
 			myself.classId = DefaultAclRelationId;
 			myself.objectId = HeapTupleGetOid(newtuple);
@@ -2046,7 +2046,7 @@ ExecGrant_Fdw(InternalGrant *istmt)
 		Oid		   *newmembers;
 
 		tuple = SearchSysCache1(FOREIGNDATAWRAPPEROID,
-							    ObjectIdGetDatum(fdwid));
+								ObjectIdGetDatum(fdwid));
 		if (!HeapTupleIsValid(tuple))
 			elog(ERROR, "cache lookup failed for foreign-data wrapper %u", fdwid);
 
@@ -2499,7 +2499,7 @@ ExecGrant_Largeobject(InternalGrant *istmt)
 	foreach(cell, istmt->objects)
 	{
 		Oid			loid = lfirst_oid(cell);
-		Form_pg_largeobject_metadata	form_lo_meta;
+		Form_pg_largeobject_metadata form_lo_meta;
 		char		loname[NAMEDATALEN];
 		Datum		aclDatum;
 		bool		isNull;
@@ -2517,8 +2517,8 @@ ExecGrant_Largeobject(InternalGrant *istmt)
 		int			nnewmembers;
 		Oid		   *oldmembers;
 		Oid		   *newmembers;
-		ScanKeyData	entry[1];
-		SysScanDesc	scan;
+		ScanKeyData entry[1];
+		SysScanDesc scan;
 		HeapTuple	tuple;
 
 		/* There's no syscache for pg_largeobject_metadata */
@@ -3494,8 +3494,8 @@ pg_largeobject_aclmask_snapshot(Oid lobj_oid, Oid roleid,
 {
 	AclMode		result;
 	Relation	pg_lo_meta;
-	ScanKeyData	entry[1];
-	SysScanDesc	scan;
+	ScanKeyData entry[1];
+	SysScanDesc scan;
 	HeapTuple	tuple;
 	Datum		aclDatum;
 	bool		isNull;
@@ -3669,8 +3669,8 @@ pg_tablespace_aclmask(Oid spc_oid, Oid roleid,
 	ownerId = ((Form_pg_tablespace) GETSTRUCT(tuple))->spcowner;
 
 	aclDatum = SysCacheGetAttr(TABLESPACEOID, tuple,
-								   Anum_pg_tablespace_spcacl,
-								   &isNull);
+							   Anum_pg_tablespace_spcacl,
+							   &isNull);
 
 	if (isNull)
 	{
@@ -4190,8 +4190,8 @@ bool
 pg_largeobject_ownercheck(Oid lobj_oid, Oid roleid)
 {
 	Relation	pg_lo_meta;
-	ScanKeyData	entry[1];
-	SysScanDesc	scan;
+	ScanKeyData entry[1];
+	SysScanDesc scan;
 	HeapTuple	tuple;
 	Oid			ownerId;
 
@@ -4484,8 +4484,8 @@ get_default_acl_internal(Oid roleId, Oid nsp_oid, char objtype)
 
 	if (HeapTupleIsValid(tuple))
 	{
-		Datum	aclDatum;
-		bool	isNull;
+		Datum		aclDatum;
+		bool		isNull;
 
 		aclDatum = SysCacheGetAttr(DEFACLROLENSPOBJ, tuple,
 								   Anum_pg_default_acl_defaclacl,
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 943cc4920ecdf9e4afd5c12e0fb1d97dd31cde39..a7eb09d8613033cd898e1e2ad41bf2ceee29f9e2 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -10,7 +10,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.88 2010/02/07 20:48:09 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.89 2010/02/26 02:00:36 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -163,7 +163,7 @@ GetDatabasePath(Oid dbNode, Oid spcNode)
 	{
 		/* All other tablespaces are accessed via symlinks */
 		pathlen = 9 + 1 + OIDCHARS + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) +
-				  1 + OIDCHARS + 1;
+			1 + OIDCHARS + 1;
 		path = (char *) palloc(pathlen);
 		snprintf(path, pathlen, "pg_tblspc/%u/%s/%u",
 				 spcNode, TABLESPACE_VERSION_DIRECTORY, dbNode);
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index e4fa47d27f9a9ecbb6f60db69c3365da82cc2c9e..491c402a03b0025e94097d950e07188d07c87e16 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.95 2010/02/14 18:42:12 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.96 2010/02/26 02:00:36 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -148,7 +148,7 @@ static const Oid object_classes[MAX_OCLASS] = {
 	AuthIdRelationId,			/* OCLASS_ROLE */
 	DatabaseRelationId,			/* OCLASS_DATABASE */
 	TableSpaceRelationId,		/* OCLASS_TBLSPACE */
-	ForeignDataWrapperRelationId,	/* OCLASS_FDW */
+	ForeignDataWrapperRelationId,		/* OCLASS_FDW */
 	ForeignServerRelationId,	/* OCLASS_FOREIGN_SERVER */
 	UserMappingRelationId,		/* OCLASS_USER_MAPPING */
 	DefaultAclRelationId		/* OCLASS_DEFACL */
@@ -1129,8 +1129,8 @@ doDeletion(const ObjectAddress *object)
 			break;
 
 			/*
-			 * OCLASS_ROLE, OCLASS_DATABASE, OCLASS_TBLSPACE intentionally
-			 * not handled here
+			 * OCLASS_ROLE, OCLASS_DATABASE, OCLASS_TBLSPACE intentionally not
+			 * handled here
 			 */
 
 		case OCLASS_FDW:
@@ -2637,31 +2637,31 @@ getObjectDescription(const ObjectAddress *object)
 					case DEFACLOBJ_RELATION:
 						appendStringInfo(&buffer,
 										 _("default privileges on new relations belonging to role %s"),
-										 GetUserNameFromId(defacl->defaclrole));
+									  GetUserNameFromId(defacl->defaclrole));
 						break;
 					case DEFACLOBJ_SEQUENCE:
 						appendStringInfo(&buffer,
 										 _("default privileges on new sequences belonging to role %s"),
-										 GetUserNameFromId(defacl->defaclrole));
+									  GetUserNameFromId(defacl->defaclrole));
 						break;
 					case DEFACLOBJ_FUNCTION:
 						appendStringInfo(&buffer,
 										 _("default privileges on new functions belonging to role %s"),
-										 GetUserNameFromId(defacl->defaclrole));
+									  GetUserNameFromId(defacl->defaclrole));
 						break;
 					default:
 						/* shouldn't get here */
 						appendStringInfo(&buffer,
-										 _("default privileges belonging to role %s"),
-										 GetUserNameFromId(defacl->defaclrole));
+								_("default privileges belonging to role %s"),
+									  GetUserNameFromId(defacl->defaclrole));
 						break;
 				}
 
 				if (OidIsValid(defacl->defaclnamespace))
 				{
 					appendStringInfo(&buffer,
-									_(" in schema %s"),
-									get_namespace_name(defacl->defaclnamespace));
+									 _(" in schema %s"),
+								get_namespace_name(defacl->defaclnamespace));
 				}
 
 				systable_endscan(rcscan);
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index b4d448da3e4ba574c1fc053d973f0cccdaf8059f..39aec680c086926ba232082a07ad58e83cf8536a 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.371 2010/02/14 18:42:13 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.372 2010/02/26 02:00:36 momjian Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -71,8 +71,8 @@
 
 
 /* Kluge for upgrade-in-place support */
-Oid binary_upgrade_next_heap_relfilenode = InvalidOid;
-Oid binary_upgrade_next_toast_relfilenode = InvalidOid;
+Oid			binary_upgrade_next_heap_relfilenode = InvalidOid;
+Oid			binary_upgrade_next_toast_relfilenode = InvalidOid;
 
 static void AddNewRelationTuple(Relation pg_class_desc,
 					Relation new_rel_desc,
@@ -455,9 +455,9 @@ CheckAttributeType(const char *attname, Oid atttypid,
 	{
 		/*
 		 * Refuse any attempt to create a pseudo-type column, except for a
-		 * special hack for pg_statistic: allow ANYARRAY when modifying
-		 * system catalogs (this allows creating pg_statistic and cloning it
-		 * during VACUUM FULL)
+		 * special hack for pg_statistic: allow ANYARRAY when modifying system
+		 * catalogs (this allows creating pg_statistic and cloning it during
+		 * VACUUM FULL)
 		 */
 		if (atttypid != ANYARRAYOID || !allow_system_table_mods)
 			ereport(ERROR,
@@ -657,7 +657,7 @@ AddNewAttributeTuples(Oid new_rel_oid,
  * Tuple data is taken from new_rel_desc->rd_rel, except for the
  * variable-width fields which are not present in a cached reldesc.
  * relacl and reloptions are passed in Datum form (to avoid having
- * to reference the data types in heap.h).  Pass (Datum) 0 to set them
+ * to reference the data types in heap.h).	Pass (Datum) 0 to set them
  * to NULL.
  * --------------------------------
  */
@@ -825,7 +825,7 @@ AddNewRelationType(const char *typeName,
 				   Oid new_array_type)
 {
 	return
-		TypeCreate(new_row_type,		/* optional predetermined OID */
+		TypeCreate(new_row_type,	/* optional predetermined OID */
 				   typeName,	/* type name */
 				   typeNamespace,		/* type namespace */
 				   new_rel_oid, /* relation oid */
@@ -1032,9 +1032,9 @@ heap_create_with_catalog(const char *relname,
 
 	/*
 	 * Since defining a relation also defines a complex type, we add a new
-	 * system type corresponding to the new relation.  The OID of the type
-	 * can be preselected by the caller, but if reltypeid is InvalidOid,
-	 * we'll generate a new OID for it.
+	 * system type corresponding to the new relation.  The OID of the type can
+	 * be preselected by the caller, but if reltypeid is InvalidOid, we'll
+	 * generate a new OID for it.
 	 *
 	 * NOTE: we could get a unique-index failure here, in case someone else is
 	 * creating the same type name in parallel but hadn't committed yet when
@@ -1116,14 +1116,14 @@ heap_create_with_catalog(const char *relname,
 
 	/*
 	 * Make a dependency link to force the relation to be deleted if its
-	 * namespace is.  Also make a dependency link to its owner, as well
-	 * as dependencies for any roles mentioned in the default ACL.
+	 * namespace is.  Also make a dependency link to its owner, as well as
+	 * dependencies for any roles mentioned in the default ACL.
 	 *
 	 * For composite types, these dependencies are tracked for the pg_type
 	 * entry, so we needn't record them here.  Likewise, TOAST tables don't
 	 * need a namespace dependency (they live in a pinned namespace) nor an
-	 * owner dependency (they depend indirectly through the parent table),
-	 * nor should they have any ACL entries.
+	 * owner dependency (they depend indirectly through the parent table), nor
+	 * should they have any ACL entries.
 	 *
 	 * Also, skip this in bootstrap mode, since we don't make dependencies
 	 * while bootstrapping.
@@ -1774,7 +1774,7 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr,
 						  ' ',
 						  ' ',
 						  ' ',
-						  NULL,	/* not an exclusion constraint */
+						  NULL, /* not an exclusion constraint */
 						  expr, /* Tree form of check constraint */
 						  ccbin,	/* Binary form of check constraint */
 						  ccsrc,	/* Source form of check constraint */
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index e90b92dcf6e5a65349861c583e1de86111407062..dea6889075f2bf3d29cf627767dd698e86fe0328 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.336 2010/02/14 18:42:13 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.337 2010/02/26 02:00:36 momjian Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -70,7 +70,7 @@
 
 
 /* Kluge for upgrade-in-place support */
-Oid binary_upgrade_next_index_relfilenode = InvalidOid;
+Oid			binary_upgrade_next_index_relfilenode = InvalidOid;
 
 /* state info for validate_index bulkdelete callback */
 typedef struct
@@ -272,7 +272,7 @@ ConstructTupleDescriptor(Relation heapRelation,
 		/*
 		 * Set the attribute name as specified by caller.
 		 */
-		if (colnames_item == NULL)	/* shouldn't happen */
+		if (colnames_item == NULL)		/* shouldn't happen */
 			elog(ERROR, "too few entries in colnames list");
 		namestrcpy(&to->attname, (const char *) lfirst(colnames_item));
 		colnames_item = lnext(colnames_item);
@@ -561,8 +561,8 @@ index_create(Oid heapRelationId,
 
 	/*
 	 * The index will be in the same namespace as its parent table, and is
-	 * shared across databases if and only if the parent is.  Likewise,
-	 * it will use the relfilenode map if and only if the parent does.
+	 * shared across databases if and only if the parent is.  Likewise, it
+	 * will use the relfilenode map if and only if the parent does.
 	 */
 	namespaceId = RelationGetNamespace(heapRelation);
 	shared_relation = heapRelation->rd_rel->relisshared;
@@ -592,8 +592,8 @@ index_create(Oid heapRelationId,
 				 errmsg("concurrent index creation on system catalog tables is not supported")));
 
 	/*
-	 * This case is currently not supported, but there's no way to ask for
-	 * it in the grammar anyway, so it can't happen.
+	 * This case is currently not supported, but there's no way to ask for it
+	 * in the grammar anyway, so it can't happen.
 	 */
 	if (concurrent && is_exclusion)
 		ereport(ERROR,
@@ -775,7 +775,7 @@ index_create(Oid heapRelationId,
 										   indexInfo->ii_KeyAttrNumbers,
 										   indexInfo->ii_NumIndexAttrs,
 										   InvalidOid,	/* no domain */
-										   indexRelationId,	/* index OID */
+										   indexRelationId,		/* index OID */
 										   InvalidOid,	/* no foreign key */
 										   NULL,
 										   NULL,
@@ -810,7 +810,7 @@ index_create(Oid heapRelationId,
 				CreateTrigStmt *trigger;
 
 				heapRel = makeRangeVar(get_namespace_name(namespaceId),
-									   pstrdup(RelationGetRelationName(heapRelation)),
+							  pstrdup(RelationGetRelationName(heapRelation)),
 									   -1);
 
 				trigger = makeNode(CreateTrigStmt);
@@ -1436,8 +1436,8 @@ index_build(Relation heapRelation,
 	Assert(PointerIsValid(stats));
 
 	/*
-	 * If it's for an exclusion constraint, make a second pass over the
-	 * heap to verify that the constraint is satisfied.
+	 * If it's for an exclusion constraint, make a second pass over the heap
+	 * to verify that the constraint is satisfied.
 	 */
 	if (indexInfo->ii_ExclusionOps != NULL)
 		IndexCheckExclusion(heapRelation, indexRelation, indexInfo);
@@ -1710,7 +1710,7 @@ IndexBuildHeapScan(Relation heapRelation,
 					/*
 					 * Since caller should hold ShareLock or better, normally
 					 * the only way to see this is if it was inserted earlier
-					 * in our own transaction.  However, it can happen in
+					 * in our own transaction.	However, it can happen in
 					 * system catalogs, since we tend to release write lock
 					 * before commit there.  Give a warning if neither case
 					 * applies.
@@ -1761,9 +1761,9 @@ IndexBuildHeapScan(Relation heapRelation,
 
 						/*
 						 * If we are performing uniqueness checks, assuming
-						 * the tuple is dead could lead to missing a uniqueness
-						 * violation.  In that case we wait for the deleting
-						 * transaction to finish and check again.
+						 * the tuple is dead could lead to missing a
+						 * uniqueness violation.  In that case we wait for the
+						 * deleting transaction to finish and check again.
 						 */
 						if (checking_uniqueness)
 						{
@@ -2472,9 +2472,9 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
 
 	/*
 	 * If the index is marked invalid or not ready (ie, it's from a failed
-	 * CREATE INDEX CONCURRENTLY), and we didn't skip a uniqueness check,
-	 * we can now mark it valid.  This allows REINDEX to be used to clean up
-	 * in such cases.
+	 * CREATE INDEX CONCURRENTLY), and we didn't skip a uniqueness check, we
+	 * can now mark it valid.  This allows REINDEX to be used to clean up in
+	 * such cases.
 	 *
 	 * We can also reset indcheckxmin, because we have now done a
 	 * non-concurrent index build, *except* in the case where index_build
@@ -2568,7 +2568,7 @@ reindex_relation(Oid relid, bool toast_too, bool heap_rebuilt)
 	 * It is okay to not insert entries into the indexes we have not processed
 	 * yet because all of this is transaction-safe.  If we fail partway
 	 * through, the updated rows are dead and it doesn't matter whether they
-	 * have index entries.  Also, a new pg_class index will be created with a
+	 * have index entries.	Also, a new pg_class index will be created with a
 	 * correct entry for its own pg_class row because we do
 	 * RelationSetNewRelfilenode() before we do index_build().
 	 *
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index ac68b9639315899f28497eeb77f880654e029a28..e2e19985506b8ff838caf0f3169c0cb646181a2e 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -13,7 +13,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.124 2010/02/20 21:24:01 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.125 2010/02/26 02:00:36 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -190,7 +190,7 @@ static void RemoveTempRelations(Oid tempNamespaceId);
 static void RemoveTempRelationsCallback(int code, Datum arg);
 static void NamespaceCallback(Datum arg, int cacheid, ItemPointer tuplePtr);
 static bool MatchNamedCall(HeapTuple proctup, int nargs, List *argnames,
-						   int **argnumbers);
+			   int **argnumbers);
 
 /* These don't really need to appear in any header file */
 Datum		pg_table_is_visible(PG_FUNCTION_ARGS);
@@ -333,7 +333,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
 		}
 		/* use exact schema given */
 		namespaceId = GetSysCacheOid1(NAMESPACENAME,
-									  CStringGetDatum(newRelation->schemaname));
+								   CStringGetDatum(newRelation->schemaname));
 		if (!OidIsValid(namespaceId))
 			ereport(ERROR,
 					(errcode(ERRCODE_UNDEFINED_SCHEMA),
@@ -689,10 +689,9 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
 			/*
 			 * Call uses named or mixed notation
 			 *
-			 * Named or mixed notation can match a variadic function only
-			 * if expand_variadic is off; otherwise there is no way to match
-			 * the presumed-nameless parameters expanded from the variadic
-			 * array.
+			 * Named or mixed notation can match a variadic function only if
+			 * expand_variadic is off; otherwise there is no way to match the
+			 * presumed-nameless parameters expanded from the variadic array.
 			 */
 			if (OidIsValid(procform->provariadic) && expand_variadic)
 				continue;
@@ -702,7 +701,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
 			/*
 			 * Check argument count.
 			 */
-			Assert(nargs >= 0);			/* -1 not supported with argnames */
+			Assert(nargs >= 0); /* -1 not supported with argnames */
 
 			if (pronargs > nargs && expand_defaults)
 			{
@@ -732,7 +731,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames,
 			 * Call uses positional notation
 			 *
 			 * Check if function is variadic, and get variadic element type if
-			 * so.  If expand_variadic is false, we should just ignore
+			 * so.	If expand_variadic is false, we should just ignore
 			 * variadic-ness.
 			 */
 			if (pronargs <= nargs && expand_variadic)
@@ -1020,9 +1019,9 @@ MatchNamedCall(HeapTuple proctup, int nargs, List *argnames,
 	/* now examine the named args */
 	foreach(lc, argnames)
 	{
-		char   *argname = (char *) lfirst(lc);
-		bool	found;
-		int		i;
+		char	   *argname = (char *) lfirst(lc);
+		bool		found;
+		int			i;
 
 		pp = 0;
 		found = false;
@@ -1058,7 +1057,7 @@ MatchNamedCall(HeapTuple proctup, int nargs, List *argnames,
 	/* Check for default arguments */
 	if (nargs < pronargs)
 	{
-		int		first_arg_with_default = pronargs - procform->pronargdefaults;
+		int			first_arg_with_default = pronargs - procform->pronargdefaults;
 
 		for (pp = numposargs; pp < pronargs; pp++)
 		{
@@ -3021,10 +3020,10 @@ InitTempTableNamespace(void)
 	 * Do not allow a Hot Standby slave session to make temp tables.  Aside
 	 * from problems with modifying the system catalogs, there is a naming
 	 * conflict: pg_temp_N belongs to the session with BackendId N on the
-	 * master, not to a slave session with the same BackendId.  We should
-	 * not be able to get here anyway due to XactReadOnly checks, but let's
-	 * just make real sure.  Note that this also backstops various operations
-	 * that allow XactReadOnly transactions to modify temp tables; they'd need
+	 * master, not to a slave session with the same BackendId.	We should not
+	 * be able to get here anyway due to XactReadOnly checks, but let's just
+	 * make real sure.	Note that this also backstops various operations that
+	 * allow XactReadOnly transactions to modify temp tables; they'd need
 	 * RecoveryInProgress checks if not for this.
 	 */
 	if (RecoveryInProgress())
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index ddb16a27459ed133421104cb258b990b8c1fbd38..9672ecf0aa1d0baa9f151eae72f0987725aa178c 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.105 2010/02/14 18:42:13 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.106 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -360,8 +360,8 @@ lookup_agg_function(List *fnName,
 			ereport(ERROR,
 					(errcode(ERRCODE_DATATYPE_MISMATCH),
 					 errmsg("function %s requires run-time type coercion",
-					 func_signature_string(fnName, nargs,
-										   NIL, true_oid_array))));
+							func_signature_string(fnName, nargs,
+												  NIL, true_oid_array))));
 	}
 
 	/* Check aggregate creator has permission to call the function */
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 6d453538b32555a6b7c0fd3edf38c4d32af7d2ac..84dab8eb218e51d1b0f8e9b4fc8dbd845b7af5d8 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.52 2010/02/14 18:42:13 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.53 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -298,9 +298,9 @@ CreateConstraintEntry(const char *constraintName,
 	{
 		/*
 		 * Register normal dependency on the unique index that supports a
-		 * foreign-key constraint.  (Note: for indexes associated with
-		 * unique or primary-key constraints, the dependency runs the other
-		 * way, and is not made here.)
+		 * foreign-key constraint.	(Note: for indexes associated with unique
+		 * or primary-key constraints, the dependency runs the other way, and
+		 * is not made here.)
 		 */
 		ObjectAddress relobject;
 
@@ -342,11 +342,11 @@ CreateConstraintEntry(const char *constraintName,
 	}
 
 	/*
-	 * We don't bother to register dependencies on the exclusion operators
-	 * of an exclusion constraint.  We assume they are members of the opclass
-	 * supporting the index, so there's an indirect dependency via that.
-	 * (This would be pretty dicey for cross-type operators, but exclusion
-	 * operators can never be cross-type.)
+	 * We don't bother to register dependencies on the exclusion operators of
+	 * an exclusion constraint.  We assume they are members of the opclass
+	 * supporting the index, so there's an indirect dependency via that. (This
+	 * would be pretty dicey for cross-type operators, but exclusion operators
+	 * can never be cross-type.)
 	 */
 
 	if (conExpr != NULL)
@@ -764,8 +764,8 @@ GetConstraintByName(Oid relid, const char *conname)
 			if (OidIsValid(conOid))
 				ereport(ERROR,
 						(errcode(ERRCODE_DUPLICATE_OBJECT),
-						 errmsg("table \"%s\" has multiple constraints named \"%s\"",
-								get_rel_name(relid), conname)));
+				 errmsg("table \"%s\" has multiple constraints named \"%s\"",
+						get_rel_name(relid), conname)));
 			conOid = HeapTupleGetOid(tuple);
 		}
 	}
diff --git a/src/backend/catalog/pg_db_role_setting.c b/src/backend/catalog/pg_db_role_setting.c
index 3063186e612ab164a3d24994d4754bef80c3ce68..6687cbf21320b470549f998e39da818246c23e78 100644
--- a/src/backend/catalog/pg_db_role_setting.c
+++ b/src/backend/catalog/pg_db_role_setting.c
@@ -1,12 +1,12 @@
 /*
  * pg_db_role_setting.c
  *		Routines to support manipulation of the pg_db_role_setting relation
- *    
+ *
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *		$PostgreSQL: pgsql/src/backend/catalog/pg_db_role_setting.c,v 1.2 2010/01/02 16:57:36 momjian Exp $
+ *		$PostgreSQL: pgsql/src/backend/catalog/pg_db_role_setting.c,v 1.3 2010/02/26 02:00:37 momjian Exp $
  */
 #include "postgres.h"
 
@@ -51,11 +51,11 @@ AlterSetting(Oid databaseid, Oid roleid, VariableSetStmt *setstmt)
 	 *
 	 * - in RESET ALL, simply delete the pg_db_role_setting tuple (if any)
 	 *
-	 * - in other commands, if there's a tuple in pg_db_role_setting, update it;
-	 *   if it ends up empty, delete it
+	 * - in other commands, if there's a tuple in pg_db_role_setting, update
+	 * it; if it ends up empty, delete it
 	 *
 	 * - otherwise, insert a new pg_db_role_setting tuple, but only if the
-	 *   command is not RESET
+	 * command is not RESET
 	 */
 	if (setstmt->kind == VAR_RESET_ALL)
 	{
@@ -111,7 +111,7 @@ AlterSetting(Oid databaseid, Oid roleid, VariableSetStmt *setstmt)
 		ArrayType  *a;
 
 		memset(nulls, false, sizeof(nulls));
-		
+
 		a = GUCArrayAdd(NULL, setstmt->name, valuestr);
 
 		values[Anum_pg_db_role_setting_setdatabase - 1] =
@@ -134,17 +134,17 @@ AlterSetting(Oid databaseid, Oid roleid, VariableSetStmt *setstmt)
 
 /*
  * Drop some settings from the catalog.  These can be for a particular
- * database, or for a particular role.  (It is of course possible to do both
+ * database, or for a particular role.	(It is of course possible to do both
  * too, but it doesn't make sense for current uses.)
  */
 void
 DropSetting(Oid databaseid, Oid roleid)
 {
-	Relation		relsetting;
-	HeapScanDesc	scan;
-	ScanKeyData		keys[2];
-	HeapTuple		tup;
-	int				numkeys = 0;
+	Relation	relsetting;
+	HeapScanDesc scan;
+	ScanKeyData keys[2];
+	HeapTuple	tup;
+	int			numkeys = 0;
 
 	relsetting = heap_open(DbRoleSettingRelationId, RowExclusiveLock);
 
@@ -190,9 +190,9 @@ DropSetting(Oid databaseid, Oid roleid)
 void
 ApplySetting(Oid databaseid, Oid roleid, Relation relsetting, GucSource source)
 {
-	SysScanDesc		scan;
-	ScanKeyData		keys[2];
-	HeapTuple		tup;
+	SysScanDesc scan;
+	ScanKeyData keys[2];
+	HeapTuple	tup;
 
 	ScanKeyInit(&keys[0],
 				Anum_pg_db_role_setting_setdatabase,
@@ -209,8 +209,8 @@ ApplySetting(Oid databaseid, Oid roleid, Relation relsetting, GucSource source)
 							  SnapshotNow, 2, keys);
 	while (HeapTupleIsValid(tup = systable_getnext(scan)))
 	{
-		bool	isnull;
-		Datum	datum;
+		bool		isnull;
+		Datum		datum;
 
 		datum = heap_getattr(tup, Anum_pg_db_role_setting_setconfig,
 							 RelationGetDescr(relsetting), &isnull);
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 446b865cf89b8c07bd161ea0e9374f0a01525996..dba215f6122f933dcb5a6411be992501b1d41428 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/pg_enum.c,v 1.13 2010/01/02 16:57:36 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/pg_enum.c,v 1.14 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -59,32 +59,32 @@ EnumValuesCreate(Oid enumTypeOid, List *vals,
 	tupDesc = pg_enum->rd_att;
 
 	/*
-	 *	Allocate oids
+	 * Allocate oids
 	 */
 	oids = (Oid *) palloc(num_elems * sizeof(Oid));
 	if (OidIsValid(binary_upgrade_next_pg_enum_oid))
 	{
-			if (num_elems != 1)
-				ereport(ERROR,
-						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-						 errmsg("EnumValuesCreate() can only set a single OID")));
-			oids[0] = binary_upgrade_next_pg_enum_oid;
-			binary_upgrade_next_pg_enum_oid = InvalidOid;
-	}	
+		if (num_elems != 1)
+			ereport(ERROR,
+					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+					 errmsg("EnumValuesCreate() can only set a single OID")));
+		oids[0] = binary_upgrade_next_pg_enum_oid;
+		binary_upgrade_next_pg_enum_oid = InvalidOid;
+	}
 	else
 	{
 		/*
-		 * While this method does not absolutely guarantee that we generate
-		 * no duplicate oids (since we haven't entered each oid into the
-		 * table before allocating the next), trouble could only occur if
-		 * the oid counter wraps all the way around before we finish. Which
-		 * seems unlikely.
+		 * While this method does not absolutely guarantee that we generate no
+		 * duplicate oids (since we haven't entered each oid into the table
+		 * before allocating the next), trouble could only occur if the oid
+		 * counter wraps all the way around before we finish. Which seems
+		 * unlikely.
 		 */
 		for (elemno = 0; elemno < num_elems; elemno++)
 		{
 			/*
-			 *	The pg_enum.oid is stored in user tables.  This oid must be
-			 *	preserved by binary upgrades.
+			 * The pg_enum.oid is stored in user tables.  This oid must be
+			 * preserved by binary upgrades.
 			 */
 			oids[elemno] = GetNewOid(pg_enum);
 		}
diff --git a/src/backend/catalog/pg_inherits.c b/src/backend/catalog/pg_inherits.c
index 14bef8bf3ba2d2330639d6c47287e715b05ce933..d852e3554be4ba090242433a3a5c7d6cb0287550 100644
--- a/src/backend/catalog/pg_inherits.c
+++ b/src/backend/catalog/pg_inherits.c
@@ -13,7 +13,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/pg_inherits.c,v 1.7 2010/02/14 18:42:13 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/pg_inherits.c,v 1.8 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -158,7 +158,8 @@ find_inheritance_children(Oid parentrelId, LOCKMODE lockmode)
 List *
 find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
 {
-	List	   *rels_list, *rel_numparents;
+	List	   *rels_list,
+			   *rel_numparents;
 	ListCell   *l;
 
 	/*
@@ -189,8 +190,8 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
 		 */
 		foreach(lc, currentchildren)
 		{
-			Oid		child_oid = lfirst_oid(lc);
-			bool	found = false;
+			Oid			child_oid = lfirst_oid(lc);
+			bool		found = false;
 			ListCell   *lo;
 			ListCell   *li;
 
diff --git a/src/backend/catalog/pg_largeobject.c b/src/backend/catalog/pg_largeobject.c
index 500a6c9bae664dcc84eb1d8778ebbbdbc2cc1380..0be0be93cd271ed3f9cd9c262dd49785439375a6 100644
--- a/src/backend/catalog/pg_largeobject.c
+++ b/src/backend/catalog/pg_largeobject.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/pg_largeobject.c,v 1.38 2010/02/17 04:19:39 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/pg_largeobject.c,v 1.39 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -79,7 +79,7 @@ LargeObjectCreate(Oid loid)
 }
 
 /*
- * Drop a large object having the given LO identifier.  Both the data pages
+ * Drop a large object having the given LO identifier.	Both the data pages
  * and metadata must be dropped.
  */
 void
@@ -103,7 +103,7 @@ LargeObjectDrop(Oid loid)
 	ScanKeyInit(&skey[0],
 				ObjectIdAttributeNumber,
 				BTEqualStrategyNumber, F_OIDEQ,
-				ObjectIdGetDatum(loid));	
+				ObjectIdGetDatum(loid));
 
 	scan = systable_beginscan(pg_lo_meta,
 							  LargeObjectMetadataOidIndexId, true,
@@ -150,10 +150,10 @@ LargeObjectDrop(Oid loid)
 void
 LargeObjectAlterOwner(Oid loid, Oid newOwnerId)
 {
-	Form_pg_largeobject_metadata	form_lo_meta;
+	Form_pg_largeobject_metadata form_lo_meta;
 	Relation	pg_lo_meta;
-	ScanKeyData	skey[1];
-	SysScanDesc	scan;
+	ScanKeyData skey[1];
+	SysScanDesc scan;
 	HeapTuple	oldtup;
 	HeapTuple	newtup;
 
@@ -189,9 +189,8 @@ LargeObjectAlterOwner(Oid loid, Oid newOwnerId)
 		if (!superuser())
 		{
 			/*
-			 * lo_compat_privileges is not checked here, because ALTER
-			 * LARGE OBJECT ... OWNER did not exist at all prior to
-			 * PostgreSQL 9.0.
+			 * lo_compat_privileges is not checked here, because ALTER LARGE
+			 * OBJECT ... OWNER did not exist at all prior to PostgreSQL 9.0.
 			 *
 			 * We must be the owner of the existing object.
 			 */
@@ -213,8 +212,8 @@ LargeObjectAlterOwner(Oid loid, Oid newOwnerId)
 		replaces[Anum_pg_largeobject_metadata_lomowner - 1] = true;
 
 		/*
-		 * Determine the modified ACL for the new owner.
-		 * This is only necessary when the ACL is non-null.
+		 * Determine the modified ACL for the new owner. This is only
+		 * necessary when the ACL is non-null.
 		 */
 		aclDatum = heap_getattr(oldtup,
 								Anum_pg_largeobject_metadata_lomacl,
@@ -261,8 +260,8 @@ bool
 LargeObjectExists(Oid loid)
 {
 	Relation	pg_lo_meta;
-	ScanKeyData	skey[1];
-	SysScanDesc	sd;
+	ScanKeyData skey[1];
+	SysScanDesc sd;
 	HeapTuple	tuple;
 	bool		retval = false;
 
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index fc87a80c4d1e3bd11bd061572e71ab3b51bc1fa6..2b216098939cf68594b5935304bf0db00c33bf30 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.171 2010/02/14 18:42:13 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.172 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -396,8 +396,8 @@ ProcedureCreate(const char *procedureName,
 
 		/*
 		 * If there were any named input parameters, check to make sure the
-		 * names have not been changed, as this could break existing calls.
-		 * We allow adding names to formerly unnamed parameters, though.
+		 * names have not been changed, as this could break existing calls. We
+		 * allow adding names to formerly unnamed parameters, though.
 		 */
 		proargnames = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup,
 									  Anum_pg_proc_proargnames,
@@ -431,11 +431,11 @@ ProcedureCreate(const char *procedureName,
 					strcmp(old_arg_names[j], new_arg_names[j]) != 0)
 					ereport(ERROR,
 							(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
-							 errmsg("cannot change name of input parameter \"%s\"",
-									old_arg_names[j]),
+					   errmsg("cannot change name of input parameter \"%s\"",
+							  old_arg_names[j]),
 							 errhint("Use DROP FUNCTION first.")));
 			}
-		 }
+		}
 
 		/*
 		 * If there are existing defaults, check compatibility: redefinition
@@ -845,7 +845,7 @@ sql_function_parse_error_callback(void *arg)
 
 /*
  * Adjust a syntax error occurring inside the function body of a CREATE
- * FUNCTION or DO command.  This can be used by any function validator or
+ * FUNCTION or DO command.	This can be used by any function validator or
  * anonymous-block handler, not only for SQL-language functions.
  * It is assumed that the syntax error position is initially relative to the
  * function body string (as passed in).  If possible, we adjust the position
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 999776402dab00d26452c58e9fc6d52b8f3e7396..df65e1086ee57a65c05587c4b0f62d1e0a1e01fa 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.39 2010/02/14 18:42:13 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.40 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1015,7 +1015,7 @@ shdepLockAndCheckObject(Oid classId, Oid objectId)
 				pfree(database);
 				break;
 			}
-		
+
 
 		default:
 			elog(ERROR, "unrecognized shared classId: %u", classId);
@@ -1351,9 +1351,10 @@ shdepReassignOwned(List *roleids, Oid newrole)
 					break;
 
 				case DefaultAclRelationId:
+
 					/*
-					 * Ignore default ACLs; they should be handled by
-					 * DROP OWNED, not REASSIGN OWNED.
+					 * Ignore default ACLs; they should be handled by DROP
+					 * OWNED, not REASSIGN OWNED.
 					 */
 					break;
 
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index e8ff841a72580fddd627d7a70a5ba9d785803eb7..d4fdea91aa08ff9be2192820791eb1248df62bbf 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.132 2010/02/14 18:42:13 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.133 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -32,7 +32,7 @@
 #include "utils/rel.h"
 #include "utils/syscache.h"
 
-Oid binary_upgrade_next_pg_type_oid = InvalidOid;
+Oid			binary_upgrade_next_pg_type_oid = InvalidOid;
 
 /* ----------------------------------------------------------------
  *		TypeShellMake
@@ -424,7 +424,7 @@ TypeCreate(Oid newTypeOid,
 			binary_upgrade_next_pg_type_oid = InvalidOid;
 		}
 		/* else allow system to assign oid */
-		
+
 		typeObjectId = simple_heap_insert(pg_type_desc, tup);
 	}
 
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index 363d21ac613d40d0e08fe40b4fb6d6bccc4a26cd..86e7daa1a03c9cb358578e1fcc0c3c7fcf132ab3 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/catalog/toasting.c,v 1.31 2010/02/14 18:42:13 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/catalog/toasting.c,v 1.32 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -32,9 +32,9 @@
 #include "utils/syscache.h"
 
 /* Kluges for upgrade-in-place support */
-extern Oid binary_upgrade_next_toast_relfilenode;
+extern Oid	binary_upgrade_next_toast_relfilenode;
 
-Oid binary_upgrade_next_pg_type_toast_oid = InvalidOid;
+Oid			binary_upgrade_next_pg_type_toast_oid = InvalidOid;
 
 static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
 				   Datum reloptions);
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index 233ac1bc3d2311a505b9f9e0229e42789849ed5f..26a28d55a765b8e147758ade404ce023f630753a 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.34 2010/02/01 19:28:56 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.35 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -126,7 +126,7 @@ ExecRenameStmt(RenameStmt *stmt)
 								  stmt->subname,		/* old att name */
 								  stmt->newname,		/* new att name */
 								  interpretInhOption(stmt->relation->inhOpt),	/* recursive? */
-								  0);			/* expected inhcount */
+								  0);	/* expected inhcount */
 						break;
 					case OBJECT_TRIGGER:
 						renametrig(relid,
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 32fb44051c6a52ac399abafda626eefc291bfdef..22734b7619521ebea4a4baf9ec31ff1c01b5308c 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.151 2010/02/14 18:42:13 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.152 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -99,11 +99,11 @@ static double random_fract(void);
 static double init_selection_state(int n);
 static double get_next_S(double t, int n, double *stateptr);
 static int	compare_rows(const void *a, const void *b);
-static int	acquire_inherited_sample_rows(Relation onerel,
+static int acquire_inherited_sample_rows(Relation onerel,
 							  HeapTuple *rows, int targrows,
 							  double *totalrows, double *totaldeadrows);
 static void update_attstats(Oid relid, bool inh,
-							int natts, VacAttrStats **vacattrstats);
+				int natts, VacAttrStats **vacattrstats);
 static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
 static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
 
@@ -289,8 +289,8 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
 						RelationGetRelationName(onerel))));
 
 	/*
-	 * Set up a working context so that we can easily free whatever junk
-	 * gets created.
+	 * Set up a working context so that we can easily free whatever junk gets
+	 * created.
 	 */
 	anl_context = AllocSetContextCreate(CurrentMemoryContext,
 										"Analyze",
@@ -364,8 +364,8 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
 	 * Open all indexes of the relation, and see if there are any analyzable
 	 * columns in the indexes.	We do not analyze index columns if there was
 	 * an explicit column list in the ANALYZE command, however.  If we are
-	 * doing a recursive scan, we don't want to touch the parent's indexes
-	 * at all.
+	 * doing a recursive scan, we don't want to touch the parent's indexes at
+	 * all.
 	 */
 	if (!inh)
 		vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);
@@ -495,7 +495,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
 		{
 			VacAttrStats *stats = vacattrstats[i];
 			AttributeOpts *aopt =
-				get_attribute_options(onerel->rd_id, stats->attr->attnum);
+			get_attribute_options(onerel->rd_id, stats->attr->attnum);
 
 			stats->rows = rows;
 			stats->tupDesc = onerel->rd_att;
@@ -510,8 +510,9 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
 			 */
 			if (aopt != NULL)
 			{
-				float8	n_distinct =
-					inh ? aopt->n_distinct_inherited : aopt->n_distinct;
+				float8		n_distinct =
+				inh ? aopt->n_distinct_inherited : aopt->n_distinct;
+
 				if (n_distinct != 0.0)
 					stats->stadistinct = n_distinct;
 			}
@@ -546,8 +547,8 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
 	}
 
 	/*
-	 * Update pages/tuples stats in pg_class, but not if we're inside a
-	 * VACUUM that got a more precise number.
+	 * Update pages/tuples stats in pg_class, but not if we're inside a VACUUM
+	 * that got a more precise number.
 	 */
 	if (update_reltuples)
 		vac_update_relstats(onerel,
@@ -574,10 +575,9 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
 	}
 
 	/*
-	 * Report ANALYZE to the stats collector, too; likewise, tell it to
-	 * adopt these numbers only if we're not inside a VACUUM that got a
-	 * better number.  However, a call with inh = true shouldn't reset
-	 * the stats.
+	 * Report ANALYZE to the stats collector, too; likewise, tell it to adopt
+	 * these numbers only if we're not inside a VACUUM that got a better
+	 * number.	However, a call with inh = true shouldn't reset the stats.
 	 */
 	if (!inh)
 		pgstat_report_analyze(onerel, update_reltuples,
@@ -762,8 +762,8 @@ compute_index_stats(Relation onerel, double totalrows,
 			{
 				VacAttrStats *stats = thisdata->vacattrstats[i];
 				AttributeOpts *aopt =
-					get_attribute_options(stats->attr->attrelid,
-						stats->attr->attnum);
+				get_attribute_options(stats->attr->attrelid,
+									  stats->attr->attnum);
 
 				stats->exprvals = exprvals + i;
 				stats->exprnulls = exprnulls + i;
@@ -1436,10 +1436,10 @@ acquire_inherited_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
 	}
 
 	/*
-	 * Now sample rows from each relation, proportionally to its fraction
-	 * of the total block count.  (This might be less than desirable if the
-	 * child rels have radically different free-space percentages, but it's
-	 * not clear that it's worth working harder.)
+	 * Now sample rows from each relation, proportionally to its fraction of
+	 * the total block count.  (This might be less than desirable if the child
+	 * rels have radically different free-space percentages, but it's not
+	 * clear that it's worth working harder.)
 	 */
 	numrows = 0;
 	*totalrows = 0;
@@ -1451,7 +1451,7 @@ acquire_inherited_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
 
 		if (childblocks > 0)
 		{
-			int		childtargrows;
+			int			childtargrows;
 
 			childtargrows = (int) rint(targrows * childblocks / totalblocks);
 			/* Make sure we don't overrun due to roundoff error */
@@ -1478,10 +1478,10 @@ acquire_inherited_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
 
 					map = convert_tuples_by_name(RelationGetDescr(childrel),
 												 RelationGetDescr(onerel),
-												 gettext_noop("could not convert row type"));
+								 gettext_noop("could not convert row type"));
 					if (map != NULL)
 					{
-						int		j;
+						int			j;
 
 						for (j = 0; j < childrows; j++)
 						{
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index c7b60de32a9c5ea138aa991c254ac1f25ced22d4..11c84e7f3c89f734188e48b47edda8c52e1d6c30 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.154 2010/02/20 21:24:02 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.155 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -149,7 +149,7 @@
  *
  * This struct declaration has the maximal length, but in a real queue entry
  * the data area is only big enough for the actual channel and payload strings
- * (each null-terminated).  AsyncQueueEntryEmptySize is the minimum possible
+ * (each null-terminated).	AsyncQueueEntryEmptySize is the minimum possible
  * entry size, if both channel and payload strings are empty (but note it
  * doesn't include alignment padding).
  *
@@ -158,11 +158,11 @@
  */
 typedef struct AsyncQueueEntry
 {
-	int				length;		/* total allocated length of entry */
-	Oid				dboid;		/* sender's database OID */
-	TransactionId	xid;		/* sender's XID */
-	int32			srcPid;		/* sender's PID */
-	char			data[NAMEDATALEN + NOTIFY_PAYLOAD_MAX_LENGTH];
+	int			length;			/* total allocated length of entry */
+	Oid			dboid;			/* sender's database OID */
+	TransactionId xid;			/* sender's XID */
+	int32		srcPid;			/* sender's PID */
+	char		data[NAMEDATALEN + NOTIFY_PAYLOAD_MAX_LENGTH];
 } AsyncQueueEntry;
 
 /* Currently, no field of AsyncQueueEntry requires more than int alignment */
@@ -175,8 +175,8 @@ typedef struct AsyncQueueEntry
  */
 typedef struct QueuePosition
 {
-	int				page;		/* SLRU page number */
-	int				offset;		/* byte offset within page */
+	int			page;			/* SLRU page number */
+	int			offset;			/* byte offset within page */
 } QueuePosition;
 
 #define QUEUE_POS_PAGE(x)		((x).page)
@@ -202,11 +202,11 @@ typedef struct QueuePosition
  */
 typedef struct QueueBackendStatus
 {
-	int32			pid;		/* either a PID or InvalidPid */
-	QueuePosition	pos;		/* backend has read queue up to here */
+	int32		pid;			/* either a PID or InvalidPid */
+	QueuePosition pos;			/* backend has read queue up to here */
 } QueueBackendStatus;
 
-#define	InvalidPid				(-1)
+#define InvalidPid				(-1)
 
 /*
  * Shared memory state for LISTEN/NOTIFY (excluding its SLRU stuff)
@@ -230,15 +230,15 @@ typedef struct QueueBackendStatus
  */
 typedef struct AsyncQueueControl
 {
-	QueuePosition		head;		/* head points to the next free location */
-	QueuePosition 		tail;		/* the global tail is equivalent to the
-									   tail of the "slowest" backend */
-	TimestampTz			lastQueueFillWarn;	/* time of last queue-full msg */
-	QueueBackendStatus	backend[1];	/* actually of length MaxBackends+1 */
+	QueuePosition head;			/* head points to the next free location */
+	QueuePosition tail;			/* the global tail is equivalent to the tail
+								 * of the "slowest" backend */
+	TimestampTz lastQueueFillWarn;		/* time of last queue-full msg */
+	QueueBackendStatus backend[1];		/* actually of length MaxBackends+1 */
 	/* DO NOT ADD FURTHER STRUCT MEMBERS HERE */
 } AsyncQueueControl;
 
-static AsyncQueueControl   *asyncQueueControl;
+static AsyncQueueControl *asyncQueueControl;
 
 #define QUEUE_HEAD					(asyncQueueControl->head)
 #define QUEUE_TAIL					(asyncQueueControl->tail)
@@ -248,11 +248,11 @@ static AsyncQueueControl   *asyncQueueControl;
 /*
  * The SLRU buffer area through which we access the notification queue
  */
-static SlruCtlData			AsyncCtlData;
+static SlruCtlData AsyncCtlData;
 
 #define AsyncCtl					(&AsyncCtlData)
 #define QUEUE_PAGESIZE				BLCKSZ
-#define QUEUE_FULL_WARN_INTERVAL	5000	/* warn at most once every 5s */
+#define QUEUE_FULL_WARN_INTERVAL	5000		/* warn at most once every 5s */
 
 /*
  * slru.c currently assumes that all filenames are four characters of hex
@@ -265,7 +265,7 @@ static SlruCtlData			AsyncCtlData;
  *
  * The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2
  * pages, because more than that would confuse slru.c into thinking there
- * was a wraparound condition.  With the default BLCKSZ this means there
+ * was a wraparound condition.	With the default BLCKSZ this means there
  * can be up to 8GB of queued-and-not-read data.
  *
  * Note: it's possible to redefine QUEUE_MAX_PAGE with a smaller multiple of
@@ -309,7 +309,7 @@ static List *upperPendingActions = NIL; /* list of upper-xact lists */
 
 /*
  * State for outbound notifies consists of a list of all channels+payloads
- * NOTIFYed in the current transaction.	We do not actually perform a NOTIFY
+ * NOTIFYed in the current transaction. We do not actually perform a NOTIFY
  * until and unless the transaction commits.  pendingNotifies is NIL if no
  * NOTIFYs have been done in the current transaction.
  *
@@ -325,11 +325,11 @@ static List *upperPendingActions = NIL; /* list of upper-xact lists */
  */
 typedef struct Notification
 {
-	char		   *channel;	/* channel name */
-	char		   *payload;	/* payload string (can be empty) */
+	char	   *channel;		/* channel name */
+	char	   *payload;		/* payload string (can be empty) */
 } Notification;
 
-static List *pendingNotifies = NIL;				/* list of Notifications */
+static List *pendingNotifies = NIL;		/* list of Notifications */
 
 static List *upperPendingNotifies = NIL;		/* list of upper-xact lists */
 
@@ -348,8 +348,10 @@ static volatile sig_atomic_t notifyInterruptOccurred = 0;
 
 /* True if we've registered an on_shmem_exit cleanup */
 static bool unlistenExitRegistered = false;
+
 /* has this backend sent notifications in the current transaction? */
 static bool backendHasSentNotifications = false;
+
 /* has this backend executed its first LISTEN in the current transaction? */
 static bool backendHasExecutedInitialListen = false;
 
@@ -380,8 +382,8 @@ static bool asyncQueueProcessPageEntries(QueuePosition *current,
 static void asyncQueueAdvanceTail(void);
 static void ProcessIncomingNotify(void);
 static void NotifyMyFrontEnd(const char *channel,
-							 const char *payload,
-							 int32 srcPid);
+				 const char *payload,
+				 int32 srcPid);
 static bool AsyncExistsPendingNotify(const char *channel, const char *payload);
 static void ClearPendingActionsAndNotifies(void);
 
@@ -408,17 +410,17 @@ asyncQueuePagePrecedesLogically(int p, int q)
 	int			diff;
 
 	/*
-	 * We have to compare modulo (QUEUE_MAX_PAGE+1)/2.  Both inputs should
-	 * be in the range 0..QUEUE_MAX_PAGE.
+	 * We have to compare modulo (QUEUE_MAX_PAGE+1)/2.	Both inputs should be
+	 * in the range 0..QUEUE_MAX_PAGE.
 	 */
 	Assert(p >= 0 && p <= QUEUE_MAX_PAGE);
 	Assert(q >= 0 && q <= QUEUE_MAX_PAGE);
 
 	diff = p - q;
-	if (diff >= ((QUEUE_MAX_PAGE+1)/2))
-		diff -= QUEUE_MAX_PAGE+1;
-	else if (diff < -((QUEUE_MAX_PAGE+1)/2))
-		diff += QUEUE_MAX_PAGE+1;
+	if (diff >= ((QUEUE_MAX_PAGE + 1) / 2))
+		diff -= QUEUE_MAX_PAGE + 1;
+	else if (diff < -((QUEUE_MAX_PAGE + 1) / 2))
+		diff += QUEUE_MAX_PAGE + 1;
 	return diff < 0;
 }
 
@@ -428,7 +430,7 @@ asyncQueuePagePrecedesLogically(int p, int q)
 Size
 AsyncShmemSize(void)
 {
-	Size	size;
+	Size		size;
 
 	/* This had better match AsyncShmemInit */
 	size = mul_size(MaxBackends, sizeof(QueueBackendStatus));
@@ -445,9 +447,9 @@ AsyncShmemSize(void)
 void
 AsyncShmemInit(void)
 {
-	bool	found;
-	int		slotno;
-	Size	size;
+	bool		found;
+	int			slotno;
+	Size		size;
 
 	/*
 	 * Create or attach to the AsyncQueueControl structure.
@@ -468,7 +470,7 @@ AsyncShmemInit(void)
 	if (!found)
 	{
 		/* First time through, so initialize it */
-		int		i;
+		int			i;
 
 		SET_QUEUE_POS(QUEUE_HEAD, 0, 0);
 		SET_QUEUE_POS(QUEUE_TAIL, 0, 0);
@@ -598,8 +600,8 @@ Async_Notify(const char *channel, const char *payload)
 		n->payload = "";
 
 	/*
-	 * We want to preserve the order so we need to append every
-	 * notification. See comments at AsyncExistsPendingNotify().
+	 * We want to preserve the order so we need to append every notification.
+	 * See comments at AsyncExistsPendingNotify().
 	 */
 	pendingNotifies = lappend(pendingNotifies, n);
 
@@ -698,13 +700,13 @@ Async_UnlistenAll(void)
 Datum
 pg_listening_channels(PG_FUNCTION_ARGS)
 {
-	FuncCallContext	   *funcctx;
-	ListCell		  **lcp;
+	FuncCallContext *funcctx;
+	ListCell  **lcp;
 
 	/* stuff done only on the first call of the function */
 	if (SRF_IS_FIRSTCALL())
 	{
-		MemoryContext	oldcontext;
+		MemoryContext oldcontext;
 
 		/* create a function context for cross-call persistence */
 		funcctx = SRF_FIRSTCALL_INIT();
@@ -726,7 +728,7 @@ pg_listening_channels(PG_FUNCTION_ARGS)
 
 	while (*lcp != NULL)
 	{
-		char   *channel = (char *) lfirst(*lcp);
+		char	   *channel = (char *) lfirst(*lcp);
 
 		*lcp = lnext(*lcp);
 		SRF_RETURN_NEXT(funcctx, CStringGetTextDatum(channel));
@@ -818,9 +820,9 @@ PreCommit_Notify(void)
 
 		/*
 		 * Make sure that we have an XID assigned to the current transaction.
-		 * GetCurrentTransactionId is cheap if we already have an XID, but
-		 * not so cheap if we don't, and we'd prefer not to do that work
-		 * while holding AsyncQueueLock.
+		 * GetCurrentTransactionId is cheap if we already have an XID, but not
+		 * so cheap if we don't, and we'd prefer not to do that work while
+		 * holding AsyncQueueLock.
 		 */
 		(void) GetCurrentTransactionId();
 
@@ -850,7 +852,7 @@ PreCommit_Notify(void)
 		while (nextNotify != NULL)
 		{
 			/*
-			 * Add the pending notifications to the queue.  We acquire and
+			 * Add the pending notifications to the queue.	We acquire and
 			 * release AsyncQueueLock once per page, which might be overkill
 			 * but it does allow readers to get in while we're doing this.
 			 *
@@ -866,7 +868,7 @@ PreCommit_Notify(void)
 			if (asyncQueueIsFull())
 				ereport(ERROR,
 						(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
-						 errmsg("too many notifications in the NOTIFY queue")));
+					  errmsg("too many notifications in the NOTIFY queue")));
 			nextNotify = asyncQueueAddEntries(nextNotify);
 			LWLockRelease(AsyncQueueLock);
 		}
@@ -915,8 +917,8 @@ AtCommit_Notify(void)
 	}
 
 	/*
-	 * If we did an initial LISTEN, listenChannels now has the entry, so
-	 * we no longer need or want the flag to be set.
+	 * If we did an initial LISTEN, listenChannels now has the entry, so we no
+	 * longer need or want the flag to be set.
 	 */
 	backendHasExecutedInitialListen = false;
 
@@ -943,15 +945,15 @@ Exec_ListenPreCommit(void)
 		elog(DEBUG1, "Exec_ListenPreCommit(%d)", MyProcPid);
 
 	/*
-	 * We need this variable to detect an aborted initial LISTEN.
-	 * In that case we would set up our pointer but not listen on any channel.
-	 * This flag gets cleared in AtCommit_Notify or AtAbort_Notify().
+	 * We need this variable to detect an aborted initial LISTEN. In that case
+	 * we would set up our pointer but not listen on any channel. This flag
+	 * gets cleared in AtCommit_Notify or AtAbort_Notify().
 	 */
 	backendHasExecutedInitialListen = true;
 
 	/*
-	 * Before registering, make sure we will unlisten before dying.
-	 * (Note: this action does not get undone if we abort later.)
+	 * Before registering, make sure we will unlisten before dying. (Note:
+	 * this action does not get undone if we abort later.)
 	 */
 	if (!unlistenExitRegistered)
 	{
@@ -977,8 +979,8 @@ Exec_ListenPreCommit(void)
 	 * already-committed notifications. Still, we could get notifications that
 	 * have already committed before we started to LISTEN.
 	 *
-	 * Note that we are not yet listening on anything, so we won't deliver
-	 * any notification to the frontend.
+	 * Note that we are not yet listening on anything, so we won't deliver any
+	 * notification to the frontend.
 	 *
 	 * This will also advance the global tail pointer if possible.
 	 */
@@ -1020,8 +1022,8 @@ Exec_ListenCommit(const char *channel)
 static void
 Exec_UnlistenCommit(const char *channel)
 {
-	ListCell *q;
-	ListCell *prev;
+	ListCell   *q;
+	ListCell   *prev;
 
 	if (Trace_notify)
 		elog(DEBUG1, "Exec_UnlistenCommit(%s,%d)", channel, MyProcPid);
@@ -1029,7 +1031,7 @@ Exec_UnlistenCommit(const char *channel)
 	prev = NULL;
 	foreach(q, listenChannels)
 	{
-		char *lchan = (char *) lfirst(q);
+		char	   *lchan = (char *) lfirst(q);
 
 		if (strcmp(lchan, channel) == 0)
 		{
@@ -1078,12 +1080,12 @@ Exec_UnlistenAllCommit(void)
  * The reason that this is not done in AtCommit_Notify is that there is
  * a nonzero chance of errors here (for example, encoding conversion errors
  * while trying to format messages to our frontend).  An error during
- * AtCommit_Notify would be a PANIC condition.  The timing is also arranged
+ * AtCommit_Notify would be a PANIC condition.	The timing is also arranged
  * to ensure that a transaction's self-notifies are delivered to the frontend
  * before it gets the terminating ReadyForQuery message.
  *
  * Note that we send signals and process the queue even if the transaction
- * eventually aborted.  This is because we need to clean out whatever got
+ * eventually aborted.	This is because we need to clean out whatever got
  * added to the queue.
  *
  * NOTE: we are outside of any transaction here.
@@ -1098,9 +1100,9 @@ ProcessCompletedNotifies(void)
 		return;
 
 	/*
-	 * We reset the flag immediately; otherwise, if any sort of error
-	 * occurs below, we'd be locked up in an infinite loop, because
-	 * control will come right back here after error cleanup.
+	 * We reset the flag immediately; otherwise, if any sort of error occurs
+	 * below, we'd be locked up in an infinite loop, because control will come
+	 * right back here after error cleanup.
 	 */
 	backendHasSentNotifications = false;
 
@@ -1108,8 +1110,8 @@ ProcessCompletedNotifies(void)
 		elog(DEBUG1, "ProcessCompletedNotifies");
 
 	/*
-	 * We must run asyncQueueReadAllNotifications inside a transaction,
-	 * else bad things happen if it gets an error.
+	 * We must run asyncQueueReadAllNotifications inside a transaction, else
+	 * bad things happen if it gets an error.
 	 */
 	StartTransactionCommand();
 
@@ -1125,11 +1127,11 @@ ProcessCompletedNotifies(void)
 	{
 		/*
 		 * If we found no other listening backends, and we aren't listening
-		 * ourselves, then we must execute asyncQueueAdvanceTail to flush
-		 * the queue, because ain't nobody else gonna do it.  This prevents
-		 * queue overflow when we're sending useless notifies to nobody.
-		 * (A new listener could have joined since we looked, but if so this
-		 * is harmless.)
+		 * ourselves, then we must execute asyncQueueAdvanceTail to flush the
+		 * queue, because ain't nobody else gonna do it.  This prevents queue
+		 * overflow when we're sending useless notifies to nobody. (A new
+		 * listener could have joined since we looked, but if so this is
+		 * harmless.)
 		 */
 		asyncQueueAdvanceTail();
 	}
@@ -1164,14 +1166,14 @@ IsListeningOn(const char *channel)
 
 /*
  * Remove our entry from the listeners array when we are no longer listening
- * on any channel.  NB: must not fail if we're already not listening.
+ * on any channel.	NB: must not fail if we're already not listening.
  */
 static void
 asyncQueueUnregister(void)
 {
-	bool	  advanceTail;
+	bool		advanceTail;
 
-	Assert(listenChannels == NIL);				/* else caller error */
+	Assert(listenChannels == NIL);		/* else caller error */
 
 	LWLockAcquire(AsyncQueueLock, LW_SHARED);
 	/* check if entry is valid and oldest ... */
@@ -1200,7 +1202,7 @@ asyncQueueIsFull(void)
 	/*
 	 * The queue is full if creating a new head page would create a page that
 	 * logically precedes the current global tail pointer, ie, the head
-	 * pointer would wrap around compared to the tail.  We cannot create such
+	 * pointer would wrap around compared to the tail.	We cannot create such
 	 * a head page for fear of confusing slru.c.  For safety we round the tail
 	 * pointer back to a segment boundary (compare the truncation logic in
 	 * asyncQueueAdvanceTail).
@@ -1219,15 +1221,15 @@ asyncQueueIsFull(void)
 
 /*
  * Advance the QueuePosition to the next entry, assuming that the current
- * entry is of length entryLength.  If we jump to a new page the function
+ * entry is of length entryLength.	If we jump to a new page the function
  * returns true, else false.
  */
 static bool
 asyncQueueAdvance(QueuePosition *position, int entryLength)
 {
-	int		pageno = QUEUE_POS_PAGE(*position);
-	int		offset = QUEUE_POS_OFFSET(*position);
-	bool	pageJump = false;
+	int			pageno = QUEUE_POS_PAGE(*position);
+	int			offset = QUEUE_POS_OFFSET(*position);
+	bool		pageJump = false;
 
 	/*
 	 * Move to the next writing position: First jump over what we have just
@@ -1245,7 +1247,7 @@ asyncQueueAdvance(QueuePosition *position, int entryLength)
 	{
 		pageno++;
 		if (pageno > QUEUE_MAX_PAGE)
-			pageno = 0;							/* wrap around */
+			pageno = 0;			/* wrap around */
 		offset = 0;
 		pageJump = true;
 	}
@@ -1260,9 +1262,9 @@ asyncQueueAdvance(QueuePosition *position, int entryLength)
 static void
 asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe)
 {
-	size_t	channellen = strlen(n->channel);
-	size_t	payloadlen = strlen(n->payload);
-	int		entryLength;
+	size_t		channellen = strlen(n->channel);
+	size_t		payloadlen = strlen(n->payload);
+	int			entryLength;
 
 	Assert(channellen < NAMEDATALEN);
 	Assert(payloadlen < NOTIFY_PAYLOAD_MAX_LENGTH);
@@ -1288,7 +1290,7 @@ asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe)
  * the last byte which simplifies reading the page later.
  *
  * We are passed the list cell containing the next notification to write
- * and return the first still-unwritten cell back.  Eventually we will return
+ * and return the first still-unwritten cell back.	Eventually we will return
  * NULL indicating all is done.
  *
  * We are holding AsyncQueueLock already from the caller and grab AsyncCtlLock
@@ -1297,10 +1299,10 @@ asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe)
 static ListCell *
 asyncQueueAddEntries(ListCell *nextNotify)
 {
-	AsyncQueueEntry	qe;
-	int				pageno;
-	int				offset;
-	int				slotno;
+	AsyncQueueEntry qe;
+	int			pageno;
+	int			offset;
+	int			slotno;
 
 	/* We hold both AsyncQueueLock and AsyncCtlLock during this operation */
 	LWLockAcquire(AsyncCtlLock, LW_EXCLUSIVE);
@@ -1313,7 +1315,7 @@ asyncQueueAddEntries(ListCell *nextNotify)
 
 	while (nextNotify != NULL)
 	{
-		Notification   *n = (Notification *) lfirst(nextNotify);
+		Notification *n = (Notification *) lfirst(nextNotify);
 
 		/* Construct a valid queue entry in local variable qe */
 		asyncQueueNotificationToEntry(n, &qe);
@@ -1335,8 +1337,8 @@ asyncQueueAddEntries(ListCell *nextNotify)
 			 */
 			qe.length = QUEUE_PAGESIZE - offset;
 			qe.dboid = InvalidOid;
-			qe.data[0] = '\0'; /* empty channel */
-			qe.data[1] = '\0'; /* empty payload */
+			qe.data[0] = '\0';	/* empty channel */
+			qe.data[1] = '\0';	/* empty payload */
 		}
 
 		/* Now copy qe into the shared buffer page */
@@ -1348,12 +1350,12 @@ asyncQueueAddEntries(ListCell *nextNotify)
 		if (asyncQueueAdvance(&(QUEUE_HEAD), qe.length))
 		{
 			/*
-			 * Page is full, so we're done here, but first fill the next
-			 * page with zeroes.  The reason to do this is to ensure that
-			 * slru.c's idea of the head page is always the same as ours,
-			 * which avoids boundary problems in SimpleLruTruncate.  The
-			 * test in asyncQueueIsFull() ensured that there is room to
-			 * create this page without overrunning the queue.
+			 * Page is full, so we're done here, but first fill the next page
+			 * with zeroes.  The reason to do this is to ensure that slru.c's
+			 * idea of the head page is always the same as ours, which avoids
+			 * boundary problems in SimpleLruTruncate.	The test in
+			 * asyncQueueIsFull() ensured that there is room to create this
+			 * page without overrunning the queue.
 			 */
 			slotno = SimpleLruZeroPage(AsyncCtl, QUEUE_POS_PAGE(QUEUE_HEAD));
 			/* And exit the loop */
@@ -1377,24 +1379,24 @@ asyncQueueAddEntries(ListCell *nextNotify)
 static void
 asyncQueueFillWarning(void)
 {
-	int				headPage = QUEUE_POS_PAGE(QUEUE_HEAD);
-	int				tailPage = QUEUE_POS_PAGE(QUEUE_TAIL);
-	int				occupied;
-	double			fillDegree;
-	TimestampTz		t;
+	int			headPage = QUEUE_POS_PAGE(QUEUE_HEAD);
+	int			tailPage = QUEUE_POS_PAGE(QUEUE_TAIL);
+	int			occupied;
+	double		fillDegree;
+	TimestampTz t;
 
 	occupied = headPage - tailPage;
 
 	if (occupied == 0)
 		return;					/* fast exit for common case */
-	
+
 	if (occupied < 0)
 	{
 		/* head has wrapped around, tail not yet */
-		occupied += QUEUE_MAX_PAGE+1;
+		occupied += QUEUE_MAX_PAGE + 1;
 	}
 
-	fillDegree = (double) occupied / (double) ((QUEUE_MAX_PAGE+1)/2);
+	fillDegree = (double) occupied / (double) ((QUEUE_MAX_PAGE + 1) / 2);
 
 	if (fillDegree < 0.5)
 		return;
@@ -1404,9 +1406,9 @@ asyncQueueFillWarning(void)
 	if (TimestampDifferenceExceeds(asyncQueueControl->lastQueueFillWarn,
 								   t, QUEUE_FULL_WARN_INTERVAL))
 	{
-		QueuePosition	min = QUEUE_HEAD;
-		int32			minPid = InvalidPid;
-		int				i;
+		QueuePosition min = QUEUE_HEAD;
+		int32		minPid = InvalidPid;
+		int			i;
 
 		for (i = 1; i <= MaxBackends; i++)
 		{
@@ -1455,13 +1457,13 @@ SignalBackends(void)
 	int32		pid;
 
 	/*
-	 * Identify all backends that are listening and not already up-to-date.
-	 * We don't want to send signals while holding the AsyncQueueLock, so
-	 * we just build a list of target PIDs.
+	 * Identify all backends that are listening and not already up-to-date. We
+	 * don't want to send signals while holding the AsyncQueueLock, so we just
+	 * build a list of target PIDs.
 	 *
-	 * XXX in principle these pallocs could fail, which would be bad.
-	 * Maybe preallocate the arrays?  But in practice this is only run
-	 * in trivial transactions, so there should surely be space available.
+	 * XXX in principle these pallocs could fail, which would be bad. Maybe
+	 * preallocate the arrays?	But in practice this is only run in trivial
+	 * transactions, so there should surely be space available.
 	 */
 	pids = (int32 *) palloc(MaxBackends * sizeof(int32));
 	ids = (BackendId *) palloc(MaxBackends * sizeof(BackendId));
@@ -1493,8 +1495,8 @@ SignalBackends(void)
 		/*
 		 * Note: assuming things aren't broken, a signal failure here could
 		 * only occur if the target backend exited since we released
-		 * AsyncQueueLock; which is unlikely but certainly possible.
-		 * So we just log a low-level debug message if it happens.
+		 * AsyncQueueLock; which is unlikely but certainly possible. So we
+		 * just log a low-level debug message if it happens.
 		 */
 		if (SendProcSignal(pid, PROCSIG_NOTIFY_INTERRUPT, ids[i]) < 0)
 			elog(DEBUG3, "could not signal backend with PID %d: %m", pid);
@@ -1521,8 +1523,8 @@ AtAbort_Notify(void)
 {
 	/*
 	 * If we LISTEN but then roll back the transaction we have set our pointer
-	 * but have not made any entry in listenChannels. In that case, remove
-	 * our pointer again.
+	 * but have not made any entry in listenChannels. In that case, remove our
+	 * pointer again.
 	 */
 	if (backendHasExecutedInitialListen)
 	{
@@ -1778,7 +1780,7 @@ EnableNotifyInterrupt(void)
  *		is disabled until the next EnableNotifyInterrupt call.
  *
  *		The PROCSIG_CATCHUP_INTERRUPT signal handler also needs to call this,
- *		so as to prevent conflicts if one signal interrupts the other.  So we
+ *		so as to prevent conflicts if one signal interrupts the other.	So we
  *		must return the previous state of the flag.
  */
 bool
@@ -1799,15 +1801,17 @@ DisableNotifyInterrupt(void)
 static void
 asyncQueueReadAllNotifications(void)
 {
-	QueuePosition	pos;
-	QueuePosition	oldpos;
-	QueuePosition	head;
+	QueuePosition pos;
+	QueuePosition oldpos;
+	QueuePosition head;
 	bool		advanceTail;
+
 	/* page_buffer must be adequately aligned, so use a union */
-	union {
+	union
+	{
 		char		buf[QUEUE_PAGESIZE];
 		AsyncQueueEntry align;
-	} page_buffer;
+	}			page_buffer;
 
 	/* Fetch current state */
 	LWLockAcquire(AsyncQueueLock, LW_SHARED);
@@ -1829,16 +1833,16 @@ asyncQueueReadAllNotifications(void)
 	 * Especially we do not take into account different commit times.
 	 * Consider the following example:
 	 *
-	 * Backend 1:                    Backend 2:
+	 * Backend 1:					 Backend 2:
 	 *
 	 * transaction starts
 	 * NOTIFY foo;
 	 * commit starts
-	 *                               transaction starts
-	 *                               LISTEN foo;
-	 *                               commit starts
+	 *								 transaction starts
+	 *								 LISTEN foo;
+	 *								 commit starts
 	 * commit to clog
-	 *                               commit to clog
+	 *								 commit to clog
 	 *
 	 * It could happen that backend 2 sees the notification from backend 1 in
 	 * the queue.  Even though the notifying transaction committed before
@@ -1861,7 +1865,7 @@ asyncQueueReadAllNotifications(void)
 	{
 		bool		reachedStop;
 
-		do 
+		do
 		{
 			int			curpage = QUEUE_POS_PAGE(pos);
 			int			curoffset = QUEUE_POS_OFFSET(pos);
@@ -1871,7 +1875,7 @@ asyncQueueReadAllNotifications(void)
 			/*
 			 * We copy the data from SLRU into a local buffer, so as to avoid
 			 * holding the AsyncCtlLock while we are examining the entries and
-			 * possibly transmitting them to our frontend.  Copy only the part
+			 * possibly transmitting them to our frontend.	Copy only the part
 			 * of the page we will actually inspect.
 			 */
 			slotno = SimpleLruReadPage_ReadOnly(AsyncCtl, curpage,
@@ -1881,7 +1885,7 @@ asyncQueueReadAllNotifications(void)
 				/* we only want to read as far as head */
 				copysize = QUEUE_POS_OFFSET(head) - curoffset;
 				if (copysize < 0)
-					copysize = 0;			/* just for safety */
+					copysize = 0;		/* just for safety */
 			}
 			else
 			{
@@ -1899,9 +1903,9 @@ asyncQueueReadAllNotifications(void)
 			 * uncommitted message.
 			 *
 			 * Our stop position is what we found to be the head's position
-			 * when we entered this function. It might have changed
-			 * already. But if it has, we will receive (or have already
-			 * received and queued) another signal and come here again.
+			 * when we entered this function. It might have changed already.
+			 * But if it has, we will receive (or have already received and
+			 * queued) another signal and come here again.
 			 *
 			 * We are not holding AsyncQueueLock here! The queue can only
 			 * extend beyond the head pointer (see above) and we leave our
@@ -1945,7 +1949,7 @@ asyncQueueReadAllNotifications(void)
  * and deliver relevant ones to my frontend.
  *
  * The current page must have been fetched into page_buffer from shared
- * memory.  (We could access the page right in shared memory, but that
+ * memory.	(We could access the page right in shared memory, but that
  * would imply holding the AsyncCtlLock throughout this routine.)
  *
  * We stop if we reach the "stop" position, or reach a notification from an
@@ -1963,11 +1967,11 @@ asyncQueueProcessPageEntries(QueuePosition *current,
 {
 	bool		reachedStop = false;
 	bool		reachedEndOfPage;
-	AsyncQueueEntry	*qe;
+	AsyncQueueEntry *qe;
 
 	do
 	{
-		QueuePosition	thisentry = *current;
+		QueuePosition thisentry = *current;
 
 		if (QUEUE_POS_EQUAL(thisentry, stop))
 			break;
@@ -1975,9 +1979,9 @@ asyncQueueProcessPageEntries(QueuePosition *current,
 		qe = (AsyncQueueEntry *) (page_buffer + QUEUE_POS_OFFSET(thisentry));
 
 		/*
-		 * Advance *current over this message, possibly to the next page.
-		 * As noted in the comments for asyncQueueReadAllNotifications, we
-		 * must do this before possibly failing while processing the message.
+		 * Advance *current over this message, possibly to the next page. As
+		 * noted in the comments for asyncQueueReadAllNotifications, we must
+		 * do this before possibly failing while processing the message.
 		 */
 		reachedEndOfPage = asyncQueueAdvance(current, qe->length);
 
@@ -1987,12 +1991,12 @@ asyncQueueProcessPageEntries(QueuePosition *current,
 			if (TransactionIdDidCommit(qe->xid))
 			{
 				/* qe->data is the null-terminated channel name */
-				char   *channel = qe->data;
+				char	   *channel = qe->data;
 
 				if (IsListeningOn(channel))
 				{
 					/* payload follows channel name */
-					char   *payload = qe->data + strlen(channel) + 1;
+					char	   *payload = qe->data + strlen(channel) + 1;
 
 					NotifyMyFrontEnd(channel, payload, qe->srcPid);
 				}
@@ -2008,12 +2012,12 @@ asyncQueueProcessPageEntries(QueuePosition *current,
 			{
 				/*
 				 * The transaction has neither committed nor aborted so far,
-				 * so we can't process its message yet.  Break out of the loop,
-				 * but first back up *current so we will reprocess the message
-				 * next time.  (Note: it is unlikely but not impossible for
-				 * TransactionIdDidCommit to fail, so we can't really avoid
-				 * this advance-then-back-up behavior when dealing with an
-				 * uncommitted message.)
+				 * so we can't process its message yet.  Break out of the
+				 * loop, but first back up *current so we will reprocess the
+				 * message next time.  (Note: it is unlikely but not
+				 * impossible for TransactionIdDidCommit to fail, so we can't
+				 * really avoid this advance-then-back-up behavior when
+				 * dealing with an uncommitted message.)
 				 */
 				*current = thisentry;
 				reachedStop = true;
@@ -2037,11 +2041,11 @@ asyncQueueProcessPageEntries(QueuePosition *current,
 static void
 asyncQueueAdvanceTail(void)
 {
-	QueuePosition	min;
-	int				i;
-	int				oldtailpage;
-	int				newtailpage;
-	int				boundary;
+	QueuePosition min;
+	int			i;
+	int			oldtailpage;
+	int			newtailpage;
+	int			boundary;
 
 	LWLockAcquire(AsyncQueueLock, LW_EXCLUSIVE);
 	min = QUEUE_HEAD;
@@ -2058,16 +2062,16 @@ asyncQueueAdvanceTail(void)
 	 * We can truncate something if the global tail advanced across an SLRU
 	 * segment boundary.
 	 *
-	 * XXX it might be better to truncate only once every several segments,
-	 * to reduce the number of directory scans.
+	 * XXX it might be better to truncate only once every several segments, to
+	 * reduce the number of directory scans.
 	 */
 	newtailpage = QUEUE_POS_PAGE(min);
 	boundary = newtailpage - (newtailpage % SLRU_PAGES_PER_SEGMENT);
 	if (asyncQueuePagePrecedesLogically(oldtailpage, boundary))
 	{
 		/*
-		 * SimpleLruTruncate() will ask for AsyncCtlLock but will also
-		 * release the lock again.
+		 * SimpleLruTruncate() will ask for AsyncCtlLock but will also release
+		 * the lock again.
 		 */
 		SimpleLruTruncate(AsyncCtl, newtailpage);
 	}
@@ -2104,8 +2108,8 @@ ProcessIncomingNotify(void)
 	notifyInterruptOccurred = 0;
 
 	/*
-	 * We must run asyncQueueReadAllNotifications inside a transaction,
-	 * else bad things happen if it gets an error.
+	 * We must run asyncQueueReadAllNotifications inside a transaction, else
+	 * bad things happen if it gets an error.
 	 */
 	StartTransactionCommand();
 
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index eed4d51edcc974c064e13388b879e663e91df6b0..78df9a8da8565bef0649027aa1f5b88cc0f7c655 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -1,7 +1,7 @@
 /*-------------------------------------------------------------------------
  *
  * cluster.c
- *	  CLUSTER a table on an index.  This is now also used for VACUUM FULL.
+ *	  CLUSTER a table on an index.	This is now also used for VACUUM FULL.
  *
  * There is hardly anything left of Paul Brown's original implementation...
  *
@@ -11,7 +11,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.201 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.202 2010/02/26 02:00:37 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -64,7 +64,7 @@ typedef struct
 
 
 static void rebuild_relation(Relation OldHeap, Oid indexOid,
-							 int freeze_min_age, int freeze_table_age);
+				 int freeze_min_age, int freeze_table_age);
 static void copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
 			   int freeze_min_age, int freeze_table_age,
 			   bool *pSwapToastByContent, TransactionId *pFreezeXid);
@@ -252,7 +252,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
  * them incrementally while we load the table.
  *
  * If indexOid is InvalidOid, the table will be rewritten in physical order
- * instead of index order.  This is the new implementation of VACUUM FULL,
+ * instead of index order.	This is the new implementation of VACUUM FULL,
  * and error messages should refer to the operation as VACUUM not CLUSTER.
  */
 void
@@ -301,8 +301,8 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose,
 		 * check in the "recheck" case is appropriate (which currently means
 		 * somebody is executing a database-wide CLUSTER), because there is
 		 * another check in cluster() which will stop any attempt to cluster
-		 * remote temp tables by name.	There is another check in
-		 * cluster_rel which is redundant, but we leave it for extra safety.
+		 * remote temp tables by name.	There is another check in cluster_rel
+		 * which is redundant, but we leave it for extra safety.
 		 */
 		if (RELATION_IS_OTHER_TEMP(OldHeap))
 		{
@@ -325,7 +325,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose,
 			 * Check that the index is still the one with indisclustered set.
 			 */
 			tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexOid));
-			if (!HeapTupleIsValid(tuple))	/* probably can't happen */
+			if (!HeapTupleIsValid(tuple))		/* probably can't happen */
 			{
 				relation_close(OldHeap, AccessExclusiveLock);
 				return;
@@ -353,19 +353,19 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose,
 				 errmsg("cannot cluster a shared catalog")));
 
 	/*
-	 * Don't process temp tables of other backends ... their local
-	 * buffer manager is not going to cope.
+	 * Don't process temp tables of other backends ... their local buffer
+	 * manager is not going to cope.
 	 */
 	if (RELATION_IS_OTHER_TEMP(OldHeap))
 	{
 		if (OidIsValid(indexOid))
 			ereport(ERROR,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg("cannot cluster temporary tables of other sessions")));
+			   errmsg("cannot cluster temporary tables of other sessions")));
 		else
 			ereport(ERROR,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg("cannot vacuum temporary tables of other sessions")));
+				errmsg("cannot vacuum temporary tables of other sessions")));
 	}
 
 	/*
@@ -664,8 +664,8 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace)
 	 * the old, or we will have problems with the TEMP status of temp tables.
 	 *
 	 * Note: the new heap is not a shared relation, even if we are rebuilding
-	 * a shared rel.  However, we do make the new heap mapped if the source
-	 * is mapped.  This simplifies swap_relation_files, and is absolutely
+	 * a shared rel.  However, we do make the new heap mapped if the source is
+	 * mapped.	This simplifies swap_relation_files, and is absolutely
 	 * necessary for rebuilding pg_class, for reasons explained there.
 	 */
 	snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", OIDOldHeap);
@@ -701,9 +701,9 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace)
 	 * If necessary, create a TOAST table for the new relation.
 	 *
 	 * If the relation doesn't have a TOAST table already, we can't need one
-	 * for the new relation.  The other way around is possible though: if
-	 * some wide columns have been dropped, AlterTableCreateToastTable
-	 * can decide that no TOAST table is needed for the new table.
+	 * for the new relation.  The other way around is possible though: if some
+	 * wide columns have been dropped, AlterTableCreateToastTable can decide
+	 * that no TOAST table is needed for the new table.
 	 *
 	 * Note that AlterTableCreateToastTable ends with CommandCounterIncrement,
 	 * so that the TOAST table will be visible for insertion.
@@ -782,18 +782,18 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
 	isnull = (bool *) palloc(natts * sizeof(bool));
 
 	/*
-	 * We need to log the copied data in WAL iff WAL archiving/streaming
-	 * is enabled AND it's not a temp rel.
+	 * We need to log the copied data in WAL iff WAL archiving/streaming is
+	 * enabled AND it's not a temp rel.
 	 */
 	use_wal = XLogIsNeeded() && !NewHeap->rd_istemp;
 
 	/*
-	 * Write an XLOG UNLOGGED record if WAL-logging was skipped because
-	 * WAL archiving is not enabled.
+	 * Write an XLOG UNLOGGED record if WAL-logging was skipped because WAL
+	 * archiving is not enabled.
 	 */
 	if (!use_wal && !NewHeap->rd_istemp)
 	{
-		char reason[NAMEDATALEN + 32];
+		char		reason[NAMEDATALEN + 32];
 
 		if (OldIndex != NULL)
 			snprintf(reason, sizeof(reason), "CLUSTER on \"%s\"",
@@ -810,7 +810,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
 	/*
 	 * If both tables have TOAST tables, perform toast swap by content.  It is
 	 * possible that the old table has a toast table but the new one doesn't,
-	 * if toastable columns have been dropped.  In that case we have to do
+	 * if toastable columns have been dropped.	In that case we have to do
 	 * swap by links.  This is okay because swap by content is only essential
 	 * for system catalogs, and we don't support schema changes for them.
 	 */
@@ -824,7 +824,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
 		 * data will eventually be found.  Set this up by setting rd_toastoid.
 		 * Note that we must hold NewHeap open until we are done writing data,
 		 * since the relcache will not guarantee to remember this setting once
-		 * the relation is closed.  Also, this technique depends on the fact
+		 * the relation is closed.	Also, this technique depends on the fact
 		 * that no one will try to read from the NewHeap until after we've
 		 * finished writing it and swapping the rels --- otherwise they could
 		 * follow the toast pointers to the wrong place.
@@ -860,8 +860,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
 	rwstate = begin_heap_rewrite(NewHeap, OldestXmin, FreezeXid, use_wal);
 
 	/*
-	 * Scan through the OldHeap, either in OldIndex order or sequentially,
-	 * and copy each tuple into the NewHeap.  To ensure we see recently-dead
+	 * Scan through the OldHeap, either in OldIndex order or sequentially, and
+	 * copy each tuple into the NewHeap.  To ensure we see recently-dead
 	 * tuples that still need to be copied, we scan with SnapshotAny and use
 	 * HeapTupleSatisfiesVacuum for the visibility test.
 	 */
@@ -924,12 +924,12 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
 			case HEAPTUPLE_INSERT_IN_PROGRESS:
 
 				/*
-				 * Since we hold exclusive lock on the relation, normally
-				 * the only way to see this is if it was inserted earlier
-				 * in our own transaction.  However, it can happen in system
+				 * Since we hold exclusive lock on the relation, normally the
+				 * only way to see this is if it was inserted earlier in our
+				 * own transaction.  However, it can happen in system
 				 * catalogs, since we tend to release write lock before commit
-				 * there.  Give a warning if neither case applies; but in
-				 * any case we had better copy it.
+				 * there.  Give a warning if neither case applies; but in any
+				 * case we had better copy it.
 				 */
 				if (!is_system_catalog &&
 					!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
@@ -1139,7 +1139,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
 				 NameStr(relform2->relname), r2);
 
 		/*
-		 * Send replacement mappings to relmapper.  Note these won't actually
+		 * Send replacement mappings to relmapper.	Note these won't actually
 		 * take effect until CommandCounterIncrement.
 		 */
 		RelationMapUpdateMap(r1, relfilenode2, relform1->relisshared, false);
@@ -1151,10 +1151,10 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
 
 	/*
 	 * In the case of a shared catalog, these next few steps will only affect
-	 * our own database's pg_class row; but that's okay, because they are
-	 * all noncritical updates.  That's also an important fact for the case
-	 * of a mapped catalog, because it's possible that we'll commit the map
-	 * change and then fail to commit the pg_class update.
+	 * our own database's pg_class row; but that's okay, because they are all
+	 * noncritical updates.  That's also an important fact for the case of a
+	 * mapped catalog, because it's possible that we'll commit the map change
+	 * and then fail to commit the pg_class update.
 	 */
 
 	/* set rel1's frozen Xid */
@@ -1181,10 +1181,10 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
 	/*
 	 * Update the tuples in pg_class --- unless the target relation of the
 	 * swap is pg_class itself.  In that case, there is zero point in making
-	 * changes because we'd be updating the old data that we're about to
-	 * throw away.  Because the real work being done here for a mapped relation
-	 * is just to change the relation map settings, it's all right to not
-	 * update the pg_class rows in this case.
+	 * changes because we'd be updating the old data that we're about to throw
+	 * away.  Because the real work being done here for a mapped relation is
+	 * just to change the relation map settings, it's all right to not update
+	 * the pg_class rows in this case.
 	 */
 	if (!target_is_pg_class)
 	{
@@ -1248,8 +1248,8 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
 			/*
 			 * We disallow this case for system catalogs, to avoid the
 			 * possibility that the catalog we're rebuilding is one of the
-			 * ones the dependency changes would change.  It's too late
-			 * to be making any data changes to the target catalog.
+			 * ones the dependency changes would change.  It's too late to be
+			 * making any data changes to the target catalog.
 			 */
 			if (IsSystemClass(relform1))
 				elog(ERROR, "cannot swap toast files by links for system catalogs");
@@ -1302,12 +1302,12 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
 	 */
 	if (swap_toast_by_content &&
 		relform1->reltoastidxid && relform2->reltoastidxid)
-			swap_relation_files(relform1->reltoastidxid,
-								relform2->reltoastidxid,
-								target_is_pg_class,
-								swap_toast_by_content,
-								InvalidTransactionId,
-								mapped_tables);
+		swap_relation_files(relform1->reltoastidxid,
+							relform2->reltoastidxid,
+							target_is_pg_class,
+							swap_toast_by_content,
+							InvalidTransactionId,
+							mapped_tables);
 
 	/* Clean up. */
 	heap_freetuple(reltup1);
@@ -1327,7 +1327,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
 	 * non-transient relation.)
 	 *
 	 * Caution: the placement of this step interacts with the decision to
-	 * handle toast rels by recursion.  When we are trying to rebuild pg_class
+	 * handle toast rels by recursion.	When we are trying to rebuild pg_class
 	 * itself, the smgr close on pg_class must happen after all accesses in
 	 * this function.
 	 */
@@ -1369,12 +1369,12 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
 
 	/*
 	 * Rebuild each index on the relation (but not the toast table, which is
-	 * all-new at this point).  It is important to do this before the DROP
+	 * all-new at this point).	It is important to do this before the DROP
 	 * step because if we are processing a system catalog that will be used
-	 * during DROP, we want to have its indexes available.  There is no
+	 * during DROP, we want to have its indexes available.	There is no
 	 * advantage to the other order anyway because this is all transactional,
-	 * so no chance to reclaim disk space before commit.  We do not need
-	 * a final CommandCounterIncrement() because reindex_relation does it.
+	 * so no chance to reclaim disk space before commit.  We do not need a
+	 * final CommandCounterIncrement() because reindex_relation does it.
 	 */
 	reindex_relation(OIDOldHeap, false, true);
 
@@ -1393,9 +1393,9 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
 
 	/*
 	 * Now we must remove any relation mapping entries that we set up for the
-	 * transient table, as well as its toast table and toast index if any.
-	 * If we fail to do this before commit, the relmapper will complain about
-	 * new permanent map entries being added post-bootstrap.
+	 * transient table, as well as its toast table and toast index if any. If
+	 * we fail to do this before commit, the relmapper will complain about new
+	 * permanent map entries being added post-bootstrap.
 	 */
 	for (i = 0; OidIsValid(mapped_tables[i]); i++)
 		RelationMapRemoveMapping(mapped_tables[i]);
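
The cleanup loop above relies on mapped_tables being an InvalidOid-terminated array. A self-contained sketch of that sentinel convention, using hypothetical stand-in names (DemoOid, DemoOidIsValid) rather than the backend's types:

#include <stdio.h>

typedef unsigned int DemoOid;	/* stand-in for the backend's Oid */
#define DemoInvalidOid ((DemoOid) 0)
#define DemoOidIsValid(o)	((o) != DemoInvalidOid)

int
main(void)
{
	/* sentinel-terminated list, the way mapped_tables[] is consumed above */
	DemoOid		mapped[] = {16384, 16387, DemoInvalidOid};
	int			i;

	for (i = 0; DemoOidIsValid(mapped[i]); i++)
		printf("would remove mapping for OID %u\n", mapped[i]);
	return 0;
}
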
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index 6577af4969bdf37803b3efc6f89d973e9d8d963c..2cf8aff6aec1116ca1449e27c61b638864db1310 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
  * Copyright (c) 1996-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.113 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.114 2010/02/26 02:00:38 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -498,11 +498,11 @@ GetComment(Oid oid, Oid classoid, int32 subid)
 	sd = systable_beginscan(description, DescriptionObjIndexId, true,
 							SnapshotNow, 3, skey);
 
-	comment  = NULL;
+	comment = NULL;
 	while ((tuple = systable_getnext(sd)) != NULL)
 	{
-		Datum	value;
-		bool	isnull;
+		Datum		value;
+		bool		isnull;
 
 		/* Found the tuple, get description field */
 		value = heap_getattr(tuple, Anum_pg_description_description, tupdesc, &isnull);
@@ -631,9 +631,8 @@ CommentAttribute(List *qualname, char *comment)
 	 * Allow comments only on columns of tables, views, and composite types
 	 * (which are the only relkinds for which pg_dump will dump per-column
 	 * comments).  In particular we wish to disallow comments on index
-	 * columns, because the naming of an index's columns may change across
-	 * PG versions, so dumping per-column comments could create reload
-	 * failures.
+	 * columns, because the naming of an index's columns may change across PG
+	 * versions, so dumping per-column comments could create reload failures.
 	 */
 	if (relation->rd_rel->relkind != RELKIND_RELATION &&
 		relation->rd_rel->relkind != RELKIND_VIEW &&
@@ -903,7 +902,7 @@ CommentRule(List *qualname, char *comment)
 
 		/* Find the rule's pg_rewrite tuple, get its OID */
 		tuple = SearchSysCache2(RULERELNAME,
-						 		ObjectIdGetDatum(reloid),
+								ObjectIdGetDatum(reloid),
 								PointerGetDatum(rulename));
 		if (!HeapTupleIsValid(tuple))
 			ereport(ERROR,
@@ -1358,7 +1357,7 @@ CommentOpFamily(List *qualname, List *arguments, char *comment)
 		namespaceId = LookupExplicitNamespace(schemaname);
 		tuple = SearchSysCache3(OPFAMILYAMNAMENSP,
 								ObjectIdGetDatum(amID),
-						 		PointerGetDatum(opfname),
+								PointerGetDatum(opfname),
 								ObjectIdGetDatum(namespaceId));
 	}
 	else
@@ -1448,9 +1447,8 @@ CommentLargeObject(List *qualname, char *comment)
 	/*
 	 * Call CreateComments() to create/drop the comments
 	 *
-	 * See the comment in the inv_create() which describes
-	 * the reason why LargeObjectRelationId is used instead
-	 * of LargeObjectMetadataRelationId.
+	 * See the comment in the inv_create() which describes the reason why
+	 * LargeObjectRelationId is used instead of LargeObjectMetadataRelationId.
 	 */
 	CreateComments(loid, LargeObjectRelationId, 0, comment);
 }
diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c
index 389a7df482be3d56efb36901825ae92637afa53a..5f18cf7f2a84886f64b5ddb07c148d56a6626f67 100644
--- a/src/backend/commands/constraint.c
+++ b/src/backend/commands/constraint.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/constraint.c,v 1.3 2010/01/02 16:57:37 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/constraint.c,v 1.4 2010/02/26 02:00:38 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -49,9 +49,9 @@ unique_key_recheck(PG_FUNCTION_ARGS)
 	bool		isnull[INDEX_MAX_KEYS];
 
 	/*
-	 * Make sure this is being called as an AFTER ROW trigger.  Note:
-	 * translatable error strings are shared with ri_triggers.c, so
-	 * resist the temptation to fold the function name into them.
+	 * Make sure this is being called as an AFTER ROW trigger.	Note:
+	 * translatable error strings are shared with ri_triggers.c, so resist the
+	 * temptation to fold the function name into them.
 	 */
 	if (!CALLED_AS_TRIGGER(fcinfo))
 		ereport(ERROR,
@@ -86,7 +86,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
 	 * If the new_row is now dead (ie, inserted and then deleted within our
 	 * transaction), we can skip the check.  However, we have to be careful,
 	 * because this trigger gets queued only in response to index insertions;
-	 * which means it does not get queued for HOT updates.  The row we are
+	 * which means it does not get queued for HOT updates.	The row we are
 	 * called for might now be dead, but have a live HOT child, in which case
 	 * we still need to make the check.  Therefore we have to use
 	 * heap_hot_search, not just HeapTupleSatisfiesVisibility as is done in
@@ -109,9 +109,9 @@ unique_key_recheck(PG_FUNCTION_ARGS)
 	}
 
 	/*
-	 * Open the index, acquiring a RowExclusiveLock, just as if we were
-	 * going to update it.  (This protects against possible changes of the
-	 * index schema, not against concurrent updates.)
+	 * Open the index, acquiring a RowExclusiveLock, just as if we were going
+	 * to update it.  (This protects against possible changes of the index
+	 * schema, not against concurrent updates.)
 	 */
 	indexRel = index_open(trigdata->tg_trigger->tgconstrindid,
 						  RowExclusiveLock);
@@ -125,9 +125,9 @@ unique_key_recheck(PG_FUNCTION_ARGS)
 	ExecStoreTuple(new_row, slot, InvalidBuffer, false);
 
 	/*
-	 * Typically the index won't have expressions, but if it does we need
-	 * an EState to evaluate them.  We need it for exclusion constraints
-	 * too, even if they are just on simple columns.
+	 * Typically the index won't have expressions, but if it does we need an
+	 * EState to evaluate them.  We need it for exclusion constraints too,
+	 * even if they are just on simple columns.
 	 */
 	if (indexInfo->ii_Expressions != NIL ||
 		indexInfo->ii_ExclusionOps != NULL)
@@ -140,13 +140,13 @@ unique_key_recheck(PG_FUNCTION_ARGS)
 		estate = NULL;
 
 	/*
-	 * Form the index values and isnull flags for the index entry that
-	 * we need to check.
+	 * Form the index values and isnull flags for the index entry that we need
+	 * to check.
 	 *
-	 * Note: if the index uses functions that are not as immutable as they
-	 * are supposed to be, this could produce an index tuple different from
-	 * the original.  The index AM can catch such errors by verifying that
-	 * it finds a matching index entry with the tuple's TID.  For exclusion
+	 * Note: if the index uses functions that are not as immutable as they are
+	 * supposed to be, this could produce an index tuple different from the
+	 * original.  The index AM can catch such errors by verifying that it
+	 * finds a matching index entry with the tuple's TID.  For exclusion
 	 * constraints we check this in check_exclusion_constraint().
 	 */
 	FormIndexDatum(indexInfo, slot, estate, values, isnull);
@@ -166,8 +166,8 @@ unique_key_recheck(PG_FUNCTION_ARGS)
 	else
 	{
 		/*
-		 * For exclusion constraints we just do the normal check, but now
-		 * it's okay to throw error.
+		 * For exclusion constraints we just do the normal check, but now it's
+		 * okay to throw error.
 		 */
 		check_exclusion_constraint(trigdata->tg_relation, indexRel, indexInfo,
 								   &(new_row->t_self), values, isnull,
@@ -175,8 +175,8 @@ unique_key_recheck(PG_FUNCTION_ARGS)
 	}
 
 	/*
-	 * If that worked, then this index entry is unique or non-excluded,
-	 * and we are done.
+	 * If that worked, then this index entry is unique or non-excluded, and we
+	 * are done.
 	 */
 	if (estate != NULL)
 		FreeExecutorState(estate);
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index fbcc4afb968f897b02f585c59a42b929df6661d1..9031cd1fa5113afd7002406c58bd0cc7b6042429 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.325 2010/02/20 21:24:02 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.326 2010/02/26 02:00:38 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -743,7 +743,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
 
 		if (strcmp(defel->defname, "format") == 0)
 		{
-			char   *fmt = defGetString(defel);
+			char	   *fmt = defGetString(defel);
 
 			if (format_specified)
 				ereport(ERROR,
@@ -751,7 +751,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
 						 errmsg("conflicting or redundant options")));
 			format_specified = true;
 			if (strcmp(fmt, "text") == 0)
-				/* default format */ ;
+				 /* default format */ ;
 			else if (strcmp(fmt, "csv") == 0)
 				cstate->csv_mode = true;
 			else if (strcmp(fmt, "binary") == 0)
@@ -821,9 +821,9 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
 				force_quote = (List *) defel->arg;
 			else
 				ereport(ERROR,
-					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("argument to option \"%s\" must be a list of column names",
-							defel->defname)));
+						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+						 errmsg("argument to option \"%s\" must be a list of column names",
+								defel->defname)));
 		}
 		else if (strcmp(defel->defname, "force_not_null") == 0)
 		{
@@ -835,9 +835,9 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
 				force_notnull = (List *) defel->arg;
 			else
 				ereport(ERROR,
-					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("argument to option \"%s\" must be a list of column names",
-							defel->defname)));
+						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+						 errmsg("argument to option \"%s\" must be a list of column names",
+								defel->defname)));
 		}
 		else
 			ereport(ERROR,
@@ -1113,7 +1113,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
 	cstate->force_quote_flags = (bool *) palloc0(num_phys_attrs * sizeof(bool));
 	if (force_quote_all)
 	{
-		int		i;
+		int			i;
 
 		for (i = 0; i < num_phys_attrs; i++)
 			cstate->force_quote_flags[i] = true;
@@ -2150,7 +2150,7 @@ CopyFrom(CopyState cstate)
 
 		if (!skip_tuple)
 		{
-			List *recheckIndexes = NIL;
+			List	   *recheckIndexes = NIL;
 
 			/* Place tuple in tuple slot */
 			ExecStoreTuple(tuple, slot, InvalidBuffer, false);
@@ -2224,7 +2224,8 @@ CopyFrom(CopyState cstate)
 	 */
 	if (hi_options & HEAP_INSERT_SKIP_WAL)
 	{
-		char reason[NAMEDATALEN + 30];
+		char		reason[NAMEDATALEN + 30];
+
 		snprintf(reason, sizeof(reason), "COPY FROM on \"%s\"",
 				 RelationGetRelationName(cstate->rel));
 		XLogReportUnloggedStatement(reason);
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 9c6f1b6936a5af64b95f9e3c1c55a10c6bd2d93d..4b7131b7097aa562a2890331e35a0529cb59225c 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -13,7 +13,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.234 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.235 2010/02/26 02:00:38 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -338,11 +338,11 @@ createdb(const CreatedbStmt *stmt)
 	 * fails when presented with data in an encoding it's not expecting. We
 	 * allow mismatch in four cases:
 	 *
-	 * 1. locale encoding = SQL_ASCII, which means that the locale is
-	 * C/POSIX which works with any encoding.
+	 * 1. locale encoding = SQL_ASCII, which means that the locale is C/POSIX
+	 * which works with any encoding.
 	 *
-	 * 2. locale encoding = -1, which means that we couldn't determine
-	 * the locale's encoding and have to trust the user to get it right.
+	 * 2. locale encoding = -1, which means that we couldn't determine the
+	 * locale's encoding and have to trust the user to get it right.
 	 *
 	 * 3. selected encoding is UTF8 and platform is win32. This is because
 	 * UTF8 is a pseudo codepage that is supported in all locales since it's
@@ -551,7 +551,7 @@ createdb(const CreatedbStmt *stmt)
 
 	/*
 	 * We deliberately set datacl to default (NULL), rather than copying it
-	 * from the template database.  Copying it would be a bad idea when the
+	 * from the template database.	Copying it would be a bad idea when the
 	 * owner is not the same as the template's owner.
 	 */
 	new_record_nulls[Anum_pg_database_datacl - 1] = true;
@@ -871,9 +871,9 @@ dropdb(const char *dbname, bool missing_ok)
 	heap_close(pgdbrel, NoLock);
 
 	/*
-	 * Force synchronous commit, thus minimizing the window between removal
-	 * of the database files and commital of the transaction. If we crash
-	 * before committing, we'll have a DB that's gone on disk but still there
+	 * Force synchronous commit, thus minimizing the window between removal of
+	 * the database files and committal of the transaction. If we crash before
+	 * committing, we'll have a DB that's gone on disk but still there
 	 * according to pg_database, which is not good.
 	 */
 	ForceSyncCommit();
@@ -1402,13 +1402,13 @@ AlterDatabase(AlterDatabaseStmt *stmt, bool isTopLevel)
 void
 AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
 {
-	Oid		datid = get_database_oid(stmt->dbname);
+	Oid			datid = get_database_oid(stmt->dbname);
 
 	if (!OidIsValid(datid))
-  		ereport(ERROR,
-  				(errcode(ERRCODE_UNDEFINED_DATABASE),
-  				 errmsg("database \"%s\" does not exist", stmt->dbname)));
-  
+		ereport(ERROR,
+				(errcode(ERRCODE_UNDEFINED_DATABASE),
+				 errmsg("database \"%s\" does not exist", stmt->dbname)));
+
 	/*
 	 * Obtain a lock on the database and make sure it didn't go away in the
 	 * meantime.
@@ -1416,11 +1416,11 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
 	shdepLockAndCheckObject(DatabaseRelationId, datid);
 
 	if (!pg_database_ownercheck(datid, GetUserId()))
-  		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
-  					   stmt->dbname);
+		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
+					   stmt->dbname);
 
 	AlterSetting(datid, InvalidOid, stmt->setstmt);
-  
+
 	UnlockSharedObject(DatabaseRelationId, datid, 0, AccessShareLock);
 }
 
@@ -1936,9 +1936,10 @@ dbase_redo(XLogRecPtr lsn, XLogRecord *record)
 		if (InHotStandby)
 		{
 			/*
-			 * Lock database while we resolve conflicts to ensure that InitPostgres()
-			 * cannot fully re-execute concurrently. This avoids backends re-connecting
-			 * automatically to same database, which can happen in some cases.
+			 * Lock database while we resolve conflicts to ensure that
+			 * InitPostgres() cannot fully re-execute concurrently. This
+			 * avoids backends re-connecting automatically to same database,
+			 * which can happen in some cases.
 			 */
 			LockSharedObjectForSession(DatabaseRelationId, xlrec->db_id, 0, AccessExclusiveLock);
 			ResolveRecoveryConflictWithDatabase(xlrec->db_id);
@@ -1962,10 +1963,11 @@ dbase_redo(XLogRecPtr lsn, XLogRecord *record)
 		if (InHotStandby)
 		{
 			/*
-			 * Release locks prior to commit. XXX There is a race condition here that may allow
-			 * backends to reconnect, but the window for this is small because the gap between
-			 * here and commit is mostly fairly small and it is unlikely that people will be
-			 * dropping databases that we are trying to connect to anyway.
+			 * Release locks prior to commit. XXX There is a race condition
+			 * here that may allow backends to reconnect, but the window for
+			 * this is small because the gap between here and commit is mostly
+			 * fairly small and it is unlikely that people will be dropping
+			 * databases that we are trying to connect to anyway.
 			 */
 			UnlockSharedObjectForSession(DatabaseRelationId, xlrec->db_id, 0, AccessExclusiveLock);
 		}
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index a3d45872b4393e6a4eb7470749e2fd7bdd0cbad6..cf029cd539a95d3aa3b1e918c87beed3e2aec6aa 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.107 2010/01/02 16:57:37 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.108 2010/02/26 02:00:38 momjian Exp $
  *
  * DESCRIPTION
  *	  The "DefineFoo" routines take the parse tree and pick out the
@@ -156,8 +156,8 @@ defGetBoolean(DefElem *def)
 				char	   *sval = defGetString(def);
 
 				/*
-				 * The set of strings accepted here should match up with
-				 * the grammar's opt_boolean production.
+				 * The set of strings accepted here should match up with the
+				 * grammar's opt_boolean production.
 				 */
 				if (pg_strcasecmp(sval, "true") == 0)
 					return true;
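
The comment above only asks that the accepted strings stay in sync with the grammar's opt_boolean production. A simplified standalone sketch, assuming the accepted spellings are "true"/"false"/"on"/"off" (case-insensitive); parse_boolean_option is a hypothetical name, not a backend function:

#include <stdio.h>
#include <string.h>
#include <ctype.h>

/* Returns 1 or 0 on a recognized boolean spelling, -1 otherwise. */
static int
parse_boolean_option(const char *sval)
{
	char		buf[8];
	size_t		i,
				len = strlen(sval);

	if (len >= sizeof(buf))
		return -1;
	for (i = 0; i <= len; i++)	/* copy including the terminating NUL */
		buf[i] = (char) tolower((unsigned char) sval[i]);

	if (strcmp(buf, "true") == 0 || strcmp(buf, "on") == 0)
		return 1;
	if (strcmp(buf, "false") == 0 || strcmp(buf, "off") == 0)
		return 0;
	return -1;
}

int
main(void)
{
	printf("%d %d %d\n",
		   parse_boolean_option("TRUE"),
		   parse_boolean_option("off"),
		   parse_boolean_option("maybe"));
	return 0;
}
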
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 9100c404ad71aefc119e08a5c7a2734bebd741d5..2409a01e2dd5f9b8439d2264aa90f3718e2ef6f8 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994-5, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.203 2010/02/16 22:19:59 adunstan Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.204 2010/02/26 02:00:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -55,9 +55,9 @@ static void report_triggers(ResultRelInfo *rInfo, bool show_relname,
 				ExplainState *es);
 static double elapsed_time(instr_time *starttime);
 static void ExplainNode(Plan *plan, PlanState *planstate,
-				Plan *outer_plan,
-				const char *relationship, const char *plan_name,
-				ExplainState *es);
+			Plan *outer_plan,
+			const char *relationship, const char *plan_name,
+			ExplainState *es);
 static void show_plan_tlist(Plan *plan, ExplainState *es);
 static void show_qual(List *qual, const char *qlabel, Plan *plan,
 		  Plan *outer_plan, bool useprefix, ExplainState *es);
@@ -74,25 +74,26 @@ static void ExplainScanTarget(Scan *plan, ExplainState *es);
 static void ExplainMemberNodes(List *plans, PlanState **planstate,
 				   Plan *outer_plan, ExplainState *es);
 static void ExplainSubPlans(List *plans, const char *relationship,
-							ExplainState *es);
+				ExplainState *es);
 static void ExplainPropertyList(const char *qlabel, List *data,
-								ExplainState *es);
+					ExplainState *es);
 static void ExplainProperty(const char *qlabel, const char *value,
-							bool numeric, ExplainState *es);
-#define ExplainPropertyText(qlabel, value, es)  \
+				bool numeric, ExplainState *es);
+
+#define ExplainPropertyText(qlabel, value, es)	\
 	ExplainProperty(qlabel, value, false, es)
 static void ExplainPropertyInteger(const char *qlabel, int value,
-								   ExplainState *es);
+					   ExplainState *es);
 static void ExplainPropertyLong(const char *qlabel, long value,
-								ExplainState *es);
+					ExplainState *es);
 static void ExplainPropertyFloat(const char *qlabel, double value, int ndigits,
-								 ExplainState *es);
+					 ExplainState *es);
 static void ExplainOpenGroup(const char *objtype, const char *labelname,
 				 bool labeled, ExplainState *es);
 static void ExplainCloseGroup(const char *objtype, const char *labelname,
-				 bool labeled, ExplainState *es);
+				  bool labeled, ExplainState *es);
 static void ExplainDummyGroup(const char *objtype, const char *labelname,
-							  ExplainState *es);
+				  ExplainState *es);
 static void ExplainXMLTag(const char *tagname, int flags, ExplainState *es);
 static void ExplainJSONLineEnding(ExplainState *es);
 static void ExplainYAMLLineStarting(ExplainState *es);
@@ -120,7 +121,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
 	/* Parse options list. */
 	foreach(lc, stmt->options)
 	{
-		DefElem *opt = (DefElem *) lfirst(lc);
+		DefElem    *opt = (DefElem *) lfirst(lc);
 
 		if (strcmp(opt->defname, "analyze") == 0)
 			es.analyze = defGetBoolean(opt);
@@ -132,7 +133,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
 			es.buffers = defGetBoolean(opt);
 		else if (strcmp(opt->defname, "format") == 0)
 		{
-			char   *p = defGetString(opt);
+			char	   *p = defGetString(opt);
 
 			if (strcmp(p, "text") == 0)
 				es.format = EXPLAIN_FORMAT_TEXT;
@@ -144,9 +145,9 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
 				es.format = EXPLAIN_FORMAT_YAML;
 			else
 				ereport(ERROR,
-					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("unrecognized value for EXPLAIN option \"%s\": \"%s\"",
-							opt->defname, p)));
+						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				errmsg("unrecognized value for EXPLAIN option \"%s\": \"%s\"",
+					   opt->defname, p)));
 		}
 		else
 			ereport(ERROR,
@@ -157,14 +158,14 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
 
 	if (es.buffers && !es.analyze)
 		ereport(ERROR,
-			(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-			 errmsg("EXPLAIN option BUFFERS requires ANALYZE")));
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("EXPLAIN option BUFFERS requires ANALYZE")));
 
 	/*
 	 * Parse analysis was done already, but we still have to run the rule
-	 * rewriter.  We do not do AcquireRewriteLocks: we assume the query
-	 * either came straight from the parser, or suitable locks were
-	 * acquired by plancache.c.
+	 * rewriter.  We do not do AcquireRewriteLocks: we assume the query either
+	 * came straight from the parser, or suitable locks were acquired by
+	 * plancache.c.
 	 *
 	 * Because the rewriter and planner tend to scribble on the input, we make
 	 * a preliminary copy of the source querytree.	This prevents problems in
@@ -244,11 +245,11 @@ ExplainResultDesc(ExplainStmt *stmt)
 	/* Check for XML format option */
 	foreach(lc, stmt->options)
 	{
-		DefElem *opt = (DefElem *) lfirst(lc);
+		DefElem    *opt = (DefElem *) lfirst(lc);
 
 		if (strcmp(opt->defname, "format") == 0)
 		{
-			char   *p = defGetString(opt);
+			char	   *p = defGetString(opt);
 
 			xml = (strcmp(p, "xml") == 0);
 			/* don't "break", as ExplainQuery will use the last value */
@@ -322,7 +323,7 @@ ExplainOneUtility(Node *utilityStmt, ExplainState *es,
 	{
 		if (es->format == EXPLAIN_FORMAT_TEXT)
 			appendStringInfoString(es->str,
-							   "Utility statements have no plan structure\n");
+							  "Utility statements have no plan structure\n");
 		else
 			ExplainDummyGroup("Utility Statement", NULL, es);
 	}
@@ -472,7 +473,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, ExplainState *es,
  *	  convert a QueryDesc's plan tree to text and append it to es->str
  *
  * The caller should have set up the options fields of *es, as well as
- * initializing the output buffer es->str.  Other fields in *es are
+ * initializing the output buffer es->str.	Other fields in *es are
  * initialized here.
  *
  * NB: will not work on utility statements
@@ -489,10 +490,10 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
 
 /*
  * ExplainQueryText -
- *    add a "Query Text" node that contains the actual text of the query
- * 
+ *	  add a "Query Text" node that contains the actual text of the query
+ *
  * The caller should have set up the options fields of *es, as well as
- * initializing the output buffer es->str. 
+ * initializing the output buffer es->str.
  *
  */
 void
@@ -538,8 +539,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, ExplainState *es)
 
 		/*
 		 * In text format, we avoid printing both the trigger name and the
-		 * constraint name unless VERBOSE is specified.  In non-text
-		 * formats we just print everything.
+		 * constraint name unless VERBOSE is specified.  In non-text formats
+		 * we just print everything.
 		 */
 		if (es->format == EXPLAIN_FORMAT_TEXT)
 		{
@@ -657,11 +658,11 @@ ExplainNode(Plan *plan, PlanState *planstate,
 			pname = sname = "Nested Loop";
 			break;
 		case T_MergeJoin:
-			pname = "Merge";		/* "Join" gets added by jointype switch */
+			pname = "Merge";	/* "Join" gets added by jointype switch */
 			sname = "Merge Join";
 			break;
 		case T_HashJoin:
-			pname = "Hash";			/* "Join" gets added by jointype switch */
+			pname = "Hash";		/* "Join" gets added by jointype switch */
 			sname = "Hash Join";
 			break;
 		case T_SeqScan:
@@ -801,9 +802,9 @@ ExplainNode(Plan *plan, PlanState *planstate,
 	{
 		case T_IndexScan:
 			{
-				IndexScan *indexscan = (IndexScan *) plan;
+				IndexScan  *indexscan = (IndexScan *) plan;
 				const char *indexname =
-					explain_get_index_name(indexscan->indexid);
+				explain_get_index_name(indexscan->indexid);
 
 				if (es->format == EXPLAIN_FORMAT_TEXT)
 				{
@@ -849,7 +850,7 @@ ExplainNode(Plan *plan, PlanState *planstate,
 			{
 				BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan;
 				const char *indexname =
-					explain_get_index_name(bitmapindexscan->indexid);
+				explain_get_index_name(bitmapindexscan->indexid);
 
 				if (es->format == EXPLAIN_FORMAT_TEXT)
 					appendStringInfo(es->str, " on %s", indexname);
@@ -1084,14 +1085,14 @@ ExplainNode(Plan *plan, PlanState *planstate,
 
 		if (es->format == EXPLAIN_FORMAT_TEXT)
 		{
-			bool	has_shared = (usage->shared_blks_hit > 0 ||
-								  usage->shared_blks_read > 0 ||
-								  usage->shared_blks_written);
-			bool	has_local = (usage->local_blks_hit > 0 ||
-								 usage->local_blks_read > 0 ||
-								 usage->local_blks_written);
-			bool	has_temp = (usage->temp_blks_read > 0 ||
-								usage->temp_blks_written);
+			bool		has_shared = (usage->shared_blks_hit > 0 ||
+									  usage->shared_blks_read > 0 ||
+									  usage->shared_blks_written);
+			bool		has_local = (usage->local_blks_hit > 0 ||
+									 usage->local_blks_read > 0 ||
+									 usage->local_blks_written);
+			bool		has_temp = (usage->temp_blks_read > 0 ||
+									usage->temp_blks_written);
 
 			/* Show only positive counter values. */
 			if (has_shared || has_local || has_temp)
@@ -1104,13 +1105,13 @@ ExplainNode(Plan *plan, PlanState *planstate,
 					appendStringInfoString(es->str, " shared");
 					if (usage->shared_blks_hit > 0)
 						appendStringInfo(es->str, " hit=%ld",
-							usage->shared_blks_hit);
+										 usage->shared_blks_hit);
 					if (usage->shared_blks_read > 0)
 						appendStringInfo(es->str, " read=%ld",
-							usage->shared_blks_read);
+										 usage->shared_blks_read);
 					if (usage->shared_blks_written > 0)
 						appendStringInfo(es->str, " written=%ld",
-							usage->shared_blks_written);
+										 usage->shared_blks_written);
 					if (has_local || has_temp)
 						appendStringInfoChar(es->str, ',');
 				}
@@ -1119,13 +1120,13 @@ ExplainNode(Plan *plan, PlanState *planstate,
 					appendStringInfoString(es->str, " local");
 					if (usage->local_blks_hit > 0)
 						appendStringInfo(es->str, " hit=%ld",
-							usage->local_blks_hit);
+										 usage->local_blks_hit);
 					if (usage->local_blks_read > 0)
 						appendStringInfo(es->str, " read=%ld",
-							usage->local_blks_read);
+										 usage->local_blks_read);
 					if (usage->local_blks_written > 0)
 						appendStringInfo(es->str, " written=%ld",
-							usage->local_blks_written);
+										 usage->local_blks_written);
 					if (has_temp)
 						appendStringInfoChar(es->str, ',');
 				}
@@ -1134,10 +1135,10 @@ ExplainNode(Plan *plan, PlanState *planstate,
 					appendStringInfoString(es->str, " temp");
 					if (usage->temp_blks_read > 0)
 						appendStringInfo(es->str, " read=%ld",
-							usage->temp_blks_read);
+										 usage->temp_blks_read);
 					if (usage->temp_blks_written > 0)
 						appendStringInfo(es->str, " written=%ld",
-							usage->temp_blks_written);
+										 usage->temp_blks_written);
 				}
 				appendStringInfoChar(es->str, '\n');
 			}
@@ -1283,7 +1284,7 @@ show_plan_tlist(Plan *plan, ExplainState *es)
 		TargetEntry *tle = (TargetEntry *) lfirst(lc);
 
 		result = lappend(result,
-					     deparse_expression((Node *) tle->expr, context,
+						 deparse_expression((Node *) tle->expr, context,
 											useprefix, false));
 	}
 
@@ -1403,7 +1404,7 @@ show_sort_info(SortState *sortstate, ExplainState *es)
 	if (es->analyze && sortstate->sort_Done &&
 		sortstate->tuplesortstate != NULL)
 	{
-		Tuplesortstate	*state = (Tuplesortstate *) sortstate->tuplesortstate;
+		Tuplesortstate *state = (Tuplesortstate *) sortstate->tuplesortstate;
 		const char *sortMethod;
 		const char *spaceType;
 		long		spaceUsed;
@@ -1438,7 +1439,8 @@ show_hash_info(HashState *hashstate, ExplainState *es)
 
 	if (hashtable)
 	{
-		long spacePeakKb = (hashtable->spacePeak + 1023) / 1024;
+		long		spacePeakKb = (hashtable->spacePeak + 1023) / 1024;
+
 		if (es->format != EXPLAIN_FORMAT_TEXT)
 		{
 			ExplainPropertyLong("Hash Buckets", hashtable->nbuckets, es);
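
The expression (spacePeak + 1023) / 1024 above is the usual integer round-up to whole kilobytes, so a non-zero peak never reports as 0kB. A trivial standalone check:

#include <stdio.h>

int
main(void)
{
	long		peaks[] = {1, 1023, 1024, 1025, 4096};
	int			i;

	for (i = 0; i < 5; i++)
		printf("%ld bytes -> %ld kB\n", peaks[i], (peaks[i] + 1023) / 1024);
	/* prints 1, 1, 1, 2 and 4 kB respectively */
	return 0;
}
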
@@ -1451,7 +1453,7 @@ show_hash_info(HashState *hashstate, ExplainState *es)
 		{
 			appendStringInfoSpaces(es->str, es->indent * 2);
 			appendStringInfo(es->str,
-							 "Buckets: %d  Batches: %d (originally %d)  Memory Usage: %ldkB\n",
+			"Buckets: %d  Batches: %d (originally %d)  Memory Usage: %ldkB\n",
 							 hashtable->nbuckets, hashtable->nbatch,
 							 hashtable->nbatch_original, spacePeakKb);
 		}
@@ -1459,7 +1461,7 @@ show_hash_info(HashState *hashstate, ExplainState *es)
 		{
 			appendStringInfoSpaces(es->str, es->indent * 2);
 			appendStringInfo(es->str,
-							 "Buckets: %d  Batches: %d  Memory Usage: %ldkB\n",
+						   "Buckets: %d  Batches: %d  Memory Usage: %ldkB\n",
 							 hashtable->nbuckets, hashtable->nbatch,
 							 spacePeakKb);
 		}
@@ -1600,7 +1602,7 @@ ExplainScanTarget(Scan *plan, ExplainState *es)
  */
 static void
 ExplainMemberNodes(List *plans, PlanState **planstate, Plan *outer_plan,
-		           ExplainState *es)
+				   ExplainState *es)
 {
 	ListCell   *lst;
 	int			j = 0;
@@ -1667,7 +1669,7 @@ ExplainPropertyList(const char *qlabel, List *data, ExplainState *es)
 			ExplainXMLTag(qlabel, X_OPENING, es);
 			foreach(lc, data)
 			{
-				char   *str;
+				char	   *str;
 
 				appendStringInfoSpaces(es->str, es->indent * 2 + 2);
 				appendStringInfoString(es->str, "<Item>");
@@ -1731,7 +1733,7 @@ ExplainProperty(const char *qlabel, const char *value, bool numeric,
 
 		case EXPLAIN_FORMAT_XML:
 			{
-				char   *str;
+				char	   *str;
 
 				appendStringInfoSpaces(es->str, es->indent * 2);
 				ExplainXMLTag(qlabel, X_OPENING | X_NOWHITESPACE, es);
@@ -1768,7 +1770,7 @@ ExplainProperty(const char *qlabel, const char *value, bool numeric,
 static void
 ExplainPropertyInteger(const char *qlabel, int value, ExplainState *es)
 {
-	char	buf[32];
+	char		buf[32];
 
 	snprintf(buf, sizeof(buf), "%d", value);
 	ExplainProperty(qlabel, buf, true, es);
@@ -1780,7 +1782,7 @@ ExplainPropertyInteger(const char *qlabel, int value, ExplainState *es)
 static void
 ExplainPropertyLong(const char *qlabel, long value, ExplainState *es)
 {
-	char	buf[32];
+	char		buf[32];
 
 	snprintf(buf, sizeof(buf), "%ld", value);
 	ExplainProperty(qlabel, buf, true, es);
@@ -1794,7 +1796,7 @@ static void
 ExplainPropertyFloat(const char *qlabel, double value, int ndigits,
 					 ExplainState *es)
 {
-	char	buf[256];
+	char		buf[256];
 
 	snprintf(buf, sizeof(buf), "%.*f", ndigits, value);
 	ExplainProperty(qlabel, buf, true, es);
@@ -1837,8 +1839,8 @@ ExplainOpenGroup(const char *objtype, const char *labelname,
 			/*
 			 * In JSON format, the grouping_stack is an integer list.  0 means
 			 * we've emitted nothing at this grouping level, 1 means we've
-			 * emitted something (and so the next item needs a comma).
-			 * See ExplainJSONLineEnding().
+			 * emitted something (and so the next item needs a comma). See
+			 * ExplainJSONLineEnding().
 			 */
 			es->grouping_stack = lcons_int(0, es->grouping_stack);
 			es->indent++;
@@ -1966,7 +1968,7 @@ ExplainBeginOutput(ExplainState *es)
 
 		case EXPLAIN_FORMAT_XML:
 			appendStringInfoString(es->str,
-								   "<explain xmlns=\"http://www.postgresql.org/2009/explain\">\n");
+			 "<explain xmlns=\"http://www.postgresql.org/2009/explain\">\n");
 			es->indent++;
 			break;
 
@@ -2065,7 +2067,7 @@ ExplainXMLTag(const char *tagname, int flags, ExplainState *es)
 /*
  * Emit a JSON line ending.
  *
- * JSON requires a comma after each property but the last.  To facilitate this,
+ * JSON requires a comma after each property but the last.	To facilitate this,
  * in JSON format, the text emitted for each property begins just prior to the
  * preceding line-break (and comma, if applicable).
  */
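
A standalone sketch of the convention described above (illustrative only, not the backend implementation): each property first emits the separator owed by its predecessor, so the output never ends with a dangling comma.

#include <stdio.h>

static int	emitted = 0;		/* plays the role of one grouping_stack entry */

static void
emit_property(const char *label, const char *value)
{
	if (emitted)
		putchar(',');			/* the comma belongs to the previous item */
	printf("\n  \"%s\": \"%s\"", label, value);
	emitted = 1;
}

int
main(void)
{
	printf("{");
	emit_property("Node Type", "Seq Scan");
	emit_property("Relation Name", "foo");
	printf("\n}\n");
	return 0;
}
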
@@ -2086,7 +2088,7 @@ ExplainJSONLineEnding(ExplainState *es)
  * YAML lines are ordinarily indented by two spaces per indentation level.
  * The text emitted for each property begins just prior to the preceding
  * line-break, except for the first property in an unlabelled group, for which
- * it begins immediately after the "- " that introduces the group.  The first
+ * it begins immediately after the "- " that introduces the group.	The first
  * property of the group appears on the same line as the opening "- ".
  */
 static void
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 3de1a9b0caa51c51d029ee6c4a99e3da6772debf..26a3a52efc30f30bef5dab32d34107899c1a85d6 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -10,7 +10,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.117 2010/02/17 04:19:39 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.118 2010/02/26 02:00:39 momjian Exp $
  *
  * DESCRIPTION
  *	  These routines take the parse tree and pick out the
@@ -314,8 +314,8 @@ examine_parameter_list(List *parameters, Oid languageOid,
 					strcmp(prevfp->name, fp->name) == 0)
 					ereport(ERROR,
 							(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
-							 errmsg("parameter name \"%s\" used more than once",
-									fp->name)));
+						  errmsg("parameter name \"%s\" used more than once",
+								 fp->name)));
 			}
 
 			paramNames[i] = CStringGetTextDatum(fp->name);
@@ -2019,8 +2019,8 @@ ExecuteDoStmt(DoStmt *stmt)
 	if (!OidIsValid(laninline))
 		ereport(ERROR,
 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("language \"%s\" does not support inline code execution",
-						NameStr(languageStruct->lanname))));
+			 errmsg("language \"%s\" does not support inline code execution",
+					NameStr(languageStruct->lanname))));
 
 	ReleaseSysCache(languageTuple);
 
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index dee14d57f65e734779ab4004ed9c615f471d3c45..380eca12877ced397ab08ed6a2435de7b6323e5b 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.193 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.194 2010/02/26 02:00:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -310,8 +310,8 @@ DefineIndex(RangeVar *heapRelation,
 	if (exclusionOpNames != NIL && !OidIsValid(accessMethodForm->amgettuple))
 		ereport(ERROR,
 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("access method \"%s\" does not support exclusion constraints",
-						accessMethodName)));
+		errmsg("access method \"%s\" does not support exclusion constraints",
+			   accessMethodName)));
 
 	amcanorder = accessMethodForm->amcanorder;
 	amoptions = accessMethodForm->amoptions;
@@ -460,7 +460,7 @@ DefineIndex(RangeVar *heapRelation,
 		else
 		{
 			elog(ERROR, "unknown constraint type");
-			constraint_type = NULL;	/* keep compiler quiet */
+			constraint_type = NULL;		/* keep compiler quiet */
 		}
 
 		ereport(NOTICE,
@@ -476,8 +476,8 @@ DefineIndex(RangeVar *heapRelation,
 	heap_close(rel, NoLock);
 
 	/*
-	 * Make the catalog entries for the index, including constraints.
-	 * Then, if not skip_build || concurrent, actually build the index.
+	 * Make the catalog entries for the index, including constraints. Then, if
+	 * not skip_build || concurrent, actually build the index.
 	 */
 	indexRelationId =
 		index_create(relationId, indexRelationName, indexRelationId,
@@ -494,10 +494,10 @@ DefineIndex(RangeVar *heapRelation,
 
 	/*
 	 * For a concurrent build, it's important to make the catalog entries
-	 * visible to other transactions before we start to build the index.
-	 * That will prevent them from making incompatible HOT updates.  The new
-	 * index will be marked not indisready and not indisvalid, so that no one
-	 * else tries to either insert into it or use it for queries.
+	 * visible to other transactions before we start to build the index. That
+	 * will prevent them from making incompatible HOT updates.	The new index
+	 * will be marked not indisready and not indisvalid, so that no one else
+	 * tries to either insert into it or use it for queries.
 	 *
 	 * We must commit our current transaction so that the index becomes
 	 * visible; then start another.  Note that all the data structures we just
@@ -835,7 +835,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
 	/* Allocate space for exclusion operator info, if needed */
 	if (exclusionOpNames)
 	{
-		int		ncols = list_length(attList);
+		int			ncols = list_length(attList);
 
 		Assert(list_length(exclusionOpNames) == ncols);
 		indexInfo->ii_ExclusionOps = (Oid *) palloc(sizeof(Oid) * ncols);
@@ -941,10 +941,10 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
 		 */
 		if (nextExclOp)
 		{
-			List   *opname = (List *) lfirst(nextExclOp);
-			Oid		opid;
-			Oid		opfamily;
-			int		strat;
+			List	   *opname = (List *) lfirst(nextExclOp);
+			Oid			opid;
+			Oid			opfamily;
+			int			strat;
 
 			/*
 			 * Find the operator --- it must accept the column datatype
@@ -971,7 +971,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
 			strat = get_op_opfamily_strategy(opid, opfamily);
 			if (strat == 0)
 			{
-				HeapTuple opftuple;
+				HeapTuple	opftuple;
 				Form_pg_opfamily opfform;
 
 				/*
@@ -1433,7 +1433,7 @@ ChooseIndexNameAddition(List *colnames)
 		const char *name = (const char *) lfirst(lc);
 
 		if (buflen > 0)
-			buf[buflen++] = '_';			/* insert _ between names */
+			buf[buflen++] = '_';	/* insert _ between names */
 
 		/*
 		 * At this point we have buflen <= NAMEDATALEN.  name should be less
@@ -1449,7 +1449,7 @@ ChooseIndexNameAddition(List *colnames)
 
 /*
  * Select the actual names to be used for the columns of an index, given the
- * list of IndexElems for the columns.  This is mostly about ensuring the
+ * list of IndexElems for the columns.	This is mostly about ensuring the
  * names are unique so we don't get a conflicting-attribute-names error.
  *
  * Returns a List of plain strings (char *, not String nodes).
@@ -1470,11 +1470,11 @@ ChooseIndexColumnNames(List *indexElems)
 
 		/* Get the preliminary name from the IndexElem */
 		if (ielem->indexcolname)
-			origname = ielem->indexcolname;	/* caller-specified name */
+			origname = ielem->indexcolname;		/* caller-specified name */
 		else if (ielem->name)
-			origname = ielem->name;			/* simple column reference */
+			origname = ielem->name;		/* simple column reference */
 		else
-			origname = "expr";				/* default name for expression */
+			origname = "expr";	/* default name for expression */
 
 		/* If it conflicts with any previous column, tweak it */
 		curname = origname;
diff --git a/src/backend/commands/lockcmds.c b/src/backend/commands/lockcmds.c
index 31096e0beb666214a7567ed739aaa6d27686f5b7..34d657c0312a1c2900a3e1766703acb2f82abbde 100644
--- a/src/backend/commands/lockcmds.c
+++ b/src/backend/commands/lockcmds.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/lockcmds.c,v 1.28 2010/02/20 21:24:02 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/lockcmds.c,v 1.29 2010/02/26 02:00:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -48,11 +48,10 @@ LockTableCommand(LockStmt *lockstmt)
 		reloid = RangeVarGetRelid(relation, false);
 
 		/*
-		 * During recovery we only accept these variations:
-		 *   LOCK TABLE foo IN ACCESS SHARE MODE
-		 *   LOCK TABLE foo IN ROW SHARE MODE
-		 *   LOCK TABLE foo IN ROW EXCLUSIVE MODE
-		 * This test must match the restrictions defined in LockAcquire()
+		 * During recovery we only accept these variations: LOCK TABLE foo IN
+		 * ACCESS SHARE MODE LOCK TABLE foo IN ROW SHARE MODE LOCK TABLE foo
+		 * IN ROW EXCLUSIVE MODE This test must match the restrictions defined
+		 * in LockAcquire()
 		 */
 		if (lockstmt->mode > RowExclusiveLock)
 			PreventCommandDuringRecovery("LOCK TABLE");
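
The restriction described above comes down to the mode > RowExclusiveLock test: only the three weakest table lock modes are accepted during recovery. A self-contained sketch with a hypothetical stand-in enum, assuming the same ordering of modes as the backend's lock.h:

#include <stdio.h>

/* Stand-in ordering; DemoLockMode is hypothetical, not a backend type. */
typedef enum DemoLockMode
{
	DEMO_ACCESS_SHARE = 1,
	DEMO_ROW_SHARE,
	DEMO_ROW_EXCLUSIVE,
	DEMO_SHARE_UPDATE_EXCLUSIVE,
	DEMO_SHARE,
	DEMO_SHARE_ROW_EXCLUSIVE,
	DEMO_EXCLUSIVE,
	DEMO_ACCESS_EXCLUSIVE
} DemoLockMode;

/* The rule the comment states: anything stronger than ROW EXCLUSIVE is refused. */
static int
allowed_during_recovery(DemoLockMode mode)
{
	return mode <= DEMO_ROW_EXCLUSIVE;
}

int
main(void)
{
	printf("ROW EXCLUSIVE allowed: %d\n",
		   allowed_during_recovery(DEMO_ROW_EXCLUSIVE));
	printf("ACCESS EXCLUSIVE allowed: %d\n",
		   allowed_during_recovery(DEMO_ACCESS_EXCLUSIVE));
	return 0;
}
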
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 34f33670c3eb211b26209f249424735da5288328..8292ae1f77722dba3ef528d179fc904008853823 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.90 2010/02/23 22:51:42 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.91 2010/02/26 02:00:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -149,8 +149,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
 		}
 
 		/*
-		 * Likewise for the anonymous block handler, if required;
-		 * but we don't care about its return type.
+		 * Likewise for the anonymous block handler, if required; but we don't
+		 * care about its return type.
 		 */
 		if (pltemplate->tmplinline)
 		{
@@ -161,17 +161,17 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
 			{
 				inlineOid = ProcedureCreate(pltemplate->tmplinline,
 											PG_CATALOG_NAMESPACE,
-											false, /* replace */
-											false, /* returnsSet */
+											false,		/* replace */
+											false,		/* returnsSet */
 											VOIDOID,
 											ClanguageId,
 											F_FMGR_C_VALIDATOR,
 											pltemplate->tmplinline,
 											pltemplate->tmpllibrary,
-											false, /* isAgg */
-											false, /* isWindowFunc */
-											false, /* security_definer */
-											true, /* isStrict */
+											false,		/* isAgg */
+											false,		/* isWindowFunc */
+											false,		/* security_definer */
+											true,		/* isStrict */
 											PROVOLATILE_VOLATILE,
 											buildoidvector(funcargtypes, 1),
 											PointerGetDatum(NULL),
@@ -209,7 +209,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
 										 false, /* isAgg */
 										 false, /* isWindowFunc */
 										 false, /* security_definer */
-										 true, /* isStrict */
+										 true,	/* isStrict */
 										 PROVOLATILE_VOLATILE,
 										 buildoidvector(funcargtypes, 1),
 										 PointerGetDatum(NULL),
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 135749381387777609155dc91990794679533630..b30fdce73d09b5dc9a6c7fe57c6527e3b3b4eb28 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.56 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.57 2010/02/26 02:00:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -92,7 +92,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
 	 */
 	if (saved_uid != owner_uid)
 		SetUserIdAndSecContext(owner_uid,
-							   save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
+							save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
 
 	/* Create the schema's namespace */
 	namespaceId = NamespaceCreate(schemaName, owner_uid);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 228304562dadb08bc7f8a7adb457e31313fc9bad..c2360e2d84e5952a81dd02d3edcf02923f22e2c1 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.326 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.327 2010/02/26 02:00:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -304,9 +304,9 @@ static void ATAddCheckConstraint(List **wqueue,
 static void ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
 						  Constraint *fkconstraint);
 static void ATExecDropConstraint(Relation rel, const char *constrName,
-								 DropBehavior behavior,
-								 bool recurse, bool recursing,
-								 bool missing_ok);
+					 DropBehavior behavior,
+					 bool recurse, bool recursing,
+					 bool missing_ok);
 static void ATPrepAlterColumnType(List **wqueue,
 					  AlteredTableInfo *tab, Relation rel,
 					  bool recurse, bool recursing,
@@ -974,12 +974,11 @@ ExecuteTruncate(TruncateStmt *stmt)
 		Relation	rel = (Relation) lfirst(cell);
 
 		/*
-		 * Normally, we need a transaction-safe truncation here.  However,
-		 * if the table was either created in the current (sub)transaction
-		 * or has a new relfilenode in the current (sub)transaction, then
-		 * we can just truncate it in-place, because a rollback would
-		 * cause the whole table or the current physical file to be
-		 * thrown away anyway.
+		 * Normally, we need a transaction-safe truncation here.  However, if
+		 * the table was either created in the current (sub)transaction or has
+		 * a new relfilenode in the current (sub)transaction, then we can just
+		 * truncate it in-place, because a rollback would cause the whole
+		 * table or the current physical file to be thrown away anyway.
 		 */
 		if (rel->rd_createSubid == mySubid ||
 			rel->rd_newRelfilenodeSubid == mySubid)
@@ -1112,7 +1111,7 @@ truncate_check_rel(Relation rel)
 
 /*
  * storage_name
- *    returns the name corresponding to a typstorage/attstorage enum value
+ *	  returns the name corresponding to a typstorage/attstorage enum value
  */
 static const char *
 storage_name(char c)
@@ -1201,7 +1200,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
 	int			parentsWithOids = 0;
 	bool		have_bogus_defaults = false;
 	int			child_attno;
-	static Node	bogus_marker = { 0 };		/* marks conflicting defaults */
+	static Node bogus_marker = {0};		/* marks conflicting defaults */
 
 	/*
 	 * Check for and reject tables with too many columns. We perform this
@@ -1234,10 +1233,11 @@ MergeAttributes(List *schema, List *supers, bool istemp,
 		ListCell   *prev = entry;
 
 		if (coldef->typeName == NULL)
+
 			/*
-			 * Typed table column option that does not belong to a
-			 * column from the type.  This works because the columns
-			 * from the type come first in the list.
+			 * Typed table column option that does not belong to a column from
+			 * the type.  This works because the columns from the type come
+			 * first in the list.
 			 */
 			ereport(ERROR,
 					(errcode(ERRCODE_UNDEFINED_COLUMN),
@@ -1247,14 +1247,16 @@ MergeAttributes(List *schema, List *supers, bool istemp,
 		while (rest != NULL)
 		{
 			ColumnDef  *restdef = lfirst(rest);
-			ListCell   *next = lnext(rest); /* need to save it in case we delete it */
+			ListCell   *next = lnext(rest);		/* need to save it in case we
+												 * delete it */
 
 			if (strcmp(coldef->colname, restdef->colname) == 0)
 			{
 				if (coldef->is_from_type)
 				{
-					/* merge the column options into the column from
-					 * the type */
+					/*
+					 * merge the column options into the column from the type
+					 */
 					coldef->is_not_null = restdef->is_not_null;
 					coldef->raw_default = restdef->raw_default;
 					coldef->cooked_default = restdef->cooked_default;
@@ -1391,11 +1393,11 @@ MergeAttributes(List *schema, List *supers, bool istemp,
 				else if (def->storage != attribute->attstorage)
 					ereport(ERROR,
 							(errcode(ERRCODE_DATATYPE_MISMATCH),
-						errmsg("inherited column \"%s\" has a storage parameter conflict",
-							   attributeName),
-							   errdetail("%s versus %s",
-										 storage_name(def->storage),
-										 storage_name(attribute->attstorage))));
+							 errmsg("inherited column \"%s\" has a storage parameter conflict",
+									attributeName),
+							 errdetail("%s versus %s",
+									   storage_name(def->storage),
+									   storage_name(attribute->attstorage))));
 
 				def->inhcount++;
 				/* Merge of NOT NULL constraints = OR 'em together */
@@ -1563,11 +1565,11 @@ MergeAttributes(List *schema, List *supers, bool istemp,
 				else if (newdef->storage != 0 && def->storage != newdef->storage)
 					ereport(ERROR,
 							(errcode(ERRCODE_DATATYPE_MISMATCH),
-						errmsg("column \"%s\" has a storage parameter conflict",
-							   attributeName),
-							   errdetail("%s versus %s",
-										 storage_name(def->storage),
-										 storage_name(newdef->storage))));
+					 errmsg("column \"%s\" has a storage parameter conflict",
+							attributeName),
+							 errdetail("%s versus %s",
+									   storage_name(def->storage),
+									   storage_name(newdef->storage))));
 
 				/* Mark the column as locally defined */
 				def->is_local = true;
@@ -1978,8 +1980,10 @@ renameatt(Oid myrelid,
 	 */
 	if (recurse)
 	{
-		List	   *child_oids, *child_numparents;
-		ListCell   *lo, *li;
+		List	   *child_oids,
+				   *child_numparents;
+		ListCell   *lo,
+				   *li;
 
 		/*
 		 * we need the number of parents for each child so that the recursive
@@ -2039,13 +2043,13 @@ renameatt(Oid myrelid,
 						oldattname)));
 
 	/*
-	 * if the attribute is inherited, forbid the renaming.  if this is a
+	 * if the attribute is inherited, forbid the renaming.	if this is a
 	 * top-level call to renameatt(), then expected_parents will be 0, so the
 	 * effect of this code will be to prohibit the renaming if the attribute
 	 * is inherited at all.  if this is a recursive call to renameatt(),
 	 * expected_parents will be the number of parents the current relation has
-	 * within the inheritance hierarchy being processed, so we'll prohibit
-	 * the renaming only if there are additional parents from elsewhere.
+	 * within the inheritance hierarchy being processed, so we'll prohibit the
+	 * renaming only if there are additional parents from elsewhere.
 	 */
 	if (attform->attinhcount > expected_parents)
 		ereport(ERROR,
@@ -2861,9 +2865,9 @@ ATRewriteTables(List **wqueue)
 			OldHeap = heap_open(tab->relid, NoLock);
 
 			/*
-			 * We don't support rewriting of system catalogs; there are
-			 * too many corner cases and too little benefit.  In particular
-			 * this is certainly not going to work for mapped catalogs.
+			 * We don't support rewriting of system catalogs; there are too
+			 * many corner cases and too little benefit.  In particular this
+			 * is certainly not going to work for mapped catalogs.
 			 */
 			if (IsSystemRelation(OldHeap))
 				ereport(ERROR,
@@ -3007,11 +3011,10 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
 		newrel = NULL;
 
 	/*
-	 * Prepare a BulkInsertState and options for heap_insert. Because
-	 * we're building a new heap, we can skip WAL-logging and fsync it
-	 * to disk at the end instead (unless WAL-logging is required for
-	 * archiving or streaming replication). The FSM is empty too,
-	 * so don't bother using it.
+	 * Prepare a BulkInsertState and options for heap_insert. Because we're
+	 * building a new heap, we can skip WAL-logging and fsync it to disk at
+	 * the end instead (unless WAL-logging is required for archiving or
+	 * streaming replication). The FSM is empty too, so don't bother using it.
 	 */
 	if (newrel)
 	{
@@ -3255,7 +3258,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
 		/* If we skipped writing WAL, then we need to sync the heap. */
 		if (hi_options & HEAP_INSERT_SKIP_WAL)
 		{
-			char reason[NAMEDATALEN + 30];
+			char		reason[NAMEDATALEN + 30];
+
 			snprintf(reason, sizeof(reason), "table rewrite on \"%s\"",
 					 RelationGetRelationName(newrel));
 			XLogReportUnloggedStatement(reason);
@@ -4205,7 +4209,7 @@ ATExecSetOptions(Relation rel, const char *colName, Node *options,
 	/* Generate new proposed attoptions (text array) */
 	Assert(IsA(options, List));
 	datum = SysCacheGetAttr(ATTNAME, tuple, Anum_pg_attribute_attoptions,
-		&isnull);
+							&isnull);
 	newOptions = transformRelOptions(isnull ? (Datum) 0 : datum,
 									 (List *) options, NULL, NULL, false,
 									 isReset);
@@ -4338,8 +4342,10 @@ ATExecDropColumn(List **wqueue, Relation rel, const char *colName,
 	 * get the number of the attribute
 	 */
 	tuple = SearchSysCacheAttName(RelationGetRelid(rel), colName);
-	if (!HeapTupleIsValid(tuple)){
-		if (!missing_ok){
+	if (!HeapTupleIsValid(tuple))
+	{
+		if (!missing_ok)
+		{
 			ereport(ERROR,
 					(errcode(ERRCODE_UNDEFINED_COLUMN),
 					 errmsg("column \"%s\" of relation \"%s\" does not exist",
@@ -4574,9 +4580,10 @@ ATExecAddConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
 			break;
 
 		case CONSTR_FOREIGN:
+
 			/*
-			 * Note that we currently never recurse for FK constraints, so
-			 * the "recurse" flag is silently ignored.
+			 * Note that we currently never recurse for FK constraints, so the
+			 * "recurse" flag is silently ignored.
 			 *
 			 * Assign or validate constraint name
 			 */
@@ -4595,7 +4602,7 @@ ATExecAddConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
 			else
 				newConstraint->conname =
 					ChooseConstraintName(RelationGetRelationName(rel),
-										 strVal(linitial(newConstraint->fk_attrs)),
+								   strVal(linitial(newConstraint->fk_attrs)),
 										 "fkey",
 										 RelationGetNamespace(rel),
 										 NIL);
@@ -5093,9 +5100,9 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
 		if (indexStruct->indisprimary)
 		{
 			/*
-			 * Refuse to use a deferrable primary key.  This is per SQL spec,
-			 * and there would be a lot of interesting semantic problems if
-			 * we tried to allow it.
+			 * Refuse to use a deferrable primary key.	This is per SQL spec,
+			 * and there would be a lot of interesting semantic problems if we
+			 * tried to allow it.
 			 */
 			if (!indexStruct->indimmediate)
 				ereport(ERROR,
@@ -5243,15 +5250,15 @@ transformFkeyCheckAttrs(Relation pkrel,
 			}
 
 			/*
-			 * Refuse to use a deferrable unique/primary key.  This is per
-			 * SQL spec, and there would be a lot of interesting semantic
-			 * problems if we tried to allow it.
+			 * Refuse to use a deferrable unique/primary key.  This is per SQL
+			 * spec, and there would be a lot of interesting semantic problems
+			 * if we tried to allow it.
 			 */
 			if (found && !indexStruct->indimmediate)
 			{
 				/*
-				 * Remember that we found an otherwise matching index, so
-				 * that we can generate a more appropriate error message.
+				 * Remember that we found an otherwise matching index, so that
+				 * we can generate a more appropriate error message.
 				 */
 				found_deferrable = true;
 				found = false;
@@ -5623,12 +5630,14 @@ ATExecDropConstraint(Relation rel, const char *constrName,
 
 	systable_endscan(scan);
 
-	if (!found){
-		if (!missing_ok){
+	if (!found)
+	{
+		if (!missing_ok)
+		{
 			ereport(ERROR,
 					(errcode(ERRCODE_UNDEFINED_OBJECT),
-					 errmsg("constraint \"%s\" of relation \"%s\" does not exist",
-							constrName, RelationGetRelationName(rel))));
+				errmsg("constraint \"%s\" of relation \"%s\" does not exist",
+					   constrName, RelationGetRelationName(rel))));
 		}
 		else
 		{
@@ -5639,6 +5648,7 @@ ATExecDropConstraint(Relation rel, const char *constrName,
 			return;
 		}
 	}
+
 	/*
 	 * Propagate to children as appropriate.  Unlike most other ALTER
 	 * routines, we have to do this one level of recursion at a time; we can't
@@ -6997,12 +7007,13 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace)
 	heap_close(pg_class, RowExclusiveLock);
 
 	/*
-	 * Write an XLOG UNLOGGED record if WAL-logging was skipped because
-	 * WAL archiving is not enabled.
+	 * Write an XLOG UNLOGGED record if WAL-logging was skipped because WAL
+	 * archiving is not enabled.
 	 */
 	if (!XLogIsNeeded() && !rel->rd_istemp)
 	{
-		char reason[NAMEDATALEN + 40];
+		char		reason[NAMEDATALEN + 40];
+
 		snprintf(reason, sizeof(reason), "ALTER TABLE SET TABLESPACE on \"%s\"",
 				 RelationGetRelationName(rel));
 
@@ -7039,8 +7050,8 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
 	 * enabled AND it's not a temp rel.
 	 *
 	 * Note: If you change the conditions here, update the conditions in
-	 * ATExecSetTableSpace() for when an XLOG UNLOGGED record is written
-	 * to match.
+	 * ATExecSetTableSpace() for when an XLOG UNLOGGED record is written to
+	 * match.
 	 */
 	use_wal = XLogIsNeeded() && !istemp;
 
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 32c061279122bc1999e7ecdf402de6a127b3d926..5dcb3a60ab9f36fc3c7f367612cd1591ecd62c09 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -40,7 +40,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.73 2010/02/17 04:19:39 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.74 2010/02/26 02:00:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -85,7 +85,7 @@ char	   *temp_tablespaces = NULL;
 
 
 static void create_tablespace_directories(const char *location,
-										 const Oid tablespaceoid);
+							  const Oid tablespaceoid);
 static bool destroy_tablespace_directories(Oid tablespaceoid, bool redo);
 
 
@@ -159,8 +159,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
 
 					/*
 					 * Parent directories are missing during WAL replay, so
-					 * continue by creating simple parent directories
-					 * rather than a symlink.
+					 * continue by creating simple parent directories rather
+					 * than a symlink.
 					 */
 
 					/* create two parents up if not exist */
@@ -272,7 +272,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt)
 
 	/*
 	 * Check that location isn't too long. Remember that we're going to append
-	 * 'PG_XXX/<dboid>/<relid>.<nnn>'.  FYI, we never actually reference the
+	 * 'PG_XXX/<dboid>/<relid>.<nnn>'.	FYI, we never actually reference the
 	 * whole path, but mkdir() uses the first two parts.
 	 */
 	if (strlen(location) + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 +
@@ -535,13 +535,13 @@ DropTableSpace(DropTableSpaceStmt *stmt)
 static void
 create_tablespace_directories(const char *location, const Oid tablespaceoid)
 {
-	char *linkloc = palloc(OIDCHARS + OIDCHARS + 1);
-	char *location_with_version_dir = palloc(strlen(location) + 1 +
-										strlen(TABLESPACE_VERSION_DIRECTORY) + 1);
+	char	   *linkloc = palloc(OIDCHARS + OIDCHARS + 1);
+	char	   *location_with_version_dir = palloc(strlen(location) + 1 +
+								   strlen(TABLESPACE_VERSION_DIRECTORY) + 1);
 
 	sprintf(linkloc, "pg_tblspc/%u", tablespaceoid);
 	sprintf(location_with_version_dir, "%s/%s", location,
-										TABLESPACE_VERSION_DIRECTORY);
+			TABLESPACE_VERSION_DIRECTORY);
 
 	/*
 	 * Attempt to coerce target directory to safe permissions.	If this fails,
@@ -556,14 +556,14 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
 							location)));
 		else
 			ereport(ERROR,
-				(errcode_for_file_access(),
-				 errmsg("could not set permissions on directory \"%s\": %m",
-						location)));
+					(errcode_for_file_access(),
+				  errmsg("could not set permissions on directory \"%s\": %m",
+						 location)));
 	}
 
 	/*
-	 * The creation of the version directory prevents more than one
-	 * 	tablespace in a single location.
+	 * The creation of the version directory prevents more than one tablespace
+	 * in a single location.
 	 */
 	if (mkdir(location_with_version_dir, S_IRWXU) < 0)
 	{
@@ -575,8 +575,8 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
 		else
 			ereport(ERROR,
 					(errcode_for_file_access(),
-				  errmsg("could not create directory \"%s\": %m",
-						 location_with_version_dir)));
+					 errmsg("could not create directory \"%s\": %m",
+							location_with_version_dir)));
 	}
 
 	/*
@@ -613,9 +613,9 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo)
 	struct stat st;
 
 	linkloc_with_version_dir = palloc(9 + 1 + OIDCHARS + 1 +
-									strlen(TABLESPACE_VERSION_DIRECTORY));
+									  strlen(TABLESPACE_VERSION_DIRECTORY));
 	sprintf(linkloc_with_version_dir, "pg_tblspc/%u/%s", tablespaceoid,
-									TABLESPACE_VERSION_DIRECTORY);
+			TABLESPACE_VERSION_DIRECTORY);
 
 	/*
 	 * Check if the tablespace still contains any files.  We try to rmdir each
@@ -690,12 +690,12 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo)
 				(errcode_for_file_access(),
 				 errmsg("could not remove directory \"%s\": %m",
 						linkloc_with_version_dir)));
- 
+
 	/*
-	 * Try to remove the symlink.  We must however deal with the
-	 * possibility that it's a directory instead of a symlink --- this could
-	 * happen during WAL replay (see TablespaceCreateDbspace), and it is also
-	 * the case on Windows where junction points lstat() as directories.
+	 * Try to remove the symlink.  We must however deal with the possibility
+	 * that it's a directory instead of a symlink --- this could happen during
+	 * WAL replay (see TablespaceCreateDbspace), and it is also the case on
+	 * Windows where junction points lstat() as directories.
 	 */
 	linkloc = pstrdup(linkloc_with_version_dir);
 	get_parent_directory(linkloc);
@@ -948,7 +948,7 @@ AlterTableSpaceOptions(AlterTableSpaceOptionsStmt *stmt)
 		ereport(ERROR,
 				(errcode(ERRCODE_UNDEFINED_OBJECT),
 				 errmsg("tablespace \"%s\" does not exist",
-					stmt->tablespacename)));
+						stmt->tablespacename)));
 
 	/* Must be owner of the existing object */
 	if (!pg_tablespace_ownercheck(HeapTupleGetOid(tup), GetUserId()))
@@ -1366,30 +1366,30 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record)
 		xl_tblspc_drop_rec *xlrec = (xl_tblspc_drop_rec *) XLogRecGetData(record);
 
 		/*
-		 * If we issued a WAL record for a drop tablespace it is
-		 * because there were no files in it at all. That means that
-		 * no permanent objects can exist in it at this point.
+		 * If we issued a WAL record for a drop tablespace it is because there
+		 * were no files in it at all. That means that no permanent objects
+		 * can exist in it at this point.
 		 *
-		 * It is possible for standby users to be using this tablespace
-		 * as a location for their temporary files, so if we fail to
-		 * remove all files then do conflict processing and try again,
-		 * if currently enabled.
+		 * It is possible for standby users to be using this tablespace as a
+		 * location for their temporary files, so if we fail to remove all
+		 * files then do conflict processing and try again, if currently
+		 * enabled.
 		 */
 		if (!destroy_tablespace_directories(xlrec->ts_id, true))
 		{
 			ResolveRecoveryConflictWithTablespace(xlrec->ts_id);
 
 			/*
-			 * If we did recovery processing then hopefully the
-			 * backends who wrote temp files should have cleaned up and
-			 * exited by now. So lets recheck before we throw an error.
-			 * If !process_conflicts then this will just fail again.
+			 * If we did recovery processing then hopefully the backends who
+			 * wrote temp files should have cleaned up and exited by now. So
+			 * lets recheck before we throw an error. If !process_conflicts
+			 * then this will just fail again.
 			 */
 			if (!destroy_tablespace_directories(xlrec->ts_id, true))
 				ereport(ERROR,
-					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-					 errmsg("tablespace %u is not empty",
-							xlrec->ts_id)));
+						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+						 errmsg("tablespace %u is not empty",
+								xlrec->ts_id)));
 		}
 	}
 	else
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 49c913c00bbad00e39f40c9ef8121b5a64f27d32..aef7838eb6e54058df488b1a5e952fada491f4d5 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.261 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.262 2010/02/26 02:00:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -117,7 +117,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 {
 	int16		tgtype;
 	int			ncolumns;
-	int2       *columns;
+	int2	   *columns;
 	int2vector *tgattr;
 	Node	   *whenClause;
 	List	   *whenRtable;
@@ -196,10 +196,10 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 	 */
 	if (stmt->whenClause)
 	{
-		ParseState	   *pstate;
+		ParseState *pstate;
 		RangeTblEntry *rte;
-		List		   *varList;
-		ListCell	   *lc;
+		List	   *varList;
+		ListCell   *lc;
 
 		/* Set up a pstate to parse with */
 		pstate = make_parsestate(NULL);
@@ -230,7 +230,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 		if (pstate->p_hasSubLinks)
 			ereport(ERROR,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg("cannot use subquery in trigger WHEN condition")));
+				   errmsg("cannot use subquery in trigger WHEN condition")));
 		if (pstate->p_hasAggs)
 			ereport(ERROR,
 					(errcode(ERRCODE_GROUPING_ERROR),
@@ -238,7 +238,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 		if (pstate->p_hasWindowFuncs)
 			ereport(ERROR,
 					(errcode(ERRCODE_WINDOWING_ERROR),
-					 errmsg("cannot use window function in trigger WHEN condition")));
+			errmsg("cannot use window function in trigger WHEN condition")));
 
 		/*
 		 * Check for disallowed references to OLD/NEW.
@@ -364,11 +364,11 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 											  stmt->deferrable,
 											  stmt->initdeferred,
 											  RelationGetRelid(rel),
-											  NULL,	/* no conkey */
+											  NULL,		/* no conkey */
 											  0,
-											  InvalidOid,	/* no domain */
-											  InvalidOid,	/* no index */
-											  InvalidOid,	/* no foreign key */
+											  InvalidOid,		/* no domain */
+											  InvalidOid,		/* no index */
+											  InvalidOid,		/* no foreign key */
 											  NULL,
 											  NULL,
 											  NULL,
@@ -382,7 +382,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 											  NULL,
 											  NULL,
 											  true,		/* islocal */
-											  0);	/* inhcount */
+											  0);		/* inhcount */
 	}
 
 	/*
@@ -394,9 +394,9 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 	trigoid = GetNewOid(tgrel);
 
 	/*
-	 * If trigger is internally generated, modify the provided trigger name
-	 * to ensure uniqueness by appending the trigger OID.  (Callers will
-	 * usually supply a simple constant trigger name in these cases.)
+	 * If trigger is internally generated, modify the provided trigger name to
+	 * ensure uniqueness by appending the trigger OID.	(Callers will usually
+	 * supply a simple constant trigger name in these cases.)
 	 */
 	if (isInternal)
 	{
@@ -413,8 +413,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 	/*
 	 * Scan pg_trigger for existing triggers on relation.  We do this only to
 	 * give a nice error message if there's already a trigger of the same
-	 * name.  (The unique index on tgrelid/tgname would complain anyway.)
-	 * We can skip this for internally generated triggers, since the name
+	 * name.  (The unique index on tgrelid/tgname would complain anyway.) We
+	 * can skip this for internally generated triggers, since the name
 	 * modification above should be sufficient.
 	 *
 	 * NOTE that this is cool only because we have AccessExclusiveLock on the
@@ -435,8 +435,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 			if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
 				ereport(ERROR,
 						(errcode(ERRCODE_DUPLICATE_OBJECT),
-						 errmsg("trigger \"%s\" for relation \"%s\" already exists",
-								trigname, stmt->relation->relname)));
+				  errmsg("trigger \"%s\" for relation \"%s\" already exists",
+						 trigname, stmt->relation->relname)));
 		}
 		systable_endscan(tgscan);
 	}
@@ -515,17 +515,17 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 		columns = (int2 *) palloc(ncolumns * sizeof(int2));
 		foreach(cell, stmt->columns)
 		{
-			char   *name = strVal(lfirst(cell));
-			int2	attnum;
-			int		j;
+			char	   *name = strVal(lfirst(cell));
+			int2		attnum;
+			int			j;
 
-			/* Lookup column name.  System columns are not allowed */
+			/* Lookup column name.	System columns are not allowed */
 			attnum = attnameAttNum(rel, name, false);
 			if (attnum == InvalidAttrNumber)
 				ereport(ERROR,
 						(errcode(ERRCODE_UNDEFINED_COLUMN),
-						 errmsg("column \"%s\" of relation \"%s\" does not exist",
-								name, RelationGetRelationName(rel))));
+					errmsg("column \"%s\" of relation \"%s\" does not exist",
+						   name, RelationGetRelationName(rel))));
 
 			/* Check for duplicates */
 			for (j = i - 1; j >= 0; j--)
@@ -624,7 +624,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 	else
 	{
 		/*
-		 * User CREATE TRIGGER, so place dependencies.  We make trigger be
+		 * User CREATE TRIGGER, so place dependencies.	We make trigger be
 		 * auto-dropped if its relation is dropped or if the FK relation is
 		 * dropped.  (Auto drop is compatible with our pre-7.3 behavior.)
 		 */
@@ -641,6 +641,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 		}
 		/* Not possible to have an index dependency in this case */
 		Assert(!OidIsValid(indexOid));
+
 		/*
 		 * If it's a user-specified constraint trigger, make the constraint
 		 * internally dependent on the trigger instead of vice versa.
@@ -657,7 +658,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 	/* If column-specific trigger, add normal dependencies on columns */
 	if (columns != NULL)
 	{
-		int		i;
+		int			i;
 
 		referenced.classId = RelationRelationId;
 		referenced.objectId = RelationGetRelid(rel);
@@ -669,8 +670,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 	}
 
 	/*
-	 * If it has a WHEN clause, add dependencies on objects mentioned in
-	 * the expression (eg, functions, as well as any columns used).
+	 * If it has a WHEN clause, add dependencies on objects mentioned in the
+	 * expression (eg, functions, as well as any columns used).
 	 */
 	if (whenClause != NULL)
 		recordDependencyOnExpr(&myself, whenClause, whenRtable,
@@ -1714,9 +1715,9 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
 	 * comparison; so we just compare corresponding slots of the two sets.
 	 *
 	 * Note: comparing the stringToNode forms of the WHEN clauses means that
-	 * parse column locations will affect the result.  This is okay as long
-	 * as this function is only used for detecting exact equality, as for
-	 * example in checking for staleness of a cache entry.
+	 * parse column locations will affect the result.  This is okay as long as
+	 * this function is only used for detecting exact equality, as for example
+	 * in checking for staleness of a cache entry.
 	 */
 	if (trigdesc1 != NULL)
 	{
@@ -1763,11 +1764,11 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
 				if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
 					return false;
 			if (trig1->tgqual == NULL && trig2->tgqual == NULL)
-				/* ok */ ;
+				 /* ok */ ;
 			else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
 				return false;
 			else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
-					return false;
+				return false;
 		}
 	}
 	else if (trigdesc2 != NULL)
@@ -2114,7 +2115,7 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
 	int		   *tgindx;
 	int			i;
 	TriggerData LocTriggerData;
-	Bitmapset   *modifiedCols;
+	Bitmapset  *modifiedCols;
 
 	trigdesc = relinfo->ri_TrigDesc;
 
@@ -2185,7 +2186,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
 	HeapTuple	intuple = newtuple;
 	TupleTableSlot *newSlot;
 	int			i;
-	Bitmapset   *modifiedCols;
+	Bitmapset  *modifiedCols;
 
 	trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
 								   &newSlot);
@@ -2381,9 +2382,9 @@ ltrmark:;
 
 						/*
 						 * EvalPlanQual already locked the tuple, but we
-						 * re-call heap_lock_tuple anyway as an easy way
-						 * of re-fetching the correct tuple.  Speed is
-						 * hardly a criterion in this path anyhow.
+						 * re-call heap_lock_tuple anyway as an easy way of
+						 * re-fetching the correct tuple.  Speed is hardly a
+						 * criterion in this path anyhow.
 						 */
 						goto ltrmark;
 					}
@@ -2485,8 +2486,8 @@ TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
 		Assert(estate != NULL);
 
 		/*
-		 * trigger is an element of relinfo->ri_TrigDesc->triggers[];
-		 * find the matching element of relinfo->ri_TrigWhenExprs[]
+		 * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
+		 * matching element of relinfo->ri_TrigWhenExprs[]
 		 */
 		i = trigger - relinfo->ri_TrigDesc->triggers;
 		predicate = &relinfo->ri_TrigWhenExprs[i];
@@ -2498,7 +2499,7 @@ TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
 		 */
 		if (*predicate == NIL)
 		{
-			Node   *tgqual;
+			Node	   *tgqual;
 
 			oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
 			tgqual = stringToNode(trigger->tgqual);
@@ -3895,9 +3896,9 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
 		 * Handle SET CONSTRAINTS constraint-name [, ...]
 		 *
 		 * First, identify all the named constraints and make a list of their
-		 * OIDs.  Since, unlike the SQL spec, we allow multiple constraints
-		 * of the same name within a schema, the specifications are not
-		 * necessarily unique.  Our strategy is to target all matching
+		 * OIDs.  Since, unlike the SQL spec, we allow multiple constraints of
+		 * the same name within a schema, the specifications are not
+		 * necessarily unique.	Our strategy is to target all matching
 		 * constraints within the first search-path schema that has any
 		 * matches, but disregard matches in schemas beyond the first match.
 		 * (This is a bit odd but it's the historical behavior.)
@@ -4025,9 +4026,9 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
 				Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
 
 				/*
-				 * Silently skip triggers that are marked as non-deferrable
-				 * in pg_trigger.  This is not an error condition, since
-				 * a deferrable RI constraint may have some non-deferrable
+				 * Silently skip triggers that are marked as non-deferrable in
+				 * pg_trigger.	This is not an error condition, since a
+				 * deferrable RI constraint may have some non-deferrable
 				 * actions.
 				 */
 				if (pg_trigger->tgdeferrable)
@@ -4198,7 +4199,7 @@ AfterTriggerPendingOnRel(Oid relid)
  *	be fired for an event.
  *
  *	NOTE: this is called whenever there are any triggers associated with
- *	the event (even if they are disabled).  This function decides which
+ *	the event (even if they are disabled).	This function decides which
  *	triggers actually need to be queued.
  * ----------
  */
@@ -4217,9 +4218,9 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
 	int		   *tgindx;
 
 	/*
-	 * Check state.  We use normal tests not Asserts because it is possible
-	 * to reach here in the wrong state given misconfigured RI triggers,
-	 * in particular deferring a cascade action trigger.
+	 * Check state.  We use normal tests not Asserts because it is possible to
+	 * reach here in the wrong state given misconfigured RI triggers, in
+	 * particular deferring a cascade action trigger.
 	 */
 	if (afterTriggers == NULL)
 		elog(ERROR, "AfterTriggerSaveEvent() called outside of transaction");
@@ -4363,9 +4364,9 @@ AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
 		}
 
 		/*
-		 * If the trigger is a deferred unique constraint check trigger,
-		 * only queue it if the unique constraint was potentially violated,
-		 * which we know from index insertion time.
+		 * If the trigger is a deferred unique constraint check trigger, only
+		 * queue it if the unique constraint was potentially violated, which
+		 * we know from index insertion time.
 		 */
 		if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
 		{
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 86d631a0ad7504ce1e1e33d141723ed76f58abb0..8a85e79ea659481295fb744b3d83ab82f832ecb7 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.147 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.148 2010/02/26 02:00:40 momjian Exp $
  *
  * DESCRIPTION
  *	  The "DefineFoo" routines take the parse tree and pick out the
@@ -74,7 +74,7 @@ typedef struct
 	/* atts[] is of allocated length RelationGetNumberOfAttributes(rel) */
 } RelToCheck;
 
-Oid binary_upgrade_next_pg_type_array_oid = InvalidOid;
+Oid			binary_upgrade_next_pg_type_array_oid = InvalidOid;
 
 static Oid	findTypeInputFunction(List *procname, Oid typeOid);
 static Oid	findTypeOutputFunction(List *procname, Oid typeOid);
@@ -527,12 +527,12 @@ DefineType(List *names, List *parameters)
 	 * now have TypeCreate do all the real work.
 	 */
 	typoid =
-		/*
-		 *	The pg_type.oid is stored in user tables as array elements
-		 *	(base types) in ArrayType and in composite types in
-		 *	DatumTupleFields.  This oid must be preserved by binary
-		 *	upgrades.
-		 */
+
+	/*
+	 * The pg_type.oid is stored in user tables as array elements (base types)
+	 * in ArrayType and in composite types in DatumTupleFields.  This oid must
+	 * be preserved by binary upgrades.
+	 */
 		TypeCreate(InvalidOid,	/* no predetermined type OID */
 				   typeName,	/* type name */
 				   typeNamespace,		/* namespace */
@@ -986,7 +986,7 @@ DefineDomain(CreateDomainStmt *stmt)
 			case CONSTR_EXCLUSION:
 				ereport(ERROR,
 						(errcode(ERRCODE_SYNTAX_ERROR),
-					 errmsg("exclusion constraints not possible for domains")));
+				  errmsg("exclusion constraints not possible for domains")));
 				break;
 
 			case CONSTR_FOREIGN:
@@ -1466,7 +1466,7 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid)
 Oid
 AssignTypeArrayOid(void)
 {
-	Oid		type_array_oid;
+	Oid			type_array_oid;
 
 	/* Pre-assign the type's array OID for use in pg_type.typarray */
 	if (OidIsValid(binary_upgrade_next_pg_type_array_oid))
@@ -1525,10 +1525,10 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
 	createStmt->tablespacename = NULL;
 
 	/*
-	 * Check for collision with an existing type name. If there is one
-	 * and it's an autogenerated array, we can rename it out of the
-	 * way.  This check is here mainly to get a better error message
-	 * about a "type" instead of below about a "relation".
+	 * Check for collision with an existing type name. If there is one and
+	 * it's an autogenerated array, we can rename it out of the way.  This
+	 * check is here mainly to get a better error message about a "type"
+	 * instead of below about a "relation".
 	 */
 	typeNamespace = RangeVarGetCreationNamespace(createStmt->relation);
 	old_type_oid =
@@ -1911,7 +1911,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
 		case CONSTR_EXCLUSION:
 			ereport(ERROR,
 					(errcode(ERRCODE_SYNTAX_ERROR),
-					 errmsg("exclusion constraints not possible for domains")));
+				  errmsg("exclusion constraints not possible for domains")));
 			break;
 
 		case CONSTR_FOREIGN:
@@ -2343,7 +2343,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
 						  ' ',
 						  ' ',
 						  ' ',
-						  NULL,	/* not an exclusion constraint */
+						  NULL, /* not an exclusion constraint */
 						  expr, /* Tree form of check constraint */
 						  ccbin,	/* Binary form of check constraint */
 						  ccsrc,	/* Source form of check constraint */
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 36711565548559bdccae190938a34932ce6ca0be..cdf7dc5ef1ffd1a4b072a0e1b250c52c0b4b9736 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.192 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.193 2010/02/26 02:00:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -321,7 +321,7 @@ CreateRole(CreateRoleStmt *stmt)
 	if (check_password_hook && password)
 		(*check_password_hook) (stmt->role,
 								password,
-								isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
+			   isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
 								validUntil_datum,
 								validUntil_null);
 
@@ -630,7 +630,7 @@ AlterRole(AlterRoleStmt *stmt)
 	if (check_password_hook && password)
 		(*check_password_hook) (stmt->role,
 								password,
-								isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
+			   isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
 								validUntil_datum,
 								validUntil_null);
 
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 5c1c48570e210a0924938ed7b87665006b3186ff..e77430e6e4eba9c66db3983296d8d836d0ddd754 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -14,7 +14,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.409 2010/02/15 16:10:34 alvherre Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.410 2010/02/26 02:00:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -109,7 +109,7 @@ vacuum(VacuumStmt *vacstmt, Oid relid, bool do_toast,
 	/*
 	 * We cannot run VACUUM inside a user transaction block; if we were inside
 	 * a transaction, then our commit- and start-transaction-command calls
-	 * would not have the intended effect!  There are numerous other subtle
+	 * would not have the intended effect!	There are numerous other subtle
 	 * dependencies on this, too.
 	 *
 	 * ANALYZE (without VACUUM) can run either way.
@@ -664,9 +664,9 @@ vac_update_datfrozenxid(void)
 	heap_close(relation, RowExclusiveLock);
 
 	/*
-	 * If we were able to advance datfrozenxid, see if we can truncate pg_clog.
-	 * Also do it if the shared XID-wrap-limit info is stale, since this
-	 * action will update that too.
+	 * If we were able to advance datfrozenxid, see if we can truncate
+	 * pg_clog. Also do it if the shared XID-wrap-limit info is stale, since
+	 * this action will update that too.
 	 */
 	if (dirty || ForceTransactionIdLimitUpdate())
 		vac_truncate_clog(newFrozenXid);
@@ -944,8 +944,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound,
 	/*
 	 * Switch to the table owner's userid, so that any index functions are run
 	 * as that user.  Also lock down security-restricted operations and
-	 * arrange to make GUC variable changes local to this command.
-	 * (This is unnecessary, but harmless, for lazy VACUUM.)
+	 * arrange to make GUC variable changes local to this command. (This is
+	 * unnecessary, but harmless, for lazy VACUUM.)
 	 */
 	GetUserIdAndSecContext(&save_userid, &save_sec_context);
 	SetUserIdAndSecContext(onerel->rd_rel->relowner,
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 247cc72dd1fb6e4a9a7d8a6cfb35b70686a1a95c..1cf1ae3e59379aaafbb4fe8195cd8f1e533e93c9 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -29,7 +29,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.131 2010/02/09 21:43:30 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.132 2010/02/26 02:00:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -268,8 +268,8 @@ static void
 vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
 {
 	/*
-	 * No need to log changes for temp tables, they do not contain
-	 * data visible on the standby server.
+	 * No need to log changes for temp tables, they do not contain data
+	 * visible on the standby server.
 	 */
 	if (rel->rd_istemp || !XLogIsNeeded())
 		return;
@@ -629,7 +629,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 			{
 				lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
 				HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
-												&vacrelstats->latestRemovedXid);
+											 &vacrelstats->latestRemovedXid);
 				tups_vacuumed += 1;
 			}
 			else
@@ -1039,7 +1039,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 	RelationTruncate(onerel, new_rel_pages);
 
 	/*
-	 * We can release the exclusive lock as soon as we have truncated.  Other
+	 * We can release the exclusive lock as soon as we have truncated.	Other
 	 * backends can't safely access the relation until they have processed the
 	 * smgr invalidation that smgrtruncate sent out ... but that should happen
 	 * as part of standard invalidation processing once they acquire lock on
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 20d59f9a8c921d316a8875590ca0667b49c44ab2..151299555cf02f2f05cb368ab249662b6d08e6b9 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.347 2010/02/20 21:24:02 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.348 2010/02/26 02:00:41 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -76,7 +76,7 @@ static void ExecCheckRTPerms(List *rangeTable);
 static void ExecCheckRTEPerms(RangeTblEntry *rte);
 static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
 static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
-							  Plan *planTree);
+				  Plan *planTree);
 static void OpenIntoRel(QueryDesc *queryDesc);
 static void CloseIntoRel(QueryDesc *queryDesc);
 static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
@@ -582,8 +582,8 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
 	/*
 	 * CREATE TABLE AS or SELECT INTO?
 	 *
-	 * XXX should we allow this if the destination is temp?  Considering
-	 * that it would still require catalog changes, probably not.
+	 * XXX should we allow this if the destination is temp?  Considering that
+	 * it would still require catalog changes, probably not.
 	 */
 	if (plannedstmt->intoClause != NULL)
 		PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
@@ -641,8 +641,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 	/*
 	 * initialize result relation stuff, and open/lock the result rels.
 	 *
-	 * We must do this before initializing the plan tree, else we might
-	 * try to do a lock upgrade if a result rel is also a source rel.
+	 * We must do this before initializing the plan tree, else we might try to
+	 * do a lock upgrade if a result rel is also a source rel.
 	 */
 	if (plannedstmt->resultRelations)
 	{
@@ -686,8 +686,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 
 	/*
 	 * Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
-	 * before we initialize the plan tree, else we'd be risking lock
-	 * upgrades.  While we are at it, build the ExecRowMark list.
+	 * before we initialize the plan tree, else we'd be risking lock upgrades.
+	 * While we are at it, build the ExecRowMark list.
 	 */
 	estate->es_rowMarks = NIL;
 	foreach(l, plannedstmt->rowMarks)
@@ -804,8 +804,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 	tupType = ExecGetResultType(planstate);
 
 	/*
-	 * Initialize the junk filter if needed.  SELECT queries need a
-	 * filter if there are any junk attrs in the top-level tlist.
+	 * Initialize the junk filter if needed.  SELECT queries need a filter if
+	 * there are any junk attrs in the top-level tlist.
 	 */
 	if (operation == CMD_SELECT)
 	{
@@ -1101,9 +1101,9 @@ ExecEndPlan(PlanState *planstate, EState *estate)
 
 	/*
 	 * destroy the executor's tuple table.  Actually we only care about
-	 * releasing buffer pins and tupdesc refcounts; there's no need to
-	 * pfree the TupleTableSlots, since the containing memory context
-	 * is about to go away anyway.
+	 * releasing buffer pins and tupdesc refcounts; there's no need to pfree
+	 * the TupleTableSlots, since the containing memory context is about to go
+	 * away anyway.
 	 */
 	ExecResetTupleTable(estate->es_tupleTable, false);
 
@@ -1208,8 +1208,8 @@ ExecutePlan(EState *estate,
 			slot = ExecFilterJunk(estate->es_junkFilter, slot);
 
 		/*
-		 * If we are supposed to send the tuple somewhere, do so.
-		 * (In practice, this is probably always the case at this point.)
+		 * If we are supposed to send the tuple somewhere, do so. (In
+		 * practice, this is probably always the case at this point.)
 		 */
 		if (sendTuples)
 			(*dest->receiveSlot) (slot, dest);
@@ -1390,8 +1390,8 @@ EvalPlanQual(EState *estate, EPQState *epqstate,
 	EvalPlanQualBegin(epqstate, estate);
 
 	/*
-	 * Free old test tuple, if any, and store new tuple where relation's
-	 * scan node will see it
+	 * Free old test tuple, if any, and store new tuple where relation's scan
+	 * node will see it
 	 */
 	EvalPlanQualSetTuple(epqstate, rti, copyTuple);
 
@@ -1406,19 +1406,19 @@ EvalPlanQual(EState *estate, EPQState *epqstate,
 	slot = EvalPlanQualNext(epqstate);
 
 	/*
-	 * If we got a tuple, force the slot to materialize the tuple so that
-	 * it is not dependent on any local state in the EPQ query (in particular,
+	 * If we got a tuple, force the slot to materialize the tuple so that it
+	 * is not dependent on any local state in the EPQ query (in particular,
 	 * it's highly likely that the slot contains references to any pass-by-ref
-	 * datums that may be present in copyTuple).  As with the next step,
-	 * this is to guard against early re-use of the EPQ query.
+	 * datums that may be present in copyTuple).  As with the next step, this
+	 * is to guard against early re-use of the EPQ query.
 	 */
 	if (!TupIsNull(slot))
 		(void) ExecMaterializeSlot(slot);
 
 	/*
-	 * Clear out the test tuple.  This is needed in case the EPQ query
-	 * is re-used to test a tuple for a different relation.  (Not clear
-	 * that can really happen, but let's be safe.)
+	 * Clear out the test tuple.  This is needed in case the EPQ query is
+	 * re-used to test a tuple for a different relation.  (Not clear that can
+	 * really happen, but let's be safe.)
 	 */
 	EvalPlanQualSetTuple(epqstate, rti, NULL);
 
@@ -1680,8 +1680,8 @@ EvalPlanQualSetTuple(EPQState *epqstate, Index rti, HeapTuple tuple)
 	Assert(rti > 0);
 
 	/*
-	 * free old test tuple, if any, and store new tuple where relation's
-	 * scan node will see it
+	 * free old test tuple, if any, and store new tuple where relation's scan
+	 * node will see it
 	 */
 	if (estate->es_epqTuple[rti - 1] != NULL)
 		heap_freetuple(estate->es_epqTuple[rti - 1]);
@@ -1704,7 +1704,7 @@ EvalPlanQualGetTuple(EPQState *epqstate, Index rti)
 
 /*
  * Fetch the current row values for any non-locked relations that need
- * to be scanned by an EvalPlanQual operation.  origslot must have been set
+ * to be scanned by an EvalPlanQual operation.	origslot must have been set
  * to contain the current result row (top-level row) that we need to recheck.
  */
 void
@@ -1841,7 +1841,7 @@ EvalPlanQualBegin(EPQState *epqstate, EState *parentestate)
 		/* Recopy current values of parent parameters */
 		if (parentestate->es_plannedstmt->nParamExec > 0)
 		{
-			int		i = parentestate->es_plannedstmt->nParamExec;
+			int			i = parentestate->es_plannedstmt->nParamExec;
 
 			while (--i >= 0)
 			{
@@ -1913,7 +1913,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
 	estate->es_param_list_info = parentestate->es_param_list_info;
 	if (parentestate->es_plannedstmt->nParamExec > 0)
 	{
-		int		i = parentestate->es_plannedstmt->nParamExec;
+		int			i = parentestate->es_plannedstmt->nParamExec;
 
 		estate->es_param_exec_vals = (ParamExecData *)
 			palloc0(i * sizeof(ParamExecData));
@@ -1929,7 +1929,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
 
 	/*
 	 * Each EState must have its own es_epqScanDone state, but if we have
-	 * nested EPQ checks they should share es_epqTuple arrays.  This allows
+	 * nested EPQ checks they should share es_epqTuple arrays.	This allows
 	 * sub-rechecks to inherit the values being examined by an outer recheck.
 	 */
 	estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
@@ -1954,10 +1954,10 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
 	/*
 	 * Initialize private state information for each SubPlan.  We must do this
 	 * before running ExecInitNode on the main query tree, since
-	 * ExecInitSubPlan expects to be able to find these entries.
-	 * Some of the SubPlans might not be used in the part of the plan tree
-	 * we intend to run, but since it's not easy to tell which, we just
-	 * initialize them all.
+	 * ExecInitSubPlan expects to be able to find these entries. Some of the
+	 * SubPlans might not be used in the part of the plan tree we intend to
+	 * run, but since it's not easy to tell which, we just initialize them
+	 * all.
 	 */
 	Assert(estate->es_subplanstates == NIL);
 	foreach(l, parentestate->es_plannedstmt->subplans)
@@ -1972,9 +1972,9 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
 	}
 
 	/*
-	 * Initialize the private state information for all the nodes in the
-	 * part of the plan tree we need to run.  This opens files, allocates
-	 * storage and leaves us ready to start processing tuples.
+	 * Initialize the private state information for all the nodes in the part
+	 * of the plan tree we need to run.  This opens files, allocates storage
+	 * and leaves us ready to start processing tuples.
 	 */
 	epqstate->planstate = ExecInitNode(planTree, estate, 0);
 
@@ -2078,8 +2078,8 @@ OpenIntoRel(QueryDesc *queryDesc)
 	Assert(into);
 
 	/*
-	 * XXX This code needs to be kept in sync with DefineRelation().
-	 * Maybe we should try to use that function instead.
+	 * XXX This code needs to be kept in sync with DefineRelation(). Maybe we
+	 * should try to use that function instead.
 	 */
 
 	/*
@@ -2242,7 +2242,8 @@ CloseIntoRel(QueryDesc *queryDesc)
 		/* If we skipped using WAL, must heap_sync before commit */
 		if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
 		{
-			char reason[NAMEDATALEN + 30];
+			char		reason[NAMEDATALEN + 30];
+
 			snprintf(reason, sizeof(reason), "SELECT INTO on \"%s\"",
 					 RelationGetRelationName(myState->rel));
 			XLogReportUnloggedStatement(reason);
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 92be3a0a55972d725aa052b529e1695b72406247..e381e112821b58e36301906a2c3145bb1dda79df 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.262 2010/02/18 18:41:47 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.263 2010/02/26 02:00:41 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -357,7 +357,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
 		 * We might have a nested-assignment situation, in which the
 		 * refassgnexpr is itself a FieldStore or ArrayRef that needs to
 		 * obtain and modify the previous value of the array element or slice
-		 * being replaced.  If so, we have to extract that value from the
+		 * being replaced.	If so, we have to extract that value from the
 		 * array and pass it down via the econtext's caseValue.  It's safe to
 		 * reuse the CASE mechanism because there cannot be a CASE between
 		 * here and where the value would be needed, and an array assignment
@@ -386,7 +386,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
 													  astate->refelemlength,
 													  astate->refelembyval,
 													  astate->refelemalign,
-													  &econtext->caseValue_isNull);
+												&econtext->caseValue_isNull);
 			}
 			else
 			{
@@ -673,7 +673,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
 			 * We really only care about number of attributes and data type.
 			 * Also, we can ignore type mismatch on columns that are dropped
 			 * in the destination type, so long as (1) the physical storage
-			 * matches or (2) the actual column value is NULL.  Case (1) is
+			 * matches or (2) the actual column value is NULL.	Case (1) is
 			 * helpful in some cases involving out-of-date cached plans, while
 			 * case (2) is expected behavior in situations such as an INSERT
 			 * into a table with dropped columns (the planner typically
@@ -682,8 +682,8 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
 			 * holds, we have to use ExecEvalWholeRowSlow to check (2) for
 			 * each row.  Also, we have to allow the case that the slot has
 			 * more columns than the Var's type, because we might be looking
-			 * at the output of a subplan that includes resjunk columns.
-			 * (XXX it would be nice to verify that the extra columns are all
+			 * at the output of a subplan that includes resjunk columns. (XXX
+			 * it would be nice to verify that the extra columns are all
 			 * marked resjunk, but we haven't got access to the subplan
 			 * targetlist here...) Resjunk columns should always be at the end
 			 * of a targetlist, so it's sufficient to ignore them here; but we
@@ -702,7 +702,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
 										  slot_tupdesc->natts,
 										  var_tupdesc->natts)));
 			else if (var_tupdesc->natts < slot_tupdesc->natts)
-				needslow = true;			/* need to trim trailing atts */
+				needslow = true;	/* need to trim trailing atts */
 
 			for (i = 0; i < var_tupdesc->natts; i++)
 			{
@@ -722,7 +722,7 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
 
 				if (vattr->attlen != sattr->attlen ||
 					vattr->attalign != sattr->attalign)
-					needslow = true;		/* need runtime check for null */
+					needslow = true;	/* need runtime check for null */
 			}
 
 			ReleaseTupleDesc(var_tupdesc);
@@ -907,7 +907,7 @@ ExecEvalWholeRowSlow(ExprState *exprstate, ExprContext *econtext,
 
 		if (!vattr->attisdropped)
 			continue;			/* already checked non-dropped cols */
-		if (heap_attisnull(tuple, i+1))
+		if (heap_attisnull(tuple, i + 1))
 			continue;			/* null is always okay */
 		if (vattr->attlen != sattr->attlen ||
 			vattr->attalign != sattr->attalign)
@@ -2722,7 +2722,7 @@ ExecEvalConvertRowtype(ConvertRowtypeExprState *cstate,
 		/* prepare map from old to new attribute numbers */
 		cstate->map = convert_tuples_by_name(cstate->indesc,
 											 cstate->outdesc,
-											 gettext_noop("could not convert row type"));
+								 gettext_noop("could not convert row type"));
 		cstate->initialized = true;
 
 		MemoryContextSwitchTo(old_cxt);
@@ -3870,11 +3870,11 @@ ExecEvalFieldSelect(FieldSelectState *fstate,
 								 &fstate->argdesc, econtext);
 
 	/*
-	 * Find field's attr record.  Note we don't support system columns here:
-	 * a datum tuple doesn't have valid values for most of the interesting
+	 * Find field's attr record.  Note we don't support system columns here: a
+	 * datum tuple doesn't have valid values for most of the interesting
 	 * system columns anyway.
 	 */
-	if (fieldnum <= 0)					/* should never happen */
+	if (fieldnum <= 0)			/* should never happen */
 		elog(ERROR, "unsupported reference to system column %d in FieldSelect",
 			 fieldnum);
 	if (fieldnum > tupDesc->natts)		/* should never happen */
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index bda81cc12d1ebe659ad228c2d3519a934a62cd3c..fa5ff2d0e0580326190ef1b63a2a1523c0256d9c 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -12,7 +12,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.48 2010/01/02 16:57:41 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.49 2010/02/26 02:00:41 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -30,7 +30,7 @@ static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, Tuple
  * ExecScanFetch -- fetch next potential tuple
  *
  * This routine is concerned with substituting a test tuple if we are
- * inside an EvalPlanQual recheck.  If we aren't, just execute
+ * inside an EvalPlanQual recheck.	If we aren't, just execute
  * the access method's next-tuple routine.
  */
 static inline TupleTableSlot *
@@ -152,7 +152,7 @@ ExecScan(ScanState *node,
 	ResetExprContext(econtext);
 
 	/*
-	 * get a tuple from the access method.  Loop until we obtain a tuple that
+	 * get a tuple from the access method.	Loop until we obtain a tuple that
 	 * passes the qualification.
 	 */
 	for (;;)
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index a13cf5d19807ef57b5f658e39a1d4a51ecce0287..e2ee706c6da6a722b3ed855a8179ccb7aea6e480 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -4,7 +4,7 @@
  *	  Routines dealing with TupleTableSlots.  These are used for resource
  *	  management associated with tuples (eg, releasing buffer pins for
  *	  tuples in disk buffers, or freeing the memory occupied by transient
- *	  tuples).  Slots also provide access abstraction that lets us implement
+ *	  tuples).	Slots also provide access abstraction that lets us implement
  *	  "virtual" tuples to reduce data-copying overhead.
  *
  *	  Routines dealing with the type information for tuples. Currently,
@@ -17,7 +17,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.111 2010/01/02 16:57:41 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.112 2010/02/26 02:00:41 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1178,7 +1178,7 @@ void
 do_text_output_multiline(TupOutputState *tstate, char *text)
 {
 	Datum		values[1];
-	bool		isnull[1] = { false };
+	bool		isnull[1] = {false};
 
 	while (*text)
 	{
@@ -1189,6 +1189,7 @@ do_text_output_multiline(TupOutputState *tstate, char *text)
 		if (eol)
 		{
 			len = eol - text;
+
 			eol++;
 		}
 		else
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 151e50b63f9e12963438e95612173344718bcf53..de78719c4c5d12b8e1b26d47e8545ba3d3346173 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.170 2010/02/08 04:33:54 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.171 2010/02/26 02:00:41 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -342,7 +342,7 @@ CreateStandaloneExprContext(void)
  * any previously computed pass-by-reference expression result will go away!
  *
  * If isCommit is false, we are being called in error cleanup, and should
- * not call callbacks but only release memory.  (It might be better to call
+ * not call callbacks but only release memory.	(It might be better to call
  * the callbacks and pass the isCommit flag to them, but that would require
  * more invasive code changes than currently seems justified.)
  *
@@ -1078,9 +1078,9 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 			checkUnique = UNIQUE_CHECK_PARTIAL;
 
 		satisfiesConstraint =
-			index_insert(indexRelation,	/* index relation */
-						 values,		/* array of index Datums */
-						 isnull,		/* null flags */
+			index_insert(indexRelation, /* index relation */
+						 values,	/* array of index Datums */
+						 isnull,	/* null flags */
 						 tupleid,		/* tid of heap tuple */
 						 heapRelation,	/* heap relation */
 						 checkUnique);	/* type of uniqueness check to do */
@@ -1088,7 +1088,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 		/*
 		 * If the index has an associated exclusion constraint, check that.
 		 * This is simpler than the process for uniqueness checks since we
-		 * always insert first and then check.  If the constraint is deferred,
+		 * always insert first and then check.	If the constraint is deferred,
 		 * we check now anyway, but don't throw error on violation; instead
 		 * we'll queue a recheck event.
 		 *
@@ -1098,7 +1098,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 		 */
 		if (indexInfo->ii_ExclusionOps != NULL)
 		{
-			bool errorOK = !indexRelation->rd_index->indimmediate;
+			bool		errorOK = !indexRelation->rd_index->indimmediate;
 
 			satisfiesConstraint =
 				check_exclusion_constraint(heapRelation,
@@ -1152,23 +1152,23 @@ check_exclusion_constraint(Relation heap, Relation index, IndexInfo *indexInfo,
 						   ItemPointer tupleid, Datum *values, bool *isnull,
 						   EState *estate, bool newIndex, bool errorOK)
 {
-	Oid			*constr_procs = indexInfo->ii_ExclusionProcs;
-	uint16		*constr_strats = indexInfo->ii_ExclusionStrats;
-	int			 index_natts = index->rd_index->indnatts;
-	IndexScanDesc	index_scan;
-	HeapTuple		tup;
-	ScanKeyData		scankeys[INDEX_MAX_KEYS];
-	SnapshotData	DirtySnapshot;
-	int				i;
-	bool			conflict;
-	bool			found_self;
-	ExprContext	   *econtext;
+	Oid		   *constr_procs = indexInfo->ii_ExclusionProcs;
+	uint16	   *constr_strats = indexInfo->ii_ExclusionStrats;
+	int			index_natts = index->rd_index->indnatts;
+	IndexScanDesc index_scan;
+	HeapTuple	tup;
+	ScanKeyData scankeys[INDEX_MAX_KEYS];
+	SnapshotData DirtySnapshot;
+	int			i;
+	bool		conflict;
+	bool		found_self;
+	ExprContext *econtext;
 	TupleTableSlot *existing_slot;
 	TupleTableSlot *save_scantuple;
 
 	/*
-	 * If any of the input values are NULL, the constraint check is assumed
-	 * to pass (i.e., we assume the operators are strict).
+	 * If any of the input values are NULL, the constraint check is assumed to
+	 * pass (i.e., we assume the operators are strict).
 	 */
 	for (i = 0; i < index_natts; i++)
 	{
@@ -1177,8 +1177,8 @@ check_exclusion_constraint(Relation heap, Relation index, IndexInfo *indexInfo,
 	}
 
 	/*
-	 * Search the tuples that are in the index for any violations,
-	 * including tuples that aren't visible yet.
+	 * Search the tuples that are in the index for any violations, including
+	 * tuples that aren't visible yet.
 	 */
 	InitDirtySnapshot(DirtySnapshot);
 
@@ -1205,8 +1205,8 @@ check_exclusion_constraint(Relation heap, Relation index, IndexInfo *indexInfo,
 	econtext->ecxt_scantuple = existing_slot;
 
 	/*
-	 * May have to restart scan from this point if a potential
-	 * conflict is found.
+	 * May have to restart scan from this point if a potential conflict is
+	 * found.
 	 */
 retry:
 	conflict = false;
@@ -1217,11 +1217,11 @@ retry:
 	while ((tup = index_getnext(index_scan,
 								ForwardScanDirection)) != NULL)
 	{
-		TransactionId	 xwait;
+		TransactionId xwait;
 		Datum		existing_values[INDEX_MAX_KEYS];
 		bool		existing_isnull[INDEX_MAX_KEYS];
-		char		*error_new;
-		char		*error_existing;
+		char	   *error_new;
+		char	   *error_existing;
 
 		/*
 		 * Ignore the entry for the tuple we're trying to check.
@@ -1239,7 +1239,7 @@ retry:
 		 * Extract the index column values and isnull flags from the existing
 		 * tuple.
 		 */
-		ExecStoreTuple(tup,	existing_slot, InvalidBuffer, false);
+		ExecStoreTuple(tup, existing_slot, InvalidBuffer, false);
 		FormIndexDatum(indexInfo, existing_slot, estate,
 					   existing_values, existing_isnull);
 
@@ -1251,12 +1251,13 @@ retry:
 										  existing_values,
 										  existing_isnull,
 										  values))
-				continue; /* tuple doesn't actually match, so no conflict */
+				continue;		/* tuple doesn't actually match, so no
+								 * conflict */
 		}
 
 		/*
-		 * At this point we have either a conflict or a potential conflict.
-		 * If we're not supposed to raise error, just return the fact of the
+		 * At this point we have either a conflict or a potential conflict. If
+		 * we're not supposed to raise error, just return the fact of the
 		 * potential conflict without waiting to see if it's real.
 		 */
 		if (errorOK)
@@ -1267,7 +1268,7 @@ retry:
 
 		/*
 		 * If an in-progress transaction is affecting the visibility of this
-		 * tuple, we need to wait for it to complete and then recheck.  For
+		 * tuple, we need to wait for it to complete and then recheck.	For
 		 * simplicity we do rechecking by just restarting the whole scan ---
 		 * this case probably doesn't happen often enough to be worth trying
 		 * harder, and anyway we don't want to hold any index internal locks
@@ -1308,15 +1309,15 @@ retry:
 	index_endscan(index_scan);
 
 	/*
-	 * We should have found our tuple in the index, unless we exited the
-	 * loop early because of conflict.  Complain if not.
+	 * We should have found our tuple in the index, unless we exited the loop
+	 * early because of conflict.  Complain if not.
 	 */
 	if (!found_self && !conflict)
 		ereport(ERROR,
 				(errcode(ERRCODE_INTERNAL_ERROR),
 				 errmsg("failed to re-find tuple within index \"%s\"",
 						RelationGetRelationName(index)),
-				 errhint("This may be because of a non-immutable index expression.")));
+		errhint("This may be because of a non-immutable index expression.")));
 
 	econtext->ecxt_scantuple = save_scantuple;
 
@@ -1327,7 +1328,7 @@ retry:
 
 /*
  * Check existing tuple's index values to see if it really matches the
- * exclusion condition against the new_values.  Returns true if conflict.
+ * exclusion condition against the new_values.	Returns true if conflict.
  */
 static bool
 index_recheck_constraint(Relation index, Oid *constr_procs,
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 88b47316e24221352306da7c69852fa3a1f32f02..d2bd23da750d7b3fb470a4a882c8ba7738188ebd 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.141 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.142 2010/02/26 02:00:41 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -636,8 +636,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
 		/*
 		 * For simplicity, we require callers to support both set eval modes.
 		 * There are cases where we must use one or must use the other, and
-		 * it's not really worthwhile to postpone the check till we know.
-		 * But note we do not require caller to provide an expectedDesc.
+		 * it's not really worthwhile to postpone the check till we know. But
+		 * note we do not require caller to provide an expectedDesc.
 		 */
 		if (!rsi || !IsA(rsi, ReturnSetInfo) ||
 			(rsi->allowedModes & SFRM_ValuePerCall) == 0 ||
@@ -1042,7 +1042,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 	AssertArg(!IsPolymorphicType(rettype));
 
 	if (modifyTargetList)
-		*modifyTargetList = false;	/* initialize for no change */
+		*modifyTargetList = false;		/* initialize for no change */
 	if (junkFilter)
 		*junkFilter = NULL;		/* initialize in case of VOID result */
 
@@ -1219,7 +1219,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 		/*
 		 * Verify that the targetlist matches the return tuple type. We scan
 		 * the non-deleted attributes to ensure that they match the datatypes
-		 * of the non-resjunk columns.  For deleted attributes, insert NULL
+		 * of the non-resjunk columns.	For deleted attributes, insert NULL
 		 * result columns if the caller asked for that.
 		 */
 		tupnatts = tupdesc->natts;
@@ -1254,7 +1254,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 				attr = tupdesc->attrs[colindex - 1];
 				if (attr->attisdropped && modifyTargetList)
 				{
-					Expr   *null_expr;
+					Expr	   *null_expr;
 
 					/* The type of the null we insert isn't important */
 					null_expr = (Expr *) makeConst(INT4OID,
@@ -1311,17 +1311,17 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
 						(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
 						 errmsg("return type mismatch in function declared to return %s",
 								format_type_be(rettype)),
-						 errdetail("Final statement returns too few columns.")));
+					 errdetail("Final statement returns too few columns.")));
 			if (modifyTargetList)
 			{
-				Expr   *null_expr;
+				Expr	   *null_expr;
 
 				/* The type of the null we insert isn't important */
 				null_expr = (Expr *) makeConst(INT4OID,
 											   -1,
 											   sizeof(int32),
 											   (Datum) 0,
-											   true,		/* isnull */
+											   true,	/* isnull */
 											   true /* byval */ );
 				newtlist = lappend(newtlist,
 								   makeTargetEntry(null_expr,
diff --git a/src/backend/executor/instrument.c b/src/backend/executor/instrument.c
index 9b46215cd3fc9ca91800df7be4fd11572e8f3b0d..55aace9a826e26b7c7817602ddceb1d71509646b 100644
--- a/src/backend/executor/instrument.c
+++ b/src/backend/executor/instrument.c
@@ -7,7 +7,7 @@
  * Copyright (c) 2001-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.24 2010/01/02 16:57:41 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.25 2010/02/26 02:00:41 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -17,10 +17,10 @@
 
 #include "executor/instrument.h"
 
-BufferUsage			pgBufferUsage;
+BufferUsage pgBufferUsage;
 
 static void BufferUsageAccumDiff(BufferUsage *dst,
-		const BufferUsage *add, const BufferUsage *sub);
+					 const BufferUsage *add, const BufferUsage *sub);
 
 /* Allocate new instrumentation structure(s) */
 Instrumentation *
@@ -34,7 +34,7 @@ InstrAlloc(int n, int instrument_options)
 	instr = palloc0(n * sizeof(Instrumentation));
 	if (instrument_options & INSTRUMENT_BUFFERS)
 	{
-		int		i;
+		int			i;
 
 		for (i = 0; i < n; i++)
 			instr[i].needs_bufusage = true;
@@ -80,7 +80,7 @@ InstrStopNode(Instrumentation *instr, double nTuples)
 	/* Adds delta of buffer usage to node's count. */
 	if (instr->needs_bufusage)
 		BufferUsageAccumDiff(&instr->bufusage,
-			&pgBufferUsage, &instr->bufusage_start);
+							 &pgBufferUsage, &instr->bufusage_start);
 
 	/* Is this the first tuple of this cycle? */
 	if (!instr->running)
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 14a0a091b0f62a41822134c6441eaf0d85a69172..74fc87a66a59af611dd31d0d6cfc229de7b347b6 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -55,7 +55,7 @@
  *	  it is completely forbidden for functions to modify pass-by-ref inputs,
  *	  but in the aggregate case we know the left input is either the initial
  *	  transition value or a previous function result, and in either case its
- *	  value need not be preserved.  See int8inc() for an example.  Notice that
+ *	  value need not be preserved.	See int8inc() for an example.  Notice that
  *	  advance_transition_function() is coded to avoid a data copy step when
  *	  the previous transition value pointer is returned.  Also, some
  *	  transition functions want to store working state in addition to the
@@ -71,7 +71,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.174 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.175 2010/02/26 02:00:41 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -135,12 +135,12 @@ typedef struct AggStatePerAggData
 
 	/* number of sorting columns to consider in DISTINCT comparisons */
 	/* (this is either zero or the same as numSortCols) */
-	int         numDistinctCols;
+	int			numDistinctCols;
 
 	/* deconstructed sorting information (arrays of length numSortCols) */
 	AttrNumber *sortColIdx;
-	Oid        *sortOperators;
-	bool       *sortNullsFirst;
+	Oid		   *sortOperators;
+	bool	   *sortNullsFirst;
 
 	/*
 	 * fmgr lookup data for input columns' equality operators --- only
@@ -170,12 +170,12 @@ typedef struct AggStatePerAggData
 				transtypeByVal;
 
 	/*
-	 * Stuff for evaluation of inputs.  We used to just use ExecEvalExpr, but
+	 * Stuff for evaluation of inputs.	We used to just use ExecEvalExpr, but
 	 * with the addition of ORDER BY we now need at least a slot for passing
 	 * data to the sort object, which requires a tupledesc, so we might as
 	 * well go whole hog and use ExecProject too.
 	 */
-	TupleDesc   evaldesc;		/* descriptor of input tuples */
+	TupleDesc	evaldesc;		/* descriptor of input tuples */
 	ProjectionInfo *evalproj;	/* projection machinery */
 
 	/*
@@ -190,7 +190,7 @@ typedef struct AggStatePerAggData
 	 * input tuple group and updated for each input tuple.
 	 *
 	 * For a simple (non DISTINCT/ORDER BY) aggregate, we just feed the input
-	 * values straight to the transition function.  If it's DISTINCT or
+	 * values straight to the transition function.	If it's DISTINCT or
 	 * requires ORDER BY, we pass the input values into a Tuplesort object;
 	 * then at completion of the input tuple group, we scan the sorted values,
 	 * eliminate duplicates if needed, and run the transition function on the
@@ -257,11 +257,11 @@ static void advance_transition_function(AggState *aggstate,
 							FunctionCallInfoData *fcinfo);
 static void advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup);
 static void process_ordered_aggregate_single(AggState *aggstate,
-						 AggStatePerAgg peraggstate,
-						 AggStatePerGroup pergroupstate);
+								 AggStatePerAgg peraggstate,
+								 AggStatePerGroup pergroupstate);
 static void process_ordered_aggregate_multi(AggState *aggstate,
-						 AggStatePerAgg peraggstate,
-						 AggStatePerGroup pergroupstate);
+								AggStatePerAgg peraggstate,
+								AggStatePerGroup pergroupstate);
 static void finalize_aggregate(AggState *aggstate,
 				   AggStatePerAgg peraggstate,
 				   AggStatePerGroup pergroupstate,
@@ -307,8 +307,8 @@ initialize_aggregates(AggState *aggstate,
 				tuplesort_end(peraggstate->sortstate);
 
 			/*
-			 * We use a plain Datum sorter when there's a single input
-			 * column; otherwise sort the full tuple.  (See comments for
+			 * We use a plain Datum sorter when there's a single input column;
+			 * otherwise sort the full tuple.  (See comments for
 			 * process_ordered_aggregate_single.)
 			 */
 			peraggstate->sortstate =
@@ -488,11 +488,11 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
 			Assert(slot->tts_nvalid == peraggstate->numInputs);
 
 			/*
-			 * If the transfn is strict, we want to check for nullity
-			 * before storing the row in the sorter, to save space if
-			 * there are a lot of nulls.  Note that we must only check
-			 * numArguments columns, not numInputs, since nullity in
-			 * columns used only for sorting is not relevant here.
+			 * If the transfn is strict, we want to check for nullity before
+			 * storing the row in the sorter, to save space if there are a lot
+			 * of nulls.  Note that we must only check numArguments columns,
+			 * not numInputs, since nullity in columns used only for sorting
+			 * is not relevant here.
 			 */
 			if (peraggstate->transfn.fn_strict)
 			{
@@ -537,7 +537,7 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
 /*
  * Run the transition function for a DISTINCT or ORDER BY aggregate
  * with only one input.  This is called after we have completed
- * entering all the input values into the sort object.  We complete the
+ * entering all the input values into the sort object.	We complete the
  * sort, read out the values in sorted order, and run the transition
  * function on each value (applying DISTINCT if appropriate).
  *
@@ -559,11 +559,11 @@ process_ordered_aggregate_single(AggState *aggstate,
 								 AggStatePerGroup pergroupstate)
 {
 	Datum		oldVal = (Datum) 0;
-	bool        oldIsNull = true;
+	bool		oldIsNull = true;
 	bool		haveOldVal = false;
 	MemoryContext workcontext = aggstate->tmpcontext->ecxt_per_tuple_memory;
 	MemoryContext oldContext;
-	bool        isDistinct = (peraggstate->numDistinctCols > 0);
+	bool		isDistinct = (peraggstate->numDistinctCols > 0);
 	Datum	   *newVal;
 	bool	   *isNull;
 	FunctionCallInfoData fcinfo;
@@ -632,7 +632,7 @@ process_ordered_aggregate_single(AggState *aggstate,
 /*
  * Run the transition function for a DISTINCT or ORDER BY aggregate
  * with more than one input.  This is called after we have completed
- * entering all the input values into the sort object.  We complete the
+ * entering all the input values into the sort object.	We complete the
  * sort, read out the values in sorted order, and run the transition
  * function on each value (applying DISTINCT if appropriate).
  *
@@ -647,10 +647,10 @@ process_ordered_aggregate_multi(AggState *aggstate,
 	FunctionCallInfoData fcinfo;
 	TupleTableSlot *slot1 = peraggstate->evalslot;
 	TupleTableSlot *slot2 = peraggstate->uniqslot;
-	int         numArguments = peraggstate->numArguments;
-	int         numDistinctCols = peraggstate->numDistinctCols;
-	bool        haveOldValue = false;
-	int         i;
+	int			numArguments = peraggstate->numArguments;
+	int			numDistinctCols = peraggstate->numDistinctCols;
+	bool		haveOldValue = false;
+	int			i;
 
 	tuplesort_performsort(peraggstate->sortstate);
 
@@ -983,9 +983,9 @@ ExecAgg(AggState *node)
 	}
 
 	/*
-	 * Exit if nothing left to do.  (We must do the ps_TupFromTlist check
-	 * first, because in some cases agg_done gets set before we emit the
-	 * final aggregate tuple, and we have to finish running SRFs for it.)
+	 * Exit if nothing left to do.	(We must do the ps_TupFromTlist check
+	 * first, because in some cases agg_done gets set before we emit the final
+	 * aggregate tuple, and we have to finish running SRFs for it.)
 	 */
 	if (node->agg_done)
 		return NULL;
@@ -1066,9 +1066,9 @@ agg_retrieve_direct(AggState *aggstate)
 
 		/*
 		 * Clear the per-output-tuple context for each group, as well as
-		 * aggcontext (which contains any pass-by-ref transvalues of the
-		 * old group).  We also clear any child contexts of the aggcontext;
-		 * some aggregate functions store working state in such contexts.
+		 * aggcontext (which contains any pass-by-ref transvalues of the old
+		 * group).	We also clear any child contexts of the aggcontext; some
+		 * aggregate functions store working state in such contexts.
 		 */
 		ResetExprContext(econtext);
 
@@ -1402,8 +1402,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 	 * structures and transition values.  NOTE: the details of what is stored
 	 * in aggcontext and what is stored in the regular per-query memory
 	 * context are driven by a simple decision: we want to reset the
-	 * aggcontext at group boundaries (if not hashing) and in ExecReScanAgg
-	 * to recover no-longer-wanted space.
+	 * aggcontext at group boundaries (if not hashing) and in ExecReScanAgg to
+	 * recover no-longer-wanted space.
 	 */
 	aggstate->aggcontext =
 		AllocSetContextCreate(CurrentMemoryContext,
@@ -1539,7 +1539,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 		int			numInputs;
 		int			numSortCols;
 		int			numDistinctCols;
-		List       *sortlist;
+		List	   *sortlist;
 		HeapTuple	aggTuple;
 		Form_pg_aggregate aggform;
 		Oid			aggtranstype;
@@ -1735,9 +1735,9 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 														NULL);
 
 		/*
-		 * If we're doing either DISTINCT or ORDER BY, then we have a list
-		 * of SortGroupClause nodes; fish out the data in them and
-		 * stick them into arrays.
+		 * If we're doing either DISTINCT or ORDER BY, then we have a list of
+		 * SortGroupClause nodes; fish out the data in them and stick them
+		 * into arrays.
 		 *
 		 * Note that by construction, if there is a DISTINCT clause then the
 		 * ORDER BY clause is a prefix of it (see transformDistinctClause).
@@ -1976,8 +1976,8 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
  *
  * The transition and/or final functions of an aggregate may want to verify
  * that they are being called as aggregates, rather than as plain SQL
- * functions.  They should use this function to do so.  The return value
- * is nonzero if being called as an aggregate, or zero if not.  (Specific
+ * functions.  They should use this function to do so.	The return value
+ * is nonzero if being called as an aggregate, or zero if not.	(Specific
  * nonzero values are AGG_CONTEXT_AGGREGATE or AGG_CONTEXT_WINDOW, but more
  * values could conceivably appear in future.)
  *
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 6a41a290126037103218a2966d30bb6a24b0b471..be45d732e09ccb03075211982b773057d5d10b65 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.128 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.129 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -458,7 +458,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 	/*
 	 * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
 	 * memory is filled.  Set nbatch to the smallest power of 2 that appears
-	 * sufficient.  The Min() steps limit the results so that the pointer
+	 * sufficient.	The Min() steps limit the results so that the pointer
 	 * arrays we'll try to allocate do not exceed work_mem.
 	 */
 	max_pointers = (work_mem * 1024L) / sizeof(void *);
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index b60160c0402ab7ff6ac98e356aa6841ff560cd06..0994dbf84efe6ef35596e0ae97243fb5e99f3c19 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.138 2010/01/02 16:57:42 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.139 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -240,9 +240,9 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext,
 		 * necessary.
 		 *
 		 * It's also entirely possible that the result of the eval is a
-		 * toasted value.  In this case we should forcibly detoast it,
-		 * to avoid repeat detoastings each time the value is examined
-		 * by an index support function.
+		 * toasted value.  In this case we should forcibly detoast it, to
+		 * avoid repeat detoastings each time the value is examined by an
+		 * index support function.
 		 */
 		scanvalue = ExecEvalExpr(key_expr,
 								 econtext,
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index b3c61d43e1917f041c26bb6e18cbdf91750b6b36..0eafa0afa2ccccf81ea4258c8d1b7cb8354ee1b8 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/nodeLockRows.c,v 1.3 2010/01/02 16:57:42 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeLockRows.c,v 1.4 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -154,8 +154,8 @@ lnext:
 				tuple.t_self = copyTuple->t_self;
 
 				/*
-				 * Need to run a recheck subquery.  Initialize EPQ state
-				 * if we didn't do so already.
+				 * Need to run a recheck subquery.	Initialize EPQ state if we
+				 * didn't do so already.
 				 */
 				if (!epq_started)
 				{
@@ -185,9 +185,9 @@ lnext:
 	{
 		/*
 		 * First, fetch a copy of any rows that were successfully locked
-		 * without any update having occurred.  (We do this in a separate
-		 * pass so as to avoid overhead in the common case where there are
-		 * no concurrent updates.)
+		 * without any update having occurred.	(We do this in a separate pass
+		 * so as to avoid overhead in the common case where there are no
+		 * concurrent updates.)
 		 */
 		foreach(lc, node->lr_rowMarks)
 		{
@@ -209,12 +209,14 @@ lnext:
 								 heap_copytuple(&tuple));
 			ReleaseBuffer(buffer);
 		}
+
 		/*
-		 * Now fetch any non-locked source rows --- the EPQ logic knows
-		 * how to do that.
+		 * Now fetch any non-locked source rows --- the EPQ logic knows how to
+		 * do that.
 		 */
 		EvalPlanQualSetSlot(&node->lr_epqstate, slot);
 		EvalPlanQualFetchRowMarks(&node->lr_epqstate);
+
 		/*
 		 * And finally we can re-evaluate the tuple.
 		 */
@@ -272,15 +274,15 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
 	outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags);
 
 	/*
-	 * LockRows nodes do no projections, so initialize projection info for this
-	 * node appropriately
+	 * LockRows nodes do no projections, so initialize projection info for
+	 * this node appropriately
 	 */
 	ExecAssignResultTypeFromTL(&lrstate->ps);
 	lrstate->ps.ps_ProjInfo = NULL;
 
 	/*
-	 * Locate the ExecRowMark(s) that this node is responsible for.
-	 * (InitPlan should already have built the global list of ExecRowMarks.)
+	 * Locate the ExecRowMark(s) that this node is responsible for. (InitPlan
+	 * should already have built the global list of ExecRowMarks.)
 	 */
 	lrstate->lr_rowMarks = NIL;
 	foreach(lc, node->rowMarks)
@@ -307,10 +309,10 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
 				 rc->rti);
 
 		/*
-		 * Only locking rowmarks go into our own list.  Non-locking marks
-		 * are passed off to the EvalPlanQual machinery.  This is because
-		 * we don't want to bother fetching non-locked rows unless we
-		 * actually have to do an EPQ recheck.
+		 * Only locking rowmarks go into our own list.	Non-locking marks are
+		 * passed off to the EvalPlanQual machinery.  This is because we don't
+		 * want to bother fetching non-locked rows unless we actually have to
+		 * do an EPQ recheck.
 		 */
 		if (RowMarkRequiresRowShareLock(erm->markType))
 			lrstate->lr_rowMarks = lappend(lrstate->lr_rowMarks, erm);
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 0c611015134ef668f84e2f6339f99bb99362ad0e..8404c4d46468615ec5d5904a0b55d559dc0719ee 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.100 2010/01/05 23:25:36 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.101 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -507,7 +507,7 @@ check_constant_qual(List *qual, bool *is_const_false)
 
 	foreach(lc, qual)
 	{
-		Const  *con = (Const *) lfirst(lc);
+		Const	   *con = (Const *) lfirst(lc);
 
 		if (!con || !IsA(con, Const))
 			return false;
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index eca1d6de3f4a1dea781ad320fc22e84e24c398b3..adfe97cefde9fad6c0b0def0fadbb4cb8eb7475f 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -8,12 +8,12 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/nodeModifyTable.c,v 1.6 2010/02/08 04:33:54 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeModifyTable.c,v 1.7 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
 /* INTERFACE ROUTINES
- *		ExecInitModifyTable	- initialize the ModifyTable node
+ *		ExecInitModifyTable - initialize the ModifyTable node
  *		ExecModifyTable		- retrieve the next tuple from the node
  *		ExecEndModifyTable	- shut down the ModifyTable node
  *		ExecReScanModifyTable - rescan the ModifyTable node
@@ -30,7 +30,7 @@
  *
  *		If the query specifies RETURNING, then the ModifyTable returns a
  *		RETURNING tuple after completing each row insert, update, or delete.
- *		It must be called again to continue the operation.  Without RETURNING,
+ *		It must be called again to continue the operation.	Without RETURNING,
  *		we just loop within the node until all the work is done, then
  *		return NULL.  This avoids useless call/return overhead.
  */
@@ -215,7 +215,7 @@ ExecInsert(TupleTableSlot *slot,
 			 * slot should not try to clear it.
 			 */
 			TupleTableSlot *newslot = estate->es_trig_tuple_slot;
-			TupleDesc tupdesc = RelationGetDescr(resultRelationDesc);
+			TupleDesc	tupdesc = RelationGetDescr(resultRelationDesc);
 
 			if (newslot->tts_tupleDescriptor != tupdesc)
 				ExecSetSlotDescriptor(newslot, tupdesc);
@@ -470,7 +470,7 @@ ExecUpdate(ItemPointer tupleid,
 			 * slot should not try to clear it.
 			 */
 			TupleTableSlot *newslot = estate->es_trig_tuple_slot;
-			TupleDesc tupdesc = RelationGetDescr(resultRelationDesc);
+			TupleDesc	tupdesc = RelationGetDescr(resultRelationDesc);
 
 			if (newslot->tts_tupleDescriptor != tupdesc)
 				ExecSetSlotDescriptor(newslot, tupdesc);
@@ -646,9 +646,9 @@ fireASTriggers(ModifyTableState *node)
 TupleTableSlot *
 ExecModifyTable(ModifyTableState *node)
 {
-	EState *estate = node->ps.state;
-	CmdType operation = node->operation;
-	PlanState *subplanstate;
+	EState	   *estate = node->ps.state;
+	CmdType		operation = node->operation;
+	PlanState  *subplanstate;
 	JunkFilter *junkfilter;
 	TupleTableSlot *slot;
 	TupleTableSlot *planSlot;
@@ -666,8 +666,8 @@ ExecModifyTable(ModifyTableState *node)
 
 	/*
 	 * es_result_relation_info must point to the currently active result
-	 * relation.  (Note we assume that ModifyTable nodes can't be nested.)
-	 * We want it to be NULL whenever we're not within ModifyTable, though.
+	 * relation.  (Note we assume that ModifyTable nodes can't be nested.) We
+	 * want it to be NULL whenever we're not within ModifyTable, though.
 	 */
 	estate->es_result_relation_info =
 		estate->es_result_relations + node->mt_whichplan;
@@ -791,8 +791,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
 
 	/*
-	 * This should NOT get called during EvalPlanQual; we should have passed
-	 * a subplan tree to EvalPlanQual, instead.  Use a runtime test not just
+	 * This should NOT get called during EvalPlanQual; we should have passed a
+	 * subplan tree to EvalPlanQual, instead.  Use a runtime test not just
 	 * Assert because this condition is easy to miss in testing ...
 	 */
 	if (estate->es_epqTuple != NULL)
@@ -846,8 +846,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 		ExprContext *econtext;
 
 		/*
-		 * Initialize result tuple slot and assign its rowtype using the
-		 * first RETURNING list.  We assume the rest will look the same.
+		 * Initialize result tuple slot and assign its rowtype using the first
+		 * RETURNING list.	We assume the rest will look the same.
 		 */
 		tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists),
 								 false);
@@ -881,8 +881,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	else
 	{
 		/*
-		 * We still must construct a dummy result tuple type, because
-		 * InitPlan expects one (maybe should change that?).
+		 * We still must construct a dummy result tuple type, because InitPlan
+		 * expects one (maybe should change that?).
 		 */
 		tupDesc = ExecTypeFromTL(NIL, false);
 		ExecInitResultTupleSlot(estate, &mtstate->ps);
@@ -892,10 +892,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	}
 
 	/*
-	 * If we have any secondary relations in an UPDATE or DELETE, they need
-	 * to be treated like non-locked relations in SELECT FOR UPDATE, ie,
-	 * the EvalPlanQual mechanism needs to be told about them.  Locate
-	 * the relevant ExecRowMarks.
+	 * If we have any secondary relations in an UPDATE or DELETE, they need to
+	 * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
+	 * EvalPlanQual mechanism needs to be told about them.	Locate the
+	 * relevant ExecRowMarks.
 	 */
 	foreach(l, node->rowMarks)
 	{
@@ -925,12 +925,12 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 
 	/*
 	 * Initialize the junk filter(s) if needed.  INSERT queries need a filter
-	 * if there are any junk attrs in the tlist.  UPDATE and DELETE
-	 * always need a filter, since there's always a junk 'ctid' attribute
-	 * present --- no need to look first.
+	 * if there are any junk attrs in the tlist.  UPDATE and DELETE always
+	 * need a filter, since there's always a junk 'ctid' attribute present ---
+	 * no need to look first.
 	 *
 	 * If there are multiple result relations, each one needs its own junk
-	 * filter.  Note multiple rels are only possible for UPDATE/DELETE, so we
+	 * filter.	Note multiple rels are only possible for UPDATE/DELETE, so we
 	 * can't be fooled by some needing a filter and some not.
 	 *
 	 * This section of code is also a convenient place to verify that the
@@ -999,9 +999,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	}
 
 	/*
-	 * Set up a tuple table slot for use for trigger output tuples.
-	 * In a plan containing multiple ModifyTable nodes, all can share
-	 * one such slot, so we keep it in the estate.
+	 * Set up a tuple table slot for use for trigger output tuples. In a plan
+	 * containing multiple ModifyTable nodes, all can share one such slot, so
+	 * we keep it in the estate.
 	 */
 	if (estate->es_trig_tuple_slot == NULL)
 		estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate);
@@ -1020,7 +1020,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 void
 ExecEndModifyTable(ModifyTableState *node)
 {
-	int i;
+	int			i;
 
 	/*
 	 * Free the exprcontext
@@ -1040,7 +1040,7 @@ ExecEndModifyTable(ModifyTableState *node)
 	/*
 	 * shut down subplans
 	 */
-	for (i=0; i<node->mt_nplans; i++)
+	for (i = 0; i < node->mt_nplans; i++)
 		ExecEndNode(node->mt_plans[i]);
 }
 
@@ -1048,8 +1048,8 @@ void
 ExecReScanModifyTable(ModifyTableState *node, ExprContext *exprCtxt)
 {
 	/*
-	 * Currently, we don't need to support rescan on ModifyTable nodes.
-	 * The semantics of that would be a bit debatable anyway.
+	 * Currently, we don't need to support rescan on ModifyTable nodes. The
+	 * semantics of that would be a bit debatable anyway.
 	 */
 	elog(ERROR, "ExecReScanModifyTable is not implemented");
 }
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index d4a5f677fec3064c2561b8a7552e9a3a28fbd09c..75623be3715b1d5609bb96c2fdc3b9cab09edc3e 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.69 2010/01/02 16:57:45 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.70 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -92,9 +92,8 @@ static bool
 SeqRecheck(SeqScanState *node, TupleTableSlot *slot)
 {
 	/*
-	 * Note that unlike IndexScan, SeqScan never use keys in
-	 * heap_beginscan (and this is very bad) - so, here we do not check
-	 * are keys ok or not.
+	 * Note that unlike IndexScan, SeqScan never use keys in heap_beginscan
+	 * (and this is very bad) - so, here we do not check are keys ok or not.
 	 */
 	return true;
 }
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index c8e2e083df2329a08bb786837d5254fd86460c74..dbd42d79720265283e3928815ee761c52f055b87 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -12,7 +12,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.44 2010/01/02 16:57:45 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.45 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -53,9 +53,9 @@ SubqueryNext(SubqueryScanState *node)
 	slot = ExecProcNode(node->subplan);
 
 	/*
-	 * We just return the subplan's result slot, rather than expending
-	 * extra cycles for ExecCopySlot().  (Our own ScanTupleSlot is used
-	 * only for EvalPlanQual rechecks.)
+	 * We just return the subplan's result slot, rather than expending extra
+	 * cycles for ExecCopySlot().  (Our own ScanTupleSlot is used only for
+	 * EvalPlanQual rechecks.)
 	 */
 	return slot;
 }
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index 2668d83b03ed12e6ccefcc6ce467b8926c7b7354..4d76981b02ab1739b4355f8de7d1f0667d633fc5 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -27,7 +27,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/nodeWindowAgg.c,v 1.11 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeWindowAgg.c,v 1.12 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -416,8 +416,8 @@ eval_windowaggregates(WindowAggState *winstate)
 	 * need the current aggregate value.  This is considerably more efficient
 	 * than the naive approach of re-running the entire aggregate calculation
 	 * for each current row.  It does assume that the final function doesn't
-	 * damage the running transition value, but we have the same assumption
-	 * in nodeAgg.c too (when it rescans an existing hash table).
+	 * damage the running transition value, but we have the same assumption in
+	 * nodeAgg.c too (when it rescans an existing hash table).
 	 *
 	 * For other frame start rules, we discard the aggregate state and re-run
 	 * the aggregates whenever the frame head row moves.  We can still
@@ -434,11 +434,11 @@ eval_windowaggregates(WindowAggState *winstate)
 	 * accumulated into the aggregate transition values.  Whenever we start a
 	 * new peer group, we accumulate forward to the end of the peer group.
 	 *
-	 * TODO: Rerunning aggregates from the frame start can be pretty slow.
-	 * For some aggregates like SUM and COUNT we could avoid that by
-	 * implementing a "negative transition function" that would be called for
-	 * each row as it exits the frame.  We'd have to think about avoiding
-	 * recalculation of volatile arguments of aggregate functions, too.
+	 * TODO: Rerunning aggregates from the frame start can be pretty slow. For
+	 * some aggregates like SUM and COUNT we could avoid that by implementing
+	 * a "negative transition function" that would be called for each row as
+	 * it exits the frame.	We'd have to think about avoiding recalculation of
+	 * volatile arguments of aggregate functions, too.
 	 */
 
 	/*
@@ -447,8 +447,8 @@ eval_windowaggregates(WindowAggState *winstate)
 	update_frameheadpos(agg_winobj, winstate->temp_slot_1);
 
 	/*
-	 * Initialize aggregates on first call for partition, or if the frame
-	 * head position moved since last time.
+	 * Initialize aggregates on first call for partition, or if the frame head
+	 * position moved since last time.
 	 */
 	if (winstate->currentpos == 0 ||
 		winstate->frameheadpos != winstate->aggregatedbase)
@@ -468,8 +468,8 @@ eval_windowaggregates(WindowAggState *winstate)
 		}
 
 		/*
-		 * If we created a mark pointer for aggregates, keep it pushed up
-		 * to frame head, so that tuplestore can discard unnecessary rows.
+		 * If we created a mark pointer for aggregates, keep it pushed up to
+		 * frame head, so that tuplestore can discard unnecessary rows.
 		 */
 		if (agg_winobj->markptr >= 0)
 			WinSetMarkPosition(agg_winobj, winstate->frameheadpos);
@@ -485,9 +485,9 @@ eval_windowaggregates(WindowAggState *winstate)
 	/*
 	 * In UNBOUNDED_FOLLOWING mode, we don't have to recalculate aggregates
 	 * except when the frame head moves.  In END_CURRENT_ROW mode, we only
-	 * have to recalculate when the frame head moves or currentpos has advanced
-	 * past the place we'd aggregated up to.  Check for these cases and if
-	 * so, reuse the saved result values.
+	 * have to recalculate when the frame head moves or currentpos has
+	 * advanced past the place we'd aggregated up to.  Check for these cases
+	 * and if so, reuse the saved result values.
 	 */
 	if ((winstate->frameOptions & (FRAMEOPTION_END_UNBOUNDED_FOLLOWING |
 								   FRAMEOPTION_END_CURRENT_ROW)) &&
@@ -508,7 +508,7 @@ eval_windowaggregates(WindowAggState *winstate)
 	 * Advance until we reach a row not in frame (or end of partition).
 	 *
 	 * Note the loop invariant: agg_row_slot is either empty or holds the row
-	 * at position aggregatedupto.  We advance aggregatedupto after processing
+	 * at position aggregatedupto.	We advance aggregatedupto after processing
 	 * a row.
 	 */
 	for (;;)
@@ -896,7 +896,7 @@ row_is_in_frame(WindowAggState *winstate, int64 pos, TupleTableSlot *slot)
 	{
 		if (frameOptions & FRAMEOPTION_ROWS)
 		{
-			int64	offset = DatumGetInt64(winstate->startOffsetValue);
+			int64		offset = DatumGetInt64(winstate->startOffsetValue);
 
 			/* rows before current row + offset are out of frame */
 			if (frameOptions & FRAMEOPTION_START_VALUE_PRECEDING)
@@ -937,7 +937,7 @@ row_is_in_frame(WindowAggState *winstate, int64 pos, TupleTableSlot *slot)
 	{
 		if (frameOptions & FRAMEOPTION_ROWS)
 		{
-			int64	offset = DatumGetInt64(winstate->endOffsetValue);
+			int64		offset = DatumGetInt64(winstate->endOffsetValue);
 
 			/* rows after current row + offset are out of frame */
 			if (frameOptions & FRAMEOPTION_END_VALUE_PRECEDING)
@@ -965,7 +965,7 @@ row_is_in_frame(WindowAggState *winstate, int64 pos, TupleTableSlot *slot)
  *
  * Uses the winobj's read pointer for any required fetches; hence, if the
  * frame mode is one that requires row comparisons, the winobj's mark must
- * not be past the currently known frame head.  Also uses the specified slot
+ * not be past the currently known frame head.	Also uses the specified slot
  * for any required fetches.
  */
 static void
@@ -1007,9 +1007,9 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot)
 			/*
 			 * In RANGE START_CURRENT mode, frame head is the first row that
 			 * is a peer of current row.  We search backwards from current,
-			 * which could be a bit inefficient if peer sets are large.
-			 * Might be better to have a separate read pointer that moves
-			 * forward tracking the frame head.
+			 * which could be a bit inefficient if peer sets are large. Might
+			 * be better to have a separate read pointer that moves forward
+			 * tracking the frame head.
 			 */
 			fhprev = winstate->currentpos - 1;
 			for (;;)
@@ -1018,9 +1018,9 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot)
 				if (fhprev < winstate->frameheadpos)
 					break;
 				if (!window_gettupleslot(winobj, fhprev, slot))
-					break;				/* start of partition */
+					break;		/* start of partition */
 				if (!are_peers(winstate, slot, winstate->ss.ss_ScanTupleSlot))
-					break;				/* not peer of current row */
+					break;		/* not peer of current row */
 				fhprev--;
 			}
 			winstate->frameheadpos = fhprev + 1;
@@ -1034,7 +1034,7 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot)
 		if (frameOptions & FRAMEOPTION_ROWS)
 		{
 			/* In ROWS mode, bound is physically n before/after current */
-			int64	offset = DatumGetInt64(winstate->startOffsetValue);
+			int64		offset = DatumGetInt64(winstate->startOffsetValue);
 
 			if (frameOptions & FRAMEOPTION_START_VALUE_PRECEDING)
 				offset = -offset;
@@ -1070,7 +1070,7 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot)
  *
  * Uses the winobj's read pointer for any required fetches; hence, if the
  * frame mode is one that requires row comparisons, the winobj's mark must
- * not be past the currently known frame tail.  Also uses the specified slot
+ * not be past the currently known frame tail.	Also uses the specified slot
  * for any required fetches.
  */
 static void
@@ -1122,9 +1122,9 @@ update_frametailpos(WindowObject winobj, TupleTableSlot *slot)
 			for (;;)
 			{
 				if (!window_gettupleslot(winobj, ftnext, slot))
-					break;				/* end of partition */
+					break;		/* end of partition */
 				if (!are_peers(winstate, slot, winstate->ss.ss_ScanTupleSlot))
-					break;				/* not peer of current row */
+					break;		/* not peer of current row */
 				ftnext++;
 			}
 			winstate->frametailpos = ftnext - 1;
@@ -1138,7 +1138,7 @@ update_frametailpos(WindowObject winobj, TupleTableSlot *slot)
 		if (frameOptions & FRAMEOPTION_ROWS)
 		{
 			/* In ROWS mode, bound is physically n before/after current */
-			int64	offset = DatumGetInt64(winstate->endOffsetValue);
+			int64		offset = DatumGetInt64(winstate->endOffsetValue);
 
 			if (frameOptions & FRAMEOPTION_END_VALUE_PRECEDING)
 				offset = -offset;
@@ -1213,12 +1213,12 @@ ExecWindowAgg(WindowAggState *winstate)
 	 */
 	if (winstate->all_first)
 	{
-		int				frameOptions = winstate->frameOptions;
-		ExprContext	   *econtext = winstate->ss.ps.ps_ExprContext;
-		Datum			value;
-		bool			isnull;
-		int16			len;
-		bool			byval;
+		int			frameOptions = winstate->frameOptions;
+		ExprContext *econtext = winstate->ss.ps.ps_ExprContext;
+		Datum		value;
+		bool		isnull;
+		int16		len;
+		bool		byval;
 
 		if (frameOptions & FRAMEOPTION_START_VALUE)
 		{
@@ -1238,12 +1238,12 @@ ExecWindowAgg(WindowAggState *winstate)
 			if (frameOptions & FRAMEOPTION_ROWS)
 			{
 				/* value is known to be int8 */
-				int64	offset = DatumGetInt64(value);
+				int64		offset = DatumGetInt64(value);
 
 				if (offset < 0)
 					ereport(ERROR,
 							(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-							 errmsg("frame starting offset must not be negative")));
+					  errmsg("frame starting offset must not be negative")));
 			}
 		}
 		if (frameOptions & FRAMEOPTION_END_VALUE)
@@ -1264,12 +1264,12 @@ ExecWindowAgg(WindowAggState *winstate)
 			if (frameOptions & FRAMEOPTION_ROWS)
 			{
 				/* value is known to be int8 */
-				int64	offset = DatumGetInt64(value);
+				int64		offset = DatumGetInt64(value);
 
 				if (offset < 0)
 					ereport(ERROR,
 							(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-							 errmsg("frame ending offset must not be negative")));
+						errmsg("frame ending offset must not be negative")));
 			}
 		}
 		winstate->all_first = false;
@@ -2146,8 +2146,8 @@ WinGetFuncArgInPartition(WindowObject winobj, int argno,
 			*isout = false;
 		if (set_mark)
 		{
-			int		frameOptions = winstate->frameOptions;
-			int64	mark_pos = abs_pos;
+			int			frameOptions = winstate->frameOptions;
+			int64		mark_pos = abs_pos;
 
 			/*
 			 * In RANGE mode with a moving frame head, we must not let the
@@ -2155,10 +2155,10 @@ WinGetFuncArgInPartition(WindowObject winobj, int argno,
 			 * fetchable during future update_frameheadpos calls.
 			 *
 			 * XXX it is very ugly to pollute window functions' marks with
-			 * this consideration; it could for instance mask a logic bug
-			 * that lets a window function fetch rows before what it had
-			 * claimed was its mark.  Perhaps use a separate mark for
-			 * frame head probes?
+			 * this consideration; it could for instance mask a logic bug that
+			 * lets a window function fetch rows before what it had claimed
+			 * was its mark.  Perhaps use a separate mark for frame head
+			 * probes?
 			 */
 			if ((frameOptions & FRAMEOPTION_RANGE) &&
 				!(frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING))
@@ -2245,8 +2245,8 @@ WinGetFuncArgInFrame(WindowObject winobj, int argno,
 			*isout = false;
 		if (set_mark)
 		{
-			int		frameOptions = winstate->frameOptions;
-			int64	mark_pos = abs_pos;
+			int			frameOptions = winstate->frameOptions;
+			int64		mark_pos = abs_pos;
 
 			/*
 			 * In RANGE mode with a moving frame head, we must not let the
@@ -2254,10 +2254,10 @@ WinGetFuncArgInFrame(WindowObject winobj, int argno,
 			 * fetchable during future update_frameheadpos calls.
 			 *
 			 * XXX it is very ugly to pollute window functions' marks with
-			 * this consideration; it could for instance mask a logic bug
-			 * that lets a window function fetch rows before what it had
-			 * claimed was its mark.  Perhaps use a separate mark for
-			 * frame head probes?
+			 * this consideration; it could for instance mask a logic bug that
+			 * lets a window function fetch rows before what it had claimed
+			 * was its mark.  Perhaps use a separate mark for frame head
+			 * probes?
 			 */
 			if ((frameOptions & FRAMEOPTION_RANGE) &&
 				!(frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING))
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index a78474f037bf752a863adbba3c71e647381d6440..1ffb1b2fee729e955800699500c46f8fc82ebc7b 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.214 2010/02/14 18:42:14 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.215 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1268,10 +1268,9 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 	}
 
 	/*
-	 * If the plan has parameters, copy them into the portal.  Note that
-	 * this must be done after revalidating the plan, because in dynamic
-	 * parameter cases the set of parameters could have changed during
-	 * re-parsing.
+	 * If the plan has parameters, copy them into the portal.  Note that this
+	 * must be done after revalidating the plan, because in dynamic parameter
+	 * cases the set of parameters could have changed during re-parsing.
 	 */
 	if (paramLI)
 	{
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 01b6851e3585bf0f7cbbd226e81f65a84181ad84..70b0f6656650f796c3eb5edb6883524742b20b83 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.194 2010/02/02 19:09:36 mha Exp $
+ *	  $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.195 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -313,9 +313,9 @@ ClientAuthentication(Port *port)
 				 errhint("See server log for details.")));
 
 	/*
-	 * Enable immediate response to SIGTERM/SIGINT/timeout interrupts.
-	 * (We don't want this during hba_getauthmethod() because it might
-	 * have to do database access, eg for role membership checks.)
+	 * Enable immediate response to SIGTERM/SIGINT/timeout interrupts. (We
+	 * don't want this during hba_getauthmethod() because it might have to do
+	 * database access, eg for role membership checks.)
 	 */
 	ImmediateInterruptOK = true;
 	/* And don't forget to detect one that already arrived */
@@ -1960,7 +1960,7 @@ pam_passwd_conv_proc(int num_msg, const struct pam_message ** msg,
 					if (strlen(passwd) == 0)
 					{
 						ereport(LOG,
-								(errmsg("empty password returned by client")));
+							  (errmsg("empty password returned by client")));
 						goto fail;
 					}
 				}
@@ -2243,20 +2243,21 @@ CheckLDAPAuth(Port *port)
 	if (port->hba->ldapbasedn)
 	{
 		/*
-		 * First perform an LDAP search to find the DN for the user we are trying to log
-		 * in as.
+		 * First perform an LDAP search to find the DN for the user we are
+		 * trying to log in as.
 		 */
-		char		   *filter;
-		LDAPMessage	   *search_message;
-		LDAPMessage	   *entry;
-		char		   *attributes[2];
-		char		   *dn;
-		char		   *c;
+		char	   *filter;
+		LDAPMessage *search_message;
+		LDAPMessage *entry;
+		char	   *attributes[2];
+		char	   *dn;
+		char	   *c;
 
 		/*
-		 * Disallow any characters that we would otherwise need to escape, since they
-		 * aren't really reasonable in a username anyway. Allowing them would make it
-		 * possible to inject any kind of custom filters in the LDAP filter.
+		 * Disallow any characters that we would otherwise need to escape,
+		 * since they aren't really reasonable in a username anyway. Allowing
+		 * them would make it possible to inject any kind of custom filters in
+		 * the LDAP filter.
 		 */
 		for (c = port->user_name; *c; c++)
 		{
@@ -2273,17 +2274,17 @@ CheckLDAPAuth(Port *port)
 		}
 
 		/*
-		 * Bind with a pre-defined username/password (if available) for searching. If
-		 * none is specified, this turns into an anonymous bind.
+		 * Bind with a pre-defined username/password (if available) for
+		 * searching. If none is specified, this turns into an anonymous bind.
 		 */
 		r = ldap_simple_bind_s(ldap,
-							   port->hba->ldapbinddn ? port->hba->ldapbinddn : "",
-							   port->hba->ldapbindpasswd ? port->hba->ldapbindpasswd : "");
+						  port->hba->ldapbinddn ? port->hba->ldapbinddn : "",
+				 port->hba->ldapbindpasswd ? port->hba->ldapbindpasswd : "");
 		if (r != LDAP_SUCCESS)
 		{
 			ereport(LOG,
 					(errmsg("could not perform initial LDAP bind for ldapbinddn \"%s\" on server \"%s\": error code %d",
-							port->hba->ldapbinddn, port->hba->ldapserver, r)));
+						  port->hba->ldapbinddn, port->hba->ldapserver, r)));
 			return STATUS_ERROR;
 		}
 
@@ -2291,10 +2292,10 @@ CheckLDAPAuth(Port *port)
 		attributes[0] = port->hba->ldapsearchattribute ? port->hba->ldapsearchattribute : "uid";
 		attributes[1] = NULL;
 
-		filter = palloc(strlen(attributes[0])+strlen(port->user_name)+4);
+		filter = palloc(strlen(attributes[0]) + strlen(port->user_name) + 4);
 		sprintf(filter, "(%s=%s)",
-				 attributes[0],
-				 port->user_name);
+				attributes[0],
+				port->user_name);
 
 		r = ldap_search_s(ldap,
 						  port->hba->ldapbasedn,
@@ -2323,7 +2324,7 @@ CheckLDAPAuth(Port *port)
 				ereport(LOG,
 						(errmsg("LDAP search failed for filter \"%s\" on server \"%s\": user is not unique (%ld matches)",
 								filter, port->hba->ldapserver,
-								(long) ldap_count_entries(ldap, search_message))));
+						  (long) ldap_count_entries(ldap, search_message))));
 
 			pfree(filter);
 			ldap_msgfree(search_message);
@@ -2334,11 +2335,12 @@ CheckLDAPAuth(Port *port)
 		dn = ldap_get_dn(ldap, entry);
 		if (dn == NULL)
 		{
-			int error;
-			(void)ldap_get_option(ldap, LDAP_OPT_ERROR_NUMBER, &error);
+			int			error;
+
+			(void) ldap_get_option(ldap, LDAP_OPT_ERROR_NUMBER, &error);
 			ereport(LOG,
 					(errmsg("could not get dn for the first entry matching \"%s\" on server \"%s\": %s",
-							filter, port->hba->ldapserver, ldap_err2string(error))));
+					filter, port->hba->ldapserver, ldap_err2string(error))));
 			pfree(filter);
 			ldap_msgfree(search_message);
 			return STATUS_ERROR;
@@ -2353,18 +2355,19 @@ CheckLDAPAuth(Port *port)
 		r = ldap_unbind_s(ldap);
 		if (r != LDAP_SUCCESS)
 		{
-			int error;
-			(void)ldap_get_option(ldap, LDAP_OPT_ERROR_NUMBER, &error);
+			int			error;
+
+			(void) ldap_get_option(ldap, LDAP_OPT_ERROR_NUMBER, &error);
 			ereport(LOG,
 					(errmsg("could not unbind after searching for user \"%s\" on server \"%s\": %s",
-							fulluser, port->hba->ldapserver, ldap_err2string(error))));
+				  fulluser, port->hba->ldapserver, ldap_err2string(error))));
 			pfree(fulluser);
 			return STATUS_ERROR;
 		}
 
 		/*
-		 * Need to re-initialize the LDAP connection, so that we can bind
-		 * to it with a different username.
+		 * Need to re-initialize the LDAP connection, so that we can bind to
+		 * it with a different username.
 		 */
 		if (InitializeLDAPConnection(port, &ldap) == STATUS_ERROR)
 		{
@@ -2378,13 +2381,13 @@ CheckLDAPAuth(Port *port)
 	{
 		fulluser = palloc((port->hba->ldapprefix ? strlen(port->hba->ldapprefix) : 0) +
 						  strlen(port->user_name) +
-						  (port->hba->ldapsuffix ? strlen(port->hba->ldapsuffix) : 0) +
+				(port->hba->ldapsuffix ? strlen(port->hba->ldapsuffix) : 0) +
 						  1);
 
 		sprintf(fulluser, "%s%s%s",
-				 port->hba->ldapprefix ? port->hba->ldapprefix : "",
-				 port->user_name,
-				 port->hba->ldapsuffix ? port->hba->ldapsuffix : "");
+				port->hba->ldapprefix ? port->hba->ldapprefix : "",
+				port->user_name,
+				port->hba->ldapsuffix ? port->hba->ldapsuffix : "");
 	}
 
 	r = ldap_simple_bind_s(ldap, fulluser, passwd);
@@ -2429,7 +2432,6 @@ CheckCertAuth(Port *port)
 	/* Just pass the certificate CN to the usermap check */
 	return check_usermap(port->hba->usermap, port->user_name, port->peer_cn, false);
 }
-
 #endif
 
 
@@ -2448,17 +2450,17 @@ CheckCertAuth(Port *port)
 
 typedef struct
 {
-	uint8	attribute;
-	uint8	length;
-	uint8	data[1];
+	uint8		attribute;
+	uint8		length;
+	uint8		data[1];
 } radius_attribute;
 
 typedef struct
 {
-	uint8	code;
-	uint8	id;
-	uint16	length;
-	uint8	vector[RADIUS_VECTOR_LENGTH];
+	uint8		code;
+	uint8		id;
+	uint16		length;
+	uint8		vector[RADIUS_VECTOR_LENGTH];
 } radius_packet;
 
 /* RADIUS packet types */
@@ -2484,14 +2486,15 @@ typedef struct
 static void
 radius_add_attribute(radius_packet *packet, uint8 type, const unsigned char *data, int len)
 {
-	radius_attribute		*attr;
+	radius_attribute *attr;
 
 	if (packet->length + len > RADIUS_BUFFER_SIZE)
 	{
 		/*
-		 * With remotely realistic data, this can never happen. But catch it just to make
-		 * sure we don't overrun a buffer. We'll just skip adding the broken attribute,
-		 * which will in the end cause authentication to fail.
+		 * With remotely realistic data, this can never happen. But catch it
+		 * just to make sure we don't overrun a buffer. We'll just skip adding
+		 * the broken attribute, which will in the end cause authentication to
+		 * fail.
 		 */
 		elog(WARNING,
 			 "Adding attribute code %i with length %i to radius packet would create oversize packet, ignoring",
@@ -2500,9 +2503,9 @@ radius_add_attribute(radius_packet *packet, uint8 type, const unsigned char *dat
 
 	}
 
-	attr = (radius_attribute *) ((unsigned char *)packet + packet->length);
+	attr = (radius_attribute *) ((unsigned char *) packet + packet->length);
 	attr->attribute = type;
-	attr->length = len + 2; /* total size includes type and length */
+	attr->length = len + 2;		/* total size includes type and length */
 	memcpy(attr->data, data, len);
 	packet->length += attr->length;
 }
@@ -2510,31 +2513,33 @@ radius_add_attribute(radius_packet *packet, uint8 type, const unsigned char *dat
 static int
 CheckRADIUSAuth(Port *port)
 {
-	char			   *passwd;
-	char			   *identifier = "postgresql";
-	char				radius_buffer[RADIUS_BUFFER_SIZE];
-	char				receive_buffer[RADIUS_BUFFER_SIZE];
-	radius_packet	   *packet = (radius_packet *)radius_buffer;
-	radius_packet	   *receivepacket = (radius_packet *)receive_buffer;
-	int32				service = htonl(RADIUS_AUTHENTICATE_ONLY);
-	uint8			   *cryptvector;
-	uint8				encryptedpassword[RADIUS_VECTOR_LENGTH];
-	int					packetlength;
-	pgsocket			sock;
+	char	   *passwd;
+	char	   *identifier = "postgresql";
+	char		radius_buffer[RADIUS_BUFFER_SIZE];
+	char		receive_buffer[RADIUS_BUFFER_SIZE];
+	radius_packet *packet = (radius_packet *) radius_buffer;
+	radius_packet *receivepacket = (radius_packet *) receive_buffer;
+	int32		service = htonl(RADIUS_AUTHENTICATE_ONLY);
+	uint8	   *cryptvector;
+	uint8		encryptedpassword[RADIUS_VECTOR_LENGTH];
+	int			packetlength;
+	pgsocket	sock;
+
 #ifdef HAVE_IPV6
 	struct sockaddr_in6 localaddr;
 	struct sockaddr_in6 remoteaddr;
 #else
-	struct sockaddr_in	localaddr;
-	struct sockaddr_in	remoteaddr;
+	struct sockaddr_in localaddr;
+	struct sockaddr_in remoteaddr;
 #endif
-	struct addrinfo		hint;
-	struct addrinfo	   *serveraddrs;
-	char				portstr[128];
-	ACCEPT_TYPE_ARG3	addrsize;
-	fd_set				fdset;
-	struct timeval		timeout;
-	int					i,r;
+	struct addrinfo hint;
+	struct addrinfo *serveraddrs;
+	char		portstr[128];
+	ACCEPT_TYPE_ARG3 addrsize;
+	fd_set		fdset;
+	struct timeval timeout;
+	int			i,
+				r;
 
 	/* Make sure struct alignment is correct */
 	Assert(offsetof(radius_packet, vector) == 4);
@@ -2619,8 +2624,8 @@ CheckRADIUSAuth(Port *port)
 	radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier));
 
 	/*
-	 * RADIUS password attributes are calculated as:
-	 * e[0] = p[0] XOR MD5(secret + vector)
+	 * RADIUS password attributes are calculated as: e[0] = p[0] XOR
+	 * MD5(secret + vector)
 	 */
 	cryptvector = palloc(RADIUS_VECTOR_LENGTH + strlen(port->hba->radiussecret));
 	memcpy(cryptvector, port->hba->radiussecret, strlen(port->hba->radiussecret));
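
The calculation named in the comment above is the standard User-Password
hiding scheme of RFC 2865 (section 5.2): the zero-padded password is XORed
with MD5(shared secret || request authenticator).  CheckRADIUSAuth only
handles passwords up to RADIUS_VECTOR_LENGTH (16) bytes, so only the
single-block case arises here.  A minimal standalone sketch of that case,
using OpenSSL's MD5() as a stand-in for the backend's pg_md5_binary()
(sketch only, not PostgreSQL code; build with -lcrypto):

	#include <string.h>
	#include <stdint.h>
	#include <openssl/md5.h>

	#define VECTOR_LEN 16			/* same as RADIUS_VECTOR_LENGTH */

	/* e[i] = p[i] XOR MD5(secret || authenticator)[i] */
	static void
	radius_hide_password(const char *secret,
						 const uint8_t authenticator[VECTOR_LEN],
						 const char *password,
						 uint8_t encrypted[VECTOR_LEN])
	{
		uint8_t		buf[512 + VECTOR_LEN];
		uint8_t		digest[MD5_DIGEST_LENGTH];
		size_t		secretlen = strlen(secret);
		size_t		pwdlen = strlen(password);
		int			i;

		if (secretlen > 512)
			secretlen = 512;	/* keep the sketch's buffer handling trivial */

		/* b = MD5(secret || request authenticator) */
		memcpy(buf, secret, secretlen);
		memcpy(buf + secretlen, authenticator, VECTOR_LEN);
		MD5(buf, secretlen + VECTOR_LEN, digest);

		/* XOR, with the password zero-padded to 16 bytes */
		for (i = 0; i < VECTOR_LEN; i++)
			encrypted[i] = (uint8_t) ((i < (int) pwdlen ? password[i] : 0) ^ digest[i]);
	}

The secret-plus-authenticator buffer is what cryptvector, built just above,
is for.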
@@ -2668,7 +2673,7 @@ CheckRADIUSAuth(Port *port)
 	localaddr.sin_addr.s_addr = INADDR_ANY;
 	addrsize = sizeof(struct sockaddr_in);
 #endif
-	if (bind(sock, (struct sockaddr *) &localaddr, addrsize))
+	if (bind(sock, (struct sockaddr *) & localaddr, addrsize))
 	{
 		ereport(LOG,
 				(errmsg("could not bind local RADIUS socket: %m")));
@@ -2694,7 +2699,8 @@ CheckRADIUSAuth(Port *port)
 	timeout.tv_sec = RADIUS_TIMEOUT;
 	timeout.tv_usec = 0;
 	FD_ZERO(&fdset);
-	FD_SET(sock, &fdset);
+	FD_SET		(sock, &fdset);
+
 	while (true)
 	{
 		r = select(sock + 1, &fdset, NULL, NULL, &timeout);
@@ -2724,7 +2730,7 @@ CheckRADIUSAuth(Port *port)
 	/* Read the response packet */
 	addrsize = sizeof(remoteaddr);
 	packetlength = recvfrom(sock, receive_buffer, RADIUS_BUFFER_SIZE, 0,
-							(struct sockaddr *) &remoteaddr, &addrsize);
+							(struct sockaddr *) & remoteaddr, &addrsize);
 	if (packetlength < 0)
 	{
 		ereport(LOG,
@@ -2763,8 +2769,8 @@ CheckRADIUSAuth(Port *port)
 	if (packetlength != ntohs(receivepacket->length))
 	{
 		ereport(LOG,
-				(errmsg("RADIUS response has corrupt length: %i (actual length %i)",
-						ntohs(receivepacket->length), packetlength)));
+		 (errmsg("RADIUS response has corrupt length: %i (actual length %i)",
+				 ntohs(receivepacket->length), packetlength)));
 		return STATUS_ERROR;
 	}
 
@@ -2783,23 +2789,26 @@ CheckRADIUSAuth(Port *port)
 	cryptvector = palloc(packetlength + strlen(port->hba->radiussecret));
 
 	memcpy(cryptvector, receivepacket, 4);		/* code+id+length */
-	memcpy(cryptvector+4, packet->vector, RADIUS_VECTOR_LENGTH);	/* request authenticator, from original packet */
-	if (packetlength > RADIUS_HEADER_LENGTH)	/* there may be no attributes at all */
-		memcpy(cryptvector+RADIUS_HEADER_LENGTH, receive_buffer + RADIUS_HEADER_LENGTH, packetlength-RADIUS_HEADER_LENGTH);
-	memcpy(cryptvector+packetlength, port->hba->radiussecret, strlen(port->hba->radiussecret));
+	memcpy(cryptvector + 4, packet->vector, RADIUS_VECTOR_LENGTH);		/* request
+																		 * authenticator, from
+																		 * original packet */
+	if (packetlength > RADIUS_HEADER_LENGTH)	/* there may be no attributes
+												 * at all */
+		memcpy(cryptvector + RADIUS_HEADER_LENGTH, receive_buffer + RADIUS_HEADER_LENGTH, packetlength - RADIUS_HEADER_LENGTH);
+	memcpy(cryptvector + packetlength, port->hba->radiussecret, strlen(port->hba->radiussecret));
 
 	if (!pg_md5_binary(cryptvector,
 					   packetlength + strlen(port->hba->radiussecret),
 					   encryptedpassword))
 	{
 		ereport(LOG,
-				(errmsg("could not perform md5 encryption of received packet")));
+			(errmsg("could not perform md5 encryption of received packet")));
 		pfree(cryptvector);
 		return STATUS_ERROR;
 	}
 	pfree(cryptvector);
 
-	if (memcmp(receivepacket->vector, encryptedpassword, RADIUS_VECTOR_LENGTH)  != 0)
+	if (memcmp(receivepacket->vector, encryptedpassword, RADIUS_VECTOR_LENGTH) != 0)
 	{
 		ereport(LOG,
 				(errmsg("RADIUS response has incorrect MD5 signature")));
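
(The signature check above is the Response Authenticator verification from
RFC 2865: the expected value is MD5(Code || ID || Length || original Request
Authenticator || response Attributes || shared secret), which is what
cryptvector is assembled to hold before being hashed and compared against
the vector field of the received packet.  A mismatch means the reply did not
come from a server holding the same shared secret, or was damaged in
transit.)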
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 2a317a6a9a427e34a588f5f1caaaec0d66628a9b..464183da783f3de534984d54099e76bd91f355e0 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.93 2010/01/02 16:57:45 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.94 2010/02/26 02:00:42 momjian Exp $
  *
  * NOTES
  *	  This should be moved to a more appropriate place.  It is here
@@ -55,7 +55,7 @@
 /*
  * compatibility flag for permission checks
  */
-bool lo_compat_privileges;
+bool		lo_compat_privileges;
 
 /*#define FSDB 1*/
 #define BUFSIZE			8192
@@ -167,7 +167,7 @@ lo_read(int fd, char *buf, int len)
 		pg_largeobject_aclcheck_snapshot(cookies[fd]->id,
 										 GetUserId(),
 										 ACL_SELECT,
-										 cookies[fd]->snapshot) != ACLCHECK_OK)
+									   cookies[fd]->snapshot) != ACLCHECK_OK)
 		ereport(ERROR,
 				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
 				 errmsg("permission denied for large object %u",
@@ -199,9 +199,9 @@ lo_write(int fd, const char *buf, int len)
 		pg_largeobject_aclcheck_snapshot(cookies[fd]->id,
 										 GetUserId(),
 										 ACL_UPDATE,
-										 cookies[fd]->snapshot) != ACLCHECK_OK)
+									   cookies[fd]->snapshot) != ACLCHECK_OK)
 		ereport(ERROR,
-                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
 				 errmsg("permission denied for large object %u",
 						cookies[fd]->id)));
 
@@ -522,7 +522,7 @@ lo_truncate(PG_FUNCTION_ARGS)
 		pg_largeobject_aclcheck_snapshot(cookies[fd]->id,
 										 GetUserId(),
 										 ACL_UPDATE,
-										 cookies[fd]->snapshot) != ACLCHECK_OK)
+									   cookies[fd]->snapshot) != ACLCHECK_OK)
 		ereport(ERROR,
 				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
 				 errmsg("permission denied for large object %u",
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index 6dac77bff7890221220826f1748879992ce3f8f1..19047bd148a2f8311d5206173911d686e51d2b9d 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -11,7 +11,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.98 2010/02/25 13:26:15 mha Exp $
+ *	  $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.99 2010/02/26 02:00:42 momjian Exp $
  *
  *	  Since the server static private key ($DataDir/server.key)
  *	  will normally be stored unencrypted so that the database
@@ -98,7 +98,7 @@ static const char *SSLerrmessage(void);
  *	(total in both directions) before we require renegotiation.
  *	Set to 0 to disable renegotiation completely.
  */
-int ssl_renegotiation_limit;
+int			ssl_renegotiation_limit;
 
 #ifdef USE_SSL
 static SSL_CTX *SSL_context = NULL;
@@ -973,8 +973,8 @@ aloop:
 		else
 		{
 			/*
-			 * Reject embedded NULLs in certificate common name to prevent attacks like
-			 * CVE-2009-4034.
+			 * Reject embedded NULLs in certificate common name to prevent
+			 * attacks like CVE-2009-4034.
 			 */
 			if (r != strlen(port->peer_cn))
 			{
diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c
index 56a20a674919bc2e0f4b36e9476841b835ad7faf..c956bf10a41d904770c1452575ee58d32901fdb2 100644
--- a/src/backend/libpq/crypt.c
+++ b/src/backend/libpq/crypt.c
@@ -9,7 +9,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/backend/libpq/crypt.c,v 1.80 2010/02/14 18:42:15 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/crypt.c,v 1.81 2010/02/26 02:00:42 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -41,8 +41,8 @@ md5_crypt_verify(const Port *port, const char *role, char *client_pass)
 	bool		isnull;
 
 	/*
-	 * Disable immediate interrupts while doing database access.  (Note
-	 * we don't bother to turn this back on if we hit one of the failure
+	 * Disable immediate interrupts while doing database access.  (Note we
+	 * don't bother to turn this back on if we hit one of the failure
 	 * conditions, since we can expect we'll just exit right away anyway.)
 	 */
 	ImmediateInterruptOK = false;
@@ -50,14 +50,14 @@ md5_crypt_verify(const Port *port, const char *role, char *client_pass)
 	/* Get role info from pg_authid */
 	roleTup = SearchSysCache1(AUTHNAME, PointerGetDatum(role));
 	if (!HeapTupleIsValid(roleTup))
-		return STATUS_ERROR;					/* no such user */
+		return STATUS_ERROR;	/* no such user */
 
 	datum = SysCacheGetAttr(AUTHNAME, roleTup,
 							Anum_pg_authid_rolpassword, &isnull);
 	if (isnull)
 	{
 		ReleaseSysCache(roleTup);
-		return STATUS_ERROR;					/* user has no password */
+		return STATUS_ERROR;	/* user has no password */
 	}
 	shadow_pass = TextDatumGetCString(datum);
 
@@ -69,7 +69,7 @@ md5_crypt_verify(const Port *port, const char *role, char *client_pass)
 	ReleaseSysCache(roleTup);
 
 	if (*shadow_pass == '\0')
-		return STATUS_ERROR;					/* empty password */
+		return STATUS_ERROR;	/* empty password */
 
 	/* Re-enable immediate response to SIGTERM/SIGINT/timeout interrupts */
 	ImmediateInterruptOK = true;
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index 94cff7cfd5762c4c1005df3e38bc95f6cd67154b..ae075ed939cf15a07240b2f3e265a34edc725ee0 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -10,7 +10,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.197 2010/02/02 19:09:37 mha Exp $
+ *	  $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.198 2010/02/26 02:00:43 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -56,8 +56,8 @@ static List *parsed_hba_lines = NIL;
 
 /*
  * These variables hold the pre-parsed contents of the ident usermap
- * configuration file.  ident_lines is a list of sublists, one sublist for
- * each (non-empty, non-comment) line of the file.  The sublist items are
+ * configuration file.	ident_lines is a list of sublists, one sublist for
+ * each (non-empty, non-comment) line of the file.	The sublist items are
  * palloc'd strings, one string per token on the line.  Note there will always
  * be at least one token, since blank lines are not entered in the data
  * structure.  ident_line_nums is an integer list containing the actual line
@@ -529,14 +529,14 @@ check_db(const char *dbname, const char *role, Oid roleid, char *param_str)
  * Check to see if a connecting IP matches the given address and netmask.
  */
 static bool
-check_ip(SockAddr *raddr, struct sockaddr *addr, struct sockaddr *mask)
+check_ip(SockAddr *raddr, struct sockaddr * addr, struct sockaddr * mask)
 {
 	if (raddr->addr.ss_family == addr->sa_family)
 	{
 		/* Same address family */
 		if (!pg_range_sockaddr(&raddr->addr,
-							   (struct sockaddr_storage*)addr,
-		                       (struct sockaddr_storage*)mask))
+							   (struct sockaddr_storage *) addr,
+							   (struct sockaddr_storage *) mask))
 			return false;
 	}
 #ifdef HAVE_IPV6
@@ -545,8 +545,8 @@ check_ip(SockAddr *raddr, struct sockaddr *addr, struct sockaddr *mask)
 	{
 		/*
 		 * If we're connected on IPv6 but the file specifies an IPv4 address
-		 * to match against, promote the latter to an IPv6 address
-		 * before trying to match the client's address.
+		 * to match against, promote the latter to an IPv6 address before
+		 * trying to match the client's address.
 		 */
 		struct sockaddr_storage addrcopy,
 					maskcopy;
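
(Concretely, an IPv4 pg_hba.conf entry such as 192.168.0.1 with a
255.255.255.255 mask is promoted to its IPv4-mapped IPv6 form,
::ffff:192.168.0.1, with the mask widened to match, so that it can be
compared against the IPv6 address of the connected client; that is what the
pg_promote_v4_to_v6_addr()/pg_promote_v4_to_v6_mask() helpers in ip.c
provide.)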
@@ -573,7 +573,7 @@ check_ip(SockAddr *raddr, struct sockaddr *addr, struct sockaddr *mask)
  * pg_foreach_ifaddr callback: does client addr match this machine interface?
  */
 static void
-check_network_callback(struct sockaddr *addr, struct sockaddr *netmask,
+check_network_callback(struct sockaddr * addr, struct sockaddr * netmask,
 					   void *cb_data)
 {
 	check_network_data *cn = (check_network_data *) cb_data;
@@ -587,7 +587,7 @@ check_network_callback(struct sockaddr *addr, struct sockaddr *netmask,
 	{
 		/* Make an all-ones netmask of appropriate length for family */
 		pg_sockaddr_cidr_mask(&mask, NULL, addr->sa_family);
-		cn->result = check_ip(cn->raddr, addr, (struct sockaddr*) &mask);
+		cn->result = check_ip(cn->raddr, addr, (struct sockaddr *) & mask);
 	}
 	else
 	{
@@ -825,13 +825,13 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline)
 				if (pg_sockaddr_cidr_mask(&parsedline->mask, cidr_slash + 1,
 										  parsedline->addr.ss_family) < 0)
 				{
-					*cidr_slash = '/';		/* restore token for message */
+					*cidr_slash = '/';	/* restore token for message */
 					ereport(LOG,
 							(errcode(ERRCODE_CONFIG_FILE_ERROR),
 							 errmsg("invalid CIDR mask in address \"%s\"",
 									token),
-							 errcontext("line %d of configuration file \"%s\"",
-										line_num, HbaFileName)));
+						   errcontext("line %d of configuration file \"%s\"",
+									  line_num, HbaFileName)));
 					pfree(token);
 					return false;
 				}
@@ -846,9 +846,9 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline)
 				{
 					ereport(LOG,
 							(errcode(ERRCODE_CONFIG_FILE_ERROR),
-							 errmsg("end-of-line before netmask specification"),
-							 errcontext("line %d of configuration file \"%s\"",
-										line_num, HbaFileName)));
+						  errmsg("end-of-line before netmask specification"),
+						   errcontext("line %d of configuration file \"%s\"",
+									  line_num, HbaFileName)));
 					return false;
 				}
 				token = lfirst(line_item);
@@ -860,8 +860,8 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline)
 							(errcode(ERRCODE_CONFIG_FILE_ERROR),
 							 errmsg("invalid IP mask \"%s\": %s",
 									token, gai_strerror(ret)),
-							 errcontext("line %d of configuration file \"%s\"",
-										line_num, HbaFileName)));
+						   errcontext("line %d of configuration file \"%s\"",
+									  line_num, HbaFileName)));
 					if (gai_result)
 						pg_freeaddrinfo_all(hints.ai_family, gai_result);
 					return false;
@@ -952,7 +952,7 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline)
 #else
 		unsupauth = "cert";
 #endif
-	else if (strcmp(token, "radius")== 0)
+	else if (strcmp(token, "radius") == 0)
 		parsedline->auth_method = uaRADIUS;
 	else
 	{
@@ -1234,8 +1234,8 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline)
 
 		/*
 		 * LDAP can operate in two modes: either with a direct bind, using
-		 * ldapprefix and ldapsuffix, or using a search+bind,
-		 * using ldapbasedn, ldapbinddn, ldapbindpasswd and ldapsearchattribute.
+		 * ldapprefix and ldapsuffix, or using a search+bind, using
+		 * ldapbasedn, ldapbinddn, ldapbindpasswd and ldapsearchattribute.
 		 * Disallow mixing these parameters.
 		 */
 		if (parsedline->ldapprefix || parsedline->ldapsuffix)
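
For illustration (server and DN values here are hypothetical), the two modes
correspond to pg_hba.conf lines along these lines:

	# simple bind: the DN is built as ldapprefix || username || ldapsuffix
	host  all  all  0.0.0.0/0  ldap  ldapserver=ldap.example.net ldapprefix="cn=" ldapsuffix=", dc=example, dc=net"

	# search+bind: bind (optionally as ldapbinddn), search under ldapbasedn
	# for an entry whose ldapsearchattribute matches the username, then
	# re-bind as the DN that was found
	host  all  all  0.0.0.0/0  ldap  ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapsearchattribute=uid

With the first form and a connection attempt as user alice, CheckLDAPAuth
binds as "cn=alice, dc=example, dc=net" -- exactly the string the
prefix/user/suffix sprintf earlier in this patch assembles.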
@@ -1336,8 +1336,8 @@ check_hba(hbaPort *port)
 			{
 				case ipCmpMask:
 					if (!check_ip(&port->raddr,
-								  (struct sockaddr *) &hba->addr,
-								  (struct sockaddr *) &hba->mask))
+								  (struct sockaddr *) & hba->addr,
+								  (struct sockaddr *) & hba->mask))
 						continue;
 					break;
 				case ipCmpSameHost:
diff --git a/src/backend/libpq/ip.c b/src/backend/libpq/ip.c
index 778b9f9ea4a58fc5ad44f84d49b0aa13d4ba473d..7c17210cbe653b1945fb1be0c1d7e9516d123100 100644
--- a/src/backend/libpq/ip.c
+++ b/src/backend/libpq/ip.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.50 2010/01/10 14:16:07 mha Exp $
+ *	  $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.51 2010/02/26 02:00:43 momjian Exp $
  *
  * This file and the IPV6 implementation were initially provided by
  * Nigel Kukard <nkukard@lbsd.net>, Linux Based Systems Design
@@ -482,7 +482,6 @@ pg_promote_v4_to_v6_mask(struct sockaddr_storage * addr)
 
 	memcpy(addr, &addr6, sizeof(addr6));
 }
-
 #endif   /* HAVE_IPV6 */
 
 
@@ -492,7 +491,7 @@ pg_promote_v4_to_v6_mask(struct sockaddr_storage * addr)
  */
 static void
 run_ifaddr_callback(PgIfAddrCallback callback, void *cb_data,
-					struct sockaddr *addr, struct sockaddr *mask)
+					struct sockaddr * addr, struct sockaddr * mask)
 {
 	struct sockaddr_storage fullmask;
 
@@ -508,13 +507,13 @@ run_ifaddr_callback(PgIfAddrCallback callback, void *cb_data,
 		}
 		else if (mask->sa_family == AF_INET)
 		{
-			if (((struct sockaddr_in*)mask)->sin_addr.s_addr == INADDR_ANY)
+			if (((struct sockaddr_in *) mask)->sin_addr.s_addr == INADDR_ANY)
 				mask = NULL;
 		}
 #ifdef HAVE_IPV6
 		else if (mask->sa_family == AF_INET6)
 		{
-			if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6*)mask)->sin6_addr))
+			if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *) mask)->sin6_addr))
 				mask = NULL;
 		}
 #endif
@@ -524,7 +523,7 @@ run_ifaddr_callback(PgIfAddrCallback callback, void *cb_data,
 	if (!mask)
 	{
 		pg_sockaddr_cidr_mask(&fullmask, NULL, addr->sa_family);
-		mask = (struct sockaddr*) &fullmask;
+		mask = (struct sockaddr *) & fullmask;
 	}
 
 	(*callback) (addr, mask, cb_data);
@@ -544,11 +543,13 @@ run_ifaddr_callback(PgIfAddrCallback callback, void *cb_data,
 int
 pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 {
-	INTERFACE_INFO *ptr, *ii = NULL;
-	unsigned long length, i;
+	INTERFACE_INFO *ptr,
+			   *ii = NULL;
+	unsigned long length,
+				i;
 	unsigned long n_ii = 0;
-	SOCKET sock;
-	int error;
+	SOCKET		sock;
+	int			error;
 
 	sock = WSASocket(AF_INET, SOCK_DGRAM, 0, 0, 0, 0);
 	if (sock == SOCKET_ERROR)
@@ -557,7 +558,7 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 	while (n_ii < 1024)
 	{
 		n_ii += 64;
-		ptr = realloc(ii, sizeof (INTERFACE_INFO) * n_ii);
+		ptr = realloc(ii, sizeof(INTERFACE_INFO) * n_ii);
 		if (!ptr)
 		{
 			free(ii);
@@ -568,8 +569,8 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 
 		ii = ptr;
 		if (WSAIoctl(sock, SIO_GET_INTERFACE_LIST, 0, 0,
-		             ii, n_ii * sizeof (INTERFACE_INFO),
-		             &length, 0, 0) == SOCKET_ERROR)
+					 ii, n_ii * sizeof(INTERFACE_INFO),
+					 &length, 0, 0) == SOCKET_ERROR)
 		{
 			error = WSAGetLastError();
 			if (error == WSAEFAULT || error == WSAENOBUFS)
@@ -584,15 +585,14 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 
 	for (i = 0; i < length / sizeof(INTERFACE_INFO); ++i)
 		run_ifaddr_callback(callback, cb_data,
-		                    (struct sockaddr*)&ii[i].iiAddress,
-		                    (struct sockaddr*)&ii[i].iiNetmask);
+							(struct sockaddr *) & ii[i].iiAddress,
+							(struct sockaddr *) & ii[i].iiNetmask);
 
 	closesocket(sock);
 	free(ii);
 	return 0;
 }
-
-#elif HAVE_GETIFADDRS /* && !WIN32 */
+#elif HAVE_GETIFADDRS			/* && !WIN32 */
 
 #ifdef HAVE_IFADDRS_H
 #include <ifaddrs.h>
@@ -608,20 +608,20 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 int
 pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 {
-	struct ifaddrs *ifa, *l;
+	struct ifaddrs *ifa,
+			   *l;
 
 	if (getifaddrs(&ifa) < 0)
 		return -1;
 
 	for (l = ifa; l; l = l->ifa_next)
 		run_ifaddr_callback(callback, cb_data,
-		                    l->ifa_addr, l->ifa_netmask);
+							l->ifa_addr, l->ifa_netmask);
 
 	freeifaddrs(ifa);
 	return 0;
 }
-
-#else /* !HAVE_GETIFADDRS && !WIN32 */
+#else							/* !HAVE_GETIFADDRS && !WIN32 */
 
 #ifdef HAVE_SYS_IOCTL_H
 #include <sys/ioctl.h>
@@ -652,15 +652,21 @@ int
 pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 {
 	struct lifconf lifc;
-	struct lifreq *lifr, lmask;
-	struct sockaddr *addr, *mask;
-	char *ptr, *buffer = NULL;
-	size_t n_buffer = 1024;
-	pgsocket sock, fd;
+	struct lifreq *lifr,
+				lmask;
+	struct sockaddr *addr,
+			   *mask;
+	char	   *ptr,
+			   *buffer = NULL;
+	size_t		n_buffer = 1024;
+	pgsocket	sock,
+				fd;
+
 #ifdef HAVE_IPV6
-	pgsocket sock6;
+	pgsocket	sock6;
 #endif
-	int i, total;
+	int			i,
+				total;
 
 	sock = socket(AF_INET, SOCK_DGRAM, 0);
 	if (sock == -1)
@@ -678,7 +684,7 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 			return -1;
 		}
 
-		memset(&lifc, 0, sizeof (lifc));
+		memset(&lifc, 0, sizeof(lifc));
 		lifc.lifc_family = AF_UNSPEC;
 		lifc.lifc_buf = buffer = ptr;
 		lifc.lifc_len = n_buffer;
@@ -693,9 +699,9 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 		}
 
 		/*
-		 * Some Unixes try to return as much data as possible,
-		 * with no indication of whether enough space allocated.
-		 * Don't believe we have it all unless there's lots of slop.
+		 * Some Unixes try to return as much data as possible, with no
+		 * indication of whether enough space allocated. Don't believe we have
+		 * it all unless there's lots of slop.
 		 */
 		if (lifc.lifc_len < n_buffer - 1024)
 			break;
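
("Lots of slop" is made concrete by the test just above: the request is
retried with an ever larger buffer until the kernel reports using at least
1024 bytes less than it was offered (lifc.lifc_len < n_buffer - 1024).  Only
then is the result trusted, because an almost-full buffer may simply mean
the kernel stopped writing when it ran out of room, without reporting any
error.)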
@@ -716,7 +722,7 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 	lifr = lifc.lifc_req;
 	for (i = 0; i < total; ++i)
 	{
-		addr = (struct sockaddr*)&lifr[i].lifr_addr;
+		addr = (struct sockaddr *) & lifr[i].lifr_addr;
 		memcpy(&lmask, &lifr[i], sizeof(struct lifreq));
 #ifdef HAVE_IPV6
 		fd = (addr->sa_family == AF_INET6) ? sock6 : sock;
@@ -726,7 +732,7 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 		if (ioctl(fd, SIOCGLIFNETMASK, &lmask) < 0)
 			mask = NULL;
 		else
-			mask = (struct sockaddr*)&lmask.lifr_addr;
+			mask = (struct sockaddr *) & lmask.lifr_addr;
 		run_ifaddr_callback(callback, cb_data, addr, mask);
 	}
 
@@ -737,7 +743,6 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 #endif
 	return 0;
 }
-
 #elif defined(SIOCGIFCONF)
 
 /*
@@ -754,17 +759,16 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 /* Calculate based on sockaddr.sa_len */
 #ifdef HAVE_STRUCT_SOCKADDR_SA_LEN
 #define _SIZEOF_ADDR_IFREQ(ifr) \
-        ((ifr).ifr_addr.sa_len > sizeof(struct sockaddr) ? \
-         (sizeof(struct ifreq) - sizeof(struct sockaddr) + \
-          (ifr).ifr_addr.sa_len) : sizeof(struct ifreq))
+		((ifr).ifr_addr.sa_len > sizeof(struct sockaddr) ? \
+		 (sizeof(struct ifreq) - sizeof(struct sockaddr) + \
+		  (ifr).ifr_addr.sa_len) : sizeof(struct ifreq))
 
 /* Padded ifreq structure, simple */
 #else
 #define _SIZEOF_ADDR_IFREQ(ifr) \
 	sizeof (struct ifreq)
 #endif
-
-#endif /* !_SIZEOF_ADDR_IFREQ */
+#endif   /* !_SIZEOF_ADDR_IFREQ */
 
 /*
  * Enumerate the system's network interface addresses and call the callback
@@ -776,10 +780,14 @@ int
 pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 {
 	struct ifconf ifc;
-	struct ifreq *ifr, *end, addr, mask;
-	char *ptr, *buffer = NULL;
-	size_t n_buffer = 1024;
-	int sock;
+	struct ifreq *ifr,
+			   *end,
+				addr,
+				mask;
+	char	   *ptr,
+			   *buffer = NULL;
+	size_t		n_buffer = 1024;
+	int			sock;
 
 	sock = socket(AF_INET, SOCK_DGRAM, 0);
 	if (sock == -1)
@@ -797,7 +805,7 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 			return -1;
 		}
 
-		memset(&ifc, 0, sizeof (ifc));
+		memset(&ifc, 0, sizeof(ifc));
 		ifc.ifc_buf = buffer = ptr;
 		ifc.ifc_len = n_buffer;
 
@@ -811,32 +819,31 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 		}
 
 		/*
-		 * Some Unixes try to return as much data as possible,
-		 * with no indication of whether enough space allocated.
-		 * Don't believe we have it all unless there's lots of slop.
+		 * Some Unixes try to return as much data as possible, with no
+		 * indication of whether enough space allocated. Don't believe we have
+		 * it all unless there's lots of slop.
 		 */
 		if (ifc.ifc_len < n_buffer - 1024)
 			break;
 	}
 
-	end = (struct ifreq*)(buffer + ifc.ifc_len);
+	end = (struct ifreq *) (buffer + ifc.ifc_len);
 	for (ifr = ifc.ifc_req; ifr < end;)
 	{
 		memcpy(&addr, ifr, sizeof(addr));
 		memcpy(&mask, ifr, sizeof(mask));
 		if (ioctl(sock, SIOCGIFADDR, &addr, sizeof(addr)) == 0 &&
-		    ioctl(sock, SIOCGIFNETMASK, &mask, sizeof(mask)) == 0)
+			ioctl(sock, SIOCGIFNETMASK, &mask, sizeof(mask)) == 0)
 			run_ifaddr_callback(callback, cb_data,
-			                    &addr.ifr_addr, &mask.ifr_addr);
-		ifr = (struct ifreq*)((char*)ifr + _SIZEOF_ADDR_IFREQ(*ifr));
+								&addr.ifr_addr, &mask.ifr_addr);
+		ifr = (struct ifreq *) ((char *) ifr + _SIZEOF_ADDR_IFREQ(*ifr));
 	}
 
 	free(buffer);
 	close(sock);
 	return 0;
 }
-
-#else /* !defined(SIOCGIFCONF) */
+#else							/* !defined(SIOCGIFCONF) */
 
 /*
  * Enumerate the system's network interface addresses and call the callback
@@ -850,6 +857,7 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 {
 	struct sockaddr_in addr;
 	struct sockaddr_storage mask;
+
 #ifdef HAVE_IPV6
 	struct sockaddr_in6 addr6;
 #endif
@@ -861,8 +869,8 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 	memset(&mask, 0, sizeof(mask));
 	pg_sockaddr_cidr_mask(&mask, "8", AF_INET);
 	run_ifaddr_callback(callback, cb_data,
-	                    (struct sockaddr*)&addr,
-	                    (struct sockaddr*)&mask);
+						(struct sockaddr *) & addr,
+						(struct sockaddr *) & mask);
 
 #ifdef HAVE_IPV6
 	/* addr ::1/128 */
@@ -872,13 +880,12 @@ pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data)
 	memset(&mask, 0, sizeof(mask));
 	pg_sockaddr_cidr_mask(&mask, "128", AF_INET6);
 	run_ifaddr_callback(callback, cb_data,
-	                    (struct sockaddr*)&addr6,
-	                    (struct sockaddr*)&mask);
+						(struct sockaddr *) & addr6,
+						(struct sockaddr *) & mask);
 #endif
 
 	return 0;
 }
+#endif   /* !defined(SIOCGIFCONF) */
 
-#endif /* !defined(SIOCGIFCONF) */
-
-#endif /* !HAVE_GETIFADDRS */
+#endif   /* !HAVE_GETIFADDRS */
diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c
index e875f51ed72cc7af493f0c17074472b0ac34bd52..ff4f9eaebf17a553cd050aaaad5c6cb3ac5ba179 100644
--- a/src/backend/libpq/md5.c
+++ b/src/backend/libpq/md5.c
@@ -14,7 +14,7 @@
  *	Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/libpq/md5.c,v 1.39 2010/01/27 12:11:59 mha Exp $
+ *	  $PostgreSQL: pgsql/src/backend/libpq/md5.c,v 1.40 2010/02/26 02:00:43 momjian Exp $
  */
 
 /* This is intended to be used in both frontend and backend, so use c.h */
@@ -298,7 +298,8 @@ pg_md5_hash(const void *buff, size_t len, char *hexsum)
 	return true;
 }
 
-bool pg_md5_binary(const void *buff, size_t len, void *outbuf)
+bool
+pg_md5_binary(const void *buff, size_t len, void *outbuf)
 {
 	if (!calculateDigestFromBuffer((uint8 *) buff, len, outbuf))
 		return false;
@@ -320,6 +321,7 @@ pg_md5_encrypt(const char *passwd, const char *salt, size_t salt_len,
 			   char *buf)
 {
 	size_t		passwd_len = strlen(passwd);
+
 	/* +1 here is just to avoid risk of unportable malloc(0) */
 	char	   *crypt_buf = malloc(passwd_len + salt_len + 1);
 	bool		ret;
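
For context, crypt_buf receives password || salt, which is then MD5'd and
returned as the string "md5" followed by the lowercase hex digest; the salt
is the role name for the value stored in pg_authid (the wire protocol later
layers a random 4-byte salt on top of that stored value).  A standalone
sketch of the stored form, using OpenSSL's MD5() in place of the backend
routine (sketch only; build with -lcrypto):

	#include <stdio.h>
	#include <string.h>
	#include <stdlib.h>
	#include <openssl/md5.h>

	/* "md5" || hex(MD5(password || rolename)); 35 chars plus the NUL */
	static void
	md5_role_password(const char *password, const char *rolename, char out[36])
	{
		size_t		plen = strlen(password);
		size_t		rlen = strlen(rolename);
		unsigned char *buf = malloc(plen + rlen + 1);	/* +1 avoids malloc(0) */
		unsigned char digest[MD5_DIGEST_LENGTH];
		int			i;

		memcpy(buf, password, plen);
		memcpy(buf + plen, rolename, rlen);		/* the "salt" is the role name */
		MD5(buf, plen + rlen, digest);

		strcpy(out, "md5");
		for (i = 0; i < MD5_DIGEST_LENGTH; i++)
			sprintf(out + 3 + i * 2, "%02x", digest[i]);

		free(buf);
	}

For example, md5_role_password("secret", "alice", buf) yields the same
string that ends up in pg_authid.rolpassword for role alice with password
secret.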
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 98b27f1f71acf671e18c890b92d935b2b143afc8..5826aacaf77781765b486b9ec01fa96dcce0f10a 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -30,7 +30,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- *	$PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.204 2010/02/18 11:13:45 heikki Exp $
+ *	$PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.205 2010/02/26 02:00:43 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -55,7 +55,7 @@
  *		pq_peekbyte		- peek at next byte from connection
  *		pq_putbytes		- send bytes to connection (not flushed until pq_flush)
  *		pq_flush		- flush pending output
- *		pq_getbyte_if_available	- get a byte if available without blocking
+ *		pq_getbyte_if_available - get a byte if available without blocking
  *
  * message-level I/O (and old-style-COPY-OUT cruft):
  *		pq_putmessage	- send a normal message (suppressed in COPY OUT mode)
@@ -200,7 +200,8 @@ pq_close(int code, Datum arg)
 		 * transport layer reports connection closure, and you can be sure the
 		 * backend has exited.
 		 *
-		 * We do set sock to PGINVALID_SOCKET to prevent any further I/O, though.
+		 * We do set sock to PGINVALID_SOCKET to prevent any further I/O,
+		 * though.
 		 */
 		MyProcPort->sock = PGINVALID_SOCKET;
 	}
@@ -818,7 +819,7 @@ pq_peekbyte(void)
 
 
 /* --------------------------------
- *		pq_getbyte_if_available	- get a single byte from connection,
+ *		pq_getbyte_if_available - get a single byte from connection,
  *			if available
  *
  * The received byte is stored in *c. Returns 1 if a byte was read,
@@ -828,7 +829,7 @@ pq_peekbyte(void)
 int
 pq_getbyte_if_available(unsigned char *c)
 {
-	int r;
+	int			r;
 
 	if (PqRecvPointer < PqRecvLength)
 	{
@@ -851,18 +852,19 @@ pq_getbyte_if_available(unsigned char *c)
 		if (r < 0)
 		{
 			/*
-			 * Ok if no data available without blocking or interrupted
-			 * (though EINTR really shouldn't happen with a non-blocking
-			 * socket). Report other errors.
+			 * Ok if no data available without blocking or interrupted (though
+			 * EINTR really shouldn't happen with a non-blocking socket).
+			 * Report other errors.
 			 */
 			if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
 				r = 0;
 			else
 			{
 				/*
-				 * Careful: an ereport() that tries to write to the client would
-				 * cause recursion to here, leading to stack overflow and core
-				 * dump!  This message must go *only* to the postmaster log.
+				 * Careful: an ereport() that tries to write to the client
+				 * would cause recursion to here, leading to stack overflow
+				 * and core dump!  This message must go *only* to the
+				 * postmaster log.
 				 */
 				ereport(COMMERROR,
 						(errcode_for_socket_access(),
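
Stripped of the backend specifics, the pattern described in the comments
above amounts to the following sketch (not the backend code): on a
non-blocking descriptor, a read() that has nothing to deliver fails with
EAGAIN/EWOULDBLOCK and is reported as "no byte available yet" rather than as
an error, and EINTR is treated the same way.

	#include <errno.h>
	#include <fcntl.h>
	#include <unistd.h>

	/*
	 * Returns 1 if a byte was read into *c, 0 if no data is currently
	 * available, -1 on EOF or a genuine error.
	 */
	static int
	getbyte_if_available(int fd, unsigned char *c)
	{
		ssize_t		r;

		/* put the descriptor in non-blocking mode (done once in real code) */
		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);

		r = read(fd, c, 1);
		if (r == 1)
			return 1;			/* got a byte */
		if (r == 0)
			return -1;			/* EOF: peer closed the connection */
		if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
			return 0;			/* nothing ready; not an error */
		return -1;				/* real error */
	}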
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 22d24ef21a5817bd06a99486af208a54f38579f0..829dc7ba8d27ef303d3d08155c87bebdaf021038 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -15,7 +15,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.463 2010/02/23 22:51:42 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.464 2010/02/26 02:00:43 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -160,7 +160,7 @@ _copyResult(Result *from)
 static ModifyTable *
 _copyModifyTable(ModifyTable *from)
 {
-	ModifyTable    *newnode = makeNode(ModifyTable);
+	ModifyTable *newnode = makeNode(ModifyTable);
 
 	/*
 	 * copy node superclass fields
@@ -806,7 +806,7 @@ _copySetOp(SetOp *from)
 static LockRows *
 _copyLockRows(LockRows *from)
 {
-	LockRows	   *newnode = makeNode(LockRows);
+	LockRows   *newnode = makeNode(LockRows);
 
 	/*
 	 * copy node superclass fields
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 9a05bcc1c0f1515e4652ac5c47a6345711cb3deb..e97a3ea9daaaf5c98c4a5a302b4932e2e63a6847 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -22,7 +22,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.384 2010/02/23 22:51:42 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.385 2010/02/26 02:00:43 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1572,7 +1572,7 @@ _equalDropTableSpaceStmt(DropTableSpaceStmt *a, DropTableSpaceStmt *b)
 
 static bool
 _equalAlterTableSpaceOptionsStmt(AlterTableSpaceOptionsStmt *a,
-											 AlterTableSpaceOptionsStmt *b)
+								 AlterTableSpaceOptionsStmt *b)
 {
 	COMPARE_STRING_FIELD(tablespacename);
 	COMPARE_NODE_FIELD(options);
diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c
index ef17a9bb32184b445c9b36df3cd782099bd0c7b7..a4deebf896b942d188b6a81a577f504b43c813cb 100644
--- a/src/backend/nodes/params.c
+++ b/src/backend/nodes/params.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/nodes/params.c,v 1.14 2010/01/15 22:36:31 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/nodes/params.c,v 1.15 2010/02/26 02:00:43 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -28,7 +28,7 @@
  *
  * Note: the intent of this function is to make a static, self-contained
  * set of parameter values.  If dynamic parameter hooks are present, we
- * intentionally do not copy them into the result.  Rather, we forcibly
+ * intentionally do not copy them into the result.	Rather, we forcibly
  * instantiate all available parameter values and copy the datum values.
  */
 ParamListInfo
@@ -61,7 +61,7 @@ copyParamList(ParamListInfo from)
 
 		/* give hook a chance in case parameter is dynamic */
 		if (!OidIsValid(oprm->ptype) && from->paramFetch != NULL)
-			(*from->paramFetch) (from, i+1);
+			(*from->paramFetch) (from, i + 1);
 
 		/* flat-copy the parameter info */
 		*nprm = *oprm;
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index f614e056319aacc24f8a48b3aa13a0c59ee11fcb..353ed1aa1ac2979fd5225669b661620a9d0e6d26 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.92 2010/01/02 16:57:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.93 2010/02/26 02:00:43 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -40,7 +40,7 @@ typedef struct
 } Clump;
 
 static List *merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump,
-						 bool force);
+			bool force);
 static bool desirable_join(PlannerInfo *root,
 			   RelOptInfo *outer_rel, RelOptInfo *inner_rel);
 
@@ -156,14 +156,14 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene)
 	/*
 	 * Sometimes, a relation can't yet be joined to others due to heuristics
 	 * or actual semantic restrictions.  We maintain a list of "clumps" of
-	 * successfully joined relations, with larger clumps at the front.
-	 * Each new relation from the tour is added to the first clump it can
-	 * be joined to; if there is none then it becomes a new clump of its own.
-	 * When we enlarge an existing clump we check to see if it can now be
-	 * merged with any other clumps.  After the tour is all scanned, we
-	 * forget about the heuristics and try to forcibly join any remaining
-	 * clumps.  Some forced joins might still fail due to semantics, but
-	 * we should always be able to find some join order that works.
+	 * successfully joined relations, with larger clumps at the front. Each
+	 * new relation from the tour is added to the first clump it can be joined
+	 * to; if there is none then it becomes a new clump of its own. When we
+	 * enlarge an existing clump we check to see if it can now be merged with
+	 * any other clumps.  After the tour is all scanned, we forget about the
+	 * heuristics and try to forcibly join any remaining clumps.  Some forced
+	 * joins might still fail due to semantics, but we should always be able
+	 * to find some join order that works.
 	 */
 	clumps = NIL;
 
@@ -214,7 +214,7 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene)
  * Merge a "clump" into the list of existing clumps for gimme_tree.
  *
  * We try to merge the clump into some existing clump, and repeat if
- * successful.  When no more merging is possible, insert the clump
+ * successful.	When no more merging is possible, insert the clump
  * into the list, preserving the list ordering rule (namely, that
  * clumps of larger size appear earlier).
  *
@@ -265,7 +265,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force)
 
 				/*
 				 * Recursively try to merge the enlarged old_clump with
-				 * others.  When no further merge is possible, we'll reinsert
+				 * others.	When no further merge is possible, we'll reinsert
 				 * it into the list.
 				 */
 				return merge_clump(root, clumps, old_clump, force);
@@ -276,7 +276,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force)
 
 	/*
 	 * No merging is possible, so add new_clump as an independent clump, in
-	 * proper order according to size.  We can be fast for the common case
+	 * proper order according to size.	We can be fast for the common case
 	 * where it has size 1 --- it should always go at the end.
 	 */
 	if (clumps == NIL || new_clump->size == 1)
diff --git a/src/backend/optimizer/geqo/geqo_random.c b/src/backend/optimizer/geqo/geqo_random.c
index 15446541ee73df943ceba18cf6494dc13da2dbe7..9ff5b40ecd284b3513a69f2e51f492c9abe02af5 100644
--- a/src/backend/optimizer/geqo/geqo_random.c
+++ b/src/backend/optimizer/geqo/geqo_random.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_random.c,v 1.2 2010/01/02 16:57:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_random.c,v 1.3 2010/02/26 02:00:44 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -22,8 +22,8 @@ geqo_set_seed(PlannerInfo *root, double seed)
 	GeqoPrivateData *private = (GeqoPrivateData *) root->join_search_private;
 
 	/*
-	 * XXX. This seeding algorithm could certainly be improved - but
-	 * it is not critical to do so.
+	 * XXX. This seeding algorithm could certainly be improved - but it is not
+	 * critical to do so.
 	 */
 	memset(private->random_state, 0, sizeof(private->random_state));
 	memcpy(private->random_state,
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 8437a8a638da192a37f76b649735e8b2974cf49a..52f26d255d951f9fb2643e52061d4040610b5ae7 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.192 2010/01/02 16:57:46 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.193 2010/02/26 02:00:44 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -347,11 +347,11 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 		 * can disregard this child.
 		 *
 		 * As of 8.4, the child rel's targetlist might contain non-Var
-		 * expressions, which means that substitution into the quals
-		 * could produce opportunities for const-simplification, and perhaps
-		 * even pseudoconstant quals.  To deal with this, we strip the
-		 * RestrictInfo nodes, do the substitution, do const-simplification,
-		 * and then reconstitute the RestrictInfo layer.
+		 * expressions, which means that substitution into the quals could
+		 * produce opportunities for const-simplification, and perhaps even
+		 * pseudoconstant quals.  To deal with this, we strip the RestrictInfo
+		 * nodes, do the substitution, do const-simplification, and then
+		 * reconstitute the RestrictInfo layer.
 		 */
 		childquals = get_all_actual_clauses(rel->baserestrictinfo);
 		childquals = (List *) adjust_appendrel_attrs((Node *) childquals,
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 98d60c5ce06a327c00d61bc4cc96afe95c4e91e2..355db7f6844da846564c16ca4ca586691021f645 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -59,7 +59,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.215 2010/02/19 21:49:10 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.216 2010/02/26 02:00:44 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -635,11 +635,11 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
 		pages_fetched = ceil(pages_fetched);
 
 	/*
-	 * For small numbers of pages we should charge spc_random_page_cost apiece,
-	 * while if nearly all the table's pages are being read, it's more
-	 * appropriate to charge spc_seq_page_cost apiece.	The effect is nonlinear,
-	 * too. For lack of a better idea, interpolate like this to determine the
-	 * cost per page.
+	 * For small numbers of pages we should charge spc_random_page_cost
+	 * apiece, while if nearly all the table's pages are being read, it's more
+	 * appropriate to charge spc_seq_page_cost apiece.	The effect is
+	 * nonlinear, too. For lack of a better idea, interpolate like this to
+	 * determine the cost per page.
 	 */
 	if (pages_fetched >= 2.0)
 		cost_per_page = spc_random_page_cost -
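
(The interpolation referred to here is, roughly, cost_per_page =
spc_random_page_cost - (spc_random_page_cost - spc_seq_page_cost) *
sqrt(pages_fetched / T), where T is the number of pages in the relation,
with cost_per_page simply left at spc_random_page_cost when fewer than two
pages are fetched.)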
@@ -936,13 +936,13 @@ cost_functionscan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
 	 *
 	 * Currently, nodeFunctionscan.c always executes the function to
 	 * completion before returning any rows, and caches the results in a
-	 * tuplestore.  So the function eval cost is all startup cost, and
-	 * per-row costs are minimal.
+	 * tuplestore.	So the function eval cost is all startup cost, and per-row
+	 * costs are minimal.
 	 *
 	 * XXX in principle we ought to charge tuplestore spill costs if the
 	 * number of rows is large.  However, given how phony our rowcount
-	 * estimates for functions tend to be, there's not a lot of point
-	 * in that refinement right now.
+	 * estimates for functions tend to be, there's not a lot of point in that
+	 * refinement right now.
 	 */
 	cost_qual_eval_node(&exprcost, rte->funcexpr, root);
 
@@ -1230,7 +1230,7 @@ cost_material(Path *path,
 	 * if it is exactly the same then there will be a cost tie between
 	 * nestloop with A outer, materialized B inner and nestloop with B outer,
 	 * materialized A inner.  The extra cost ensures we'll prefer
-	 * materializing the smaller rel.)  Note that this is normally a good deal
+	 * materializing the smaller rel.)	Note that this is normally a good deal
 	 * less than cpu_tuple_cost; which is OK because a Material plan node
 	 * doesn't do qual-checking or projection, so it's got less overhead than
 	 * most plan nodes.
@@ -1526,9 +1526,10 @@ cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
 		{
 			run_cost += (outer_path_rows - outer_matched_rows) *
 				inner_rescan_run_cost / inner_path_rows;
+
 			/*
-			 * We won't be evaluating any quals at all for these rows,
-			 * so don't add them to ntuples.
+			 * We won't be evaluating any quals at all for these rows, so
+			 * don't add them to ntuples.
 			 */
 		}
 		else
@@ -1568,10 +1569,10 @@ cost_nestloop(NestPath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
  * Unlike other costsize functions, this routine makes one actual decision:
  * whether we should materialize the inner path.  We do that either because
  * the inner path can't support mark/restore, or because it's cheaper to
- * use an interposed Material node to handle mark/restore.  When the decision
+ * use an interposed Material node to handle mark/restore.	When the decision
  * is cost-based it would be logically cleaner to build and cost two separate
  * paths with and without that flag set; but that would require repeating most
- * of the calculations here, which are not all that cheap.  Since the choice
+ * of the calculations here, which are not all that cheap.	Since the choice
  * will not affect output pathkeys or startup cost, only total cost, there is
  * no possibility of wanting to keep both paths.  So it seems best to make
  * the decision here and record it in the path's materialize_inner field.
@@ -1826,14 +1827,15 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
 
 	/*
 	 * Decide whether we want to materialize the inner input to shield it from
-	 * mark/restore and performing re-fetches.  Our cost model for regular
+	 * mark/restore and performing re-fetches.	Our cost model for regular
 	 * re-fetches is that a re-fetch costs the same as an original fetch,
 	 * which is probably an overestimate; but on the other hand we ignore the
 	 * bookkeeping costs of mark/restore.  Not clear if it's worth developing
-	 * a more refined model.  So we just need to inflate the inner run cost
-	 * by rescanratio.
+	 * a more refined model.  So we just need to inflate the inner run cost by
+	 * rescanratio.
 	 */
 	bare_inner_cost = inner_run_cost * rescanratio;
+
 	/*
 	 * When we interpose a Material node the re-fetch cost is assumed to be
 	 * just cpu_operator_cost per tuple, independently of the underlying
@@ -1842,7 +1844,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
 	 * never spill to disk, since it only has to remember tuples back to the
 	 * last mark.  (If there are a huge number of duplicates, our other cost
 	 * factors will make the path so expensive that it probably won't get
-	 * chosen anyway.)  So we don't use cost_rescan here.
+	 * chosen anyway.)	So we don't use cost_rescan here.
 	 *
 	 * Note: keep this estimate in sync with create_mergejoin_plan's labeling
 	 * of the generated Material node.
@@ -1853,6 +1855,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
 	/* Prefer materializing if it looks cheaper */
 	if (mat_inner_cost < bare_inner_cost)
 		path->materialize_inner = true;
+
 	/*
 	 * Even if materializing doesn't look cheaper, we *must* do it if the
 	 * inner path is to be used directly (without sorting) and it doesn't
@@ -1868,6 +1871,7 @@ cost_mergejoin(MergePath *path, PlannerInfo *root, SpecialJoinInfo *sjinfo)
 	else if (innersortkeys == NIL &&
 			 !ExecSupportsMarkRestore(inner_path->pathtype))
 		path->materialize_inner = true;
+
 	/*
 	 * Also, force materializing if the inner path is to be sorted and the
 	 * sort is expected to spill to disk.  This is because the final merge
@@ -2323,10 +2327,10 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
 /*
  * cost_rescan
  *		Given a finished Path, estimate the costs of rescanning it after
- *		having done so the first time.  For some Path types a rescan is
+ *		having done so the first time.	For some Path types a rescan is
  *		cheaper than an original scan (if no parameters change), and this
  *		function embodies knowledge about that.  The default is to return
- *		the same costs stored in the Path.  (Note that the cost estimates
+ *		the same costs stored in the Path.	(Note that the cost estimates
  *		actually stored in Paths are always for first scans.)
  *
  * This function is not currently intended to model effects such as rescans
@@ -2336,23 +2340,25 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
  */
 static void
 cost_rescan(PlannerInfo *root, Path *path,
-			Cost *rescan_startup_cost,		/* output parameters */
+			Cost *rescan_startup_cost,	/* output parameters */
 			Cost *rescan_total_cost)
 {
 	switch (path->pathtype)
 	{
 		case T_FunctionScan:
+
 			/*
-			 * Currently, nodeFunctionscan.c always executes the function
-			 * to completion before returning any rows, and caches the
-			 * results in a tuplestore.  So the function eval cost is
-			 * all startup cost and isn't paid over again on rescans.
-			 * However, all run costs will be paid over again.
+			 * Currently, nodeFunctionscan.c always executes the function to
+			 * completion before returning any rows, and caches the results in
+			 * a tuplestore.  So the function eval cost is all startup cost
+			 * and isn't paid over again on rescans. However, all run costs
+			 * will be paid over again.
 			 */
 			*rescan_startup_cost = 0;
 			*rescan_total_cost = path->total_cost - path->startup_cost;
 			break;
 		case T_HashJoin:
+
 			/*
 			 * Assume that all of the startup cost represents hash table
 			 * building, which we won't have to do over.
@@ -2365,14 +2371,14 @@ cost_rescan(PlannerInfo *root, Path *path,
 			{
 				/*
 				 * These plan types materialize their final result in a
-				 * tuplestore or tuplesort object.  So the rescan cost is only
+				 * tuplestore or tuplesort object.	So the rescan cost is only
 				 * cpu_tuple_cost per tuple, unless the result is large enough
 				 * to spill to disk.
 				 */
-				Cost	run_cost = cpu_tuple_cost * path->parent->rows;
-				double	nbytes = relation_byte_size(path->parent->rows,
-													path->parent->width);
-				long	work_mem_bytes = work_mem * 1024L;
+				Cost		run_cost = cpu_tuple_cost * path->parent->rows;
+				double		nbytes = relation_byte_size(path->parent->rows,
+														path->parent->width);
+				long		work_mem_bytes = work_mem * 1024L;
 
 				if (nbytes > work_mem_bytes)
 				{
@@ -2389,17 +2395,17 @@ cost_rescan(PlannerInfo *root, Path *path,
 		case T_Sort:
 			{
 				/*
-				 * These plan types not only materialize their results, but
-				 * do not implement qual filtering or projection.  So they
-				 * are even cheaper to rescan than the ones above.  We charge
-				 * only cpu_operator_cost per tuple.  (Note: keep that in
-				 * sync with the run_cost charge in cost_sort, and also see
-				 * comments in cost_material before you change it.)
+				 * These plan types not only materialize their results, but do
+				 * not implement qual filtering or projection.	So they are
+				 * even cheaper to rescan than the ones above.	We charge only
+				 * cpu_operator_cost per tuple.  (Note: keep that in sync with
+				 * the run_cost charge in cost_sort, and also see comments in
+				 * cost_material before you change it.)
 				 */
-				Cost	run_cost = cpu_operator_cost * path->parent->rows;
-				double	nbytes = relation_byte_size(path->parent->rows,
-													path->parent->width);
-				long	work_mem_bytes = work_mem * 1024L;
+				Cost		run_cost = cpu_operator_cost * path->parent->rows;
+				double		nbytes = relation_byte_size(path->parent->rows,
+														path->parent->width);
+				long		work_mem_bytes = work_mem * 1024L;
 
 				if (nbytes > work_mem_bytes)
 				{
@@ -3212,8 +3218,8 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
 		{
 			/*
 			 * We could be looking at an expression pulled up from a subquery,
-			 * or a ROW() representing a whole-row child Var, etc.  Do what
-			 * we can using the expression type information.
+			 * or a ROW() representing a whole-row child Var, etc.	Do what we
+			 * can using the expression type information.
 			 */
 			int32		item_width;
 
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index a2b6319d5e7b43b40d08563a9829a4b9196d1060..75219d0f33406d8edba2fed686c0c85fa61b422c 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -10,7 +10,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/path/equivclass.c,v 1.22 2010/01/02 16:57:46 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/path/equivclass.c,v 1.23 2010/02/26 02:00:44 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -115,14 +115,13 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
 	item2_relids = restrictinfo->right_relids;
 
 	/*
-	 * Reject clauses of the form X=X.  These are not as redundant as they
+	 * Reject clauses of the form X=X.	These are not as redundant as they
 	 * might seem at first glance: assuming the operator is strict, this is
-	 * really an expensive way to write X IS NOT NULL.  So we must not risk
-	 * just losing the clause, which would be possible if there is already
-	 * a single-element EquivalenceClass containing X.  The case is not
-	 * common enough to be worth contorting the EC machinery for, so just
-	 * reject the clause and let it be processed as a normal restriction
-	 * clause.
+	 * really an expensive way to write X IS NOT NULL.	So we must not risk
+	 * just losing the clause, which would be possible if there is already a
+	 * single-element EquivalenceClass containing X.  The case is not common
+	 * enough to be worth contorting the EC machinery for, so just reject the
+	 * clause and let it be processed as a normal restriction clause.
 	 */
 	if (equal(item1, item2))
 		return false;			/* X=X is not a useful equivalence */
@@ -367,7 +366,7 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
  *	  EquivalenceClass for it.
  *
  * sortref is the SortGroupRef of the originating SortGroupClause, if any,
- * or zero if not.  (It should never be zero if the expression is volatile!)
+ * or zero if not.	(It should never be zero if the expression is volatile!)
  *
  * This can be used safely both before and after EquivalenceClass merging;
  * since it never causes merging it does not invalidate any existing ECs
@@ -448,7 +447,7 @@ get_eclass_for_sort_expr(PlannerInfo *root,
 	newec->ec_sortref = sortref;
 	newec->ec_merged = NULL;
 
-	if (newec->ec_has_volatile && sortref == 0)		/* should not happen */
+	if (newec->ec_has_volatile && sortref == 0) /* should not happen */
 		elog(ERROR, "volatile EquivalenceClass has no sortref");
 
 	newem = add_eq_member(newec, expr, pull_varnos((Node *) expr),
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 3cf971c9c0faa80f45613fa3e190514008681f01..2c97bea3fa3a346f05a54904859b082e3872786c 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.245 2010/01/02 16:57:46 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.246 2010/02/26 02:00:44 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1944,8 +1944,8 @@ relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel,
 	/* Examine each index of the relation ... */
 	foreach(ic, rel->indexlist)
 	{
-		IndexOptInfo   *ind = (IndexOptInfo *) lfirst(ic);
-		int				c;
+		IndexOptInfo *ind = (IndexOptInfo *) lfirst(ic);
+		int			c;
 
 		/*
 		 * If the index is not unique or if it's a partial index that doesn't
@@ -1964,13 +1964,13 @@ relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel,
 
 			foreach(lc, restrictlist)
 			{
-				RestrictInfo   *rinfo = (RestrictInfo *) lfirst(lc);
-				Node   *rexpr;
+				RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+				Node	   *rexpr;
 
 				/*
 				 * The condition's equality operator must be a member of the
-				 * index opfamily, else it is not asserting the right kind
-				 * of equality behavior for this index.  We check this first
+				 * index opfamily, else it is not asserting the right kind of
+				 * equality behavior for this index.  We check this first
 				 * since it's probably cheaper than match_index_to_operand().
 				 */
 				if (!list_member_oid(rinfo->mergeopfamilies, ind->opfamily[c]))
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 2e208cb6210bb184d41efc4f285ef186b489ea98..35c9353d2e21888000e874346e9dece81b765b5c 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.129 2010/01/05 23:25:36 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.130 2010/02/26 02:00:44 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -161,7 +161,7 @@ add_paths_to_joinrel(PlannerInfo *root,
  * We already know that the clause is a binary opclause referencing only the
  * rels in the current join.  The point here is to check whether it has the
  * form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr",
- * rather than mixing outer and inner vars on either side.  If it matches,
+ * rather than mixing outer and inner vars on either side.	If it matches,
  * we set the transient flag outer_is_left to identify which side is which.
  */
 static inline bool
@@ -212,7 +212,7 @@ join_is_removable(PlannerInfo *root,
 
 	/*
 	 * Currently, we only know how to remove left joins to a baserel with
-	 * unique indexes.  We can check most of these criteria pretty trivially
+	 * unique indexes.	We can check most of these criteria pretty trivially
 	 * to avoid doing useless extra work.  But checking whether any of the
 	 * indexes are unique would require iterating over the indexlist, so for
 	 * now we just make sure there are indexes of some sort or other.  If none
@@ -225,13 +225,12 @@ join_is_removable(PlannerInfo *root,
 		return false;
 
 	/*
-	 * We can't remove the join if any inner-rel attributes are used above
-	 * the join.
+	 * We can't remove the join if any inner-rel attributes are used above the
+	 * join.
 	 *
-	 * Note that this test only detects use of inner-rel attributes in
-	 * higher join conditions and the target list.  There might be such
-	 * attributes in pushed-down conditions at this join, too.  We check
-	 * that case below.
+	 * Note that this test only detects use of inner-rel attributes in higher
+	 * join conditions and the target list.  There might be such attributes in
+	 * pushed-down conditions at this join, too.  We check that case below.
 	 *
 	 * As a micro-optimization, it seems better to start with max_attr and
 	 * count down rather than starting with min_attr and counting up, on the
@@ -249,9 +248,9 @@ join_is_removable(PlannerInfo *root,
 	/*
 	 * Search for mergejoinable clauses that constrain the inner rel against
 	 * either the outer rel or a pseudoconstant.  If an operator is
-	 * mergejoinable then it behaves like equality for some btree opclass,
-	 * so it's what we want.  The mergejoinability test also eliminates
-	 * clauses containing volatile functions, which we couldn't depend on.
+	 * mergejoinable then it behaves like equality for some btree opclass, so
+	 * it's what we want.  The mergejoinability test also eliminates clauses
+	 * containing volatile functions, which we couldn't depend on.
 	 */
 	foreach(l, restrictlist)
 	{
@@ -259,10 +258,10 @@ join_is_removable(PlannerInfo *root,
 
 		/*
 		 * If we find a pushed-down clause, it must have come from above the
-		 * outer join and it must contain references to the inner rel.  (If
-		 * it had only outer-rel variables, it'd have been pushed down into
-		 * the outer rel.)  Therefore, we can conclude that join removal
-		 * is unsafe without any examination of the clause contents.
+		 * outer join and it must contain references to the inner rel.	(If it
+		 * had only outer-rel variables, it'd have been pushed down into the
+		 * outer rel.)	Therefore, we can conclude that join removal is unsafe
+		 * without any examination of the clause contents.
 		 */
 		if (restrictinfo->is_pushed_down)
 			return false;
@@ -289,15 +288,15 @@ join_is_removable(PlannerInfo *root,
 
 		/*
 		 * Note: can_join won't be set for a restriction clause, but
-		 * mergeopfamilies will be if it has a mergejoinable operator
-		 * and doesn't contain volatile functions.
+		 * mergeopfamilies will be if it has a mergejoinable operator and
+		 * doesn't contain volatile functions.
 		 */
 		if (restrictinfo->mergeopfamilies == NIL)
 			continue;			/* not mergejoinable */
 
 		/*
-		 * The clause certainly doesn't refer to anything but the given
-		 * rel.  If either side is pseudoconstant then we can use it.
+		 * The clause certainly doesn't refer to anything but the given rel.
+		 * If either side is pseudoconstant then we can use it.
 		 */
 		if (bms_is_empty(restrictinfo->left_relids))
 		{
@@ -340,13 +339,13 @@ generate_outer_only(PlannerInfo *root, RelOptInfo *joinrel,
 	/*
 	 * For the moment, replicate all of the outerrel's paths as join paths.
 	 * Some of them might not really be interesting above the join, if they
-	 * have sort orderings that have no real use except to do a mergejoin
-	 * for the join we've just found we don't need.  But distinguishing that
-	 * case probably isn't worth the extra code it would take.
+	 * have sort orderings that have no real use except to do a mergejoin for
+	 * the join we've just found we don't need.  But distinguishing that case
+	 * probably isn't worth the extra code it would take.
 	 */
 	foreach(lc, outerrel->pathlist)
 	{
-		Path   *outerpath = (Path *) lfirst(lc);
+		Path	   *outerpath = (Path *) lfirst(lc);
 
 		add_path(joinrel, (Path *)
 				 create_noop_path(root, joinrel, outerpath));
@@ -1189,8 +1188,8 @@ select_mergejoin_clauses(PlannerInfo *root,
 			restrictinfo->mergeopfamilies == NIL)
 		{
 			/*
-			 * The executor can handle extra joinquals that are constants,
-			 * but not anything else, when doing right/full merge join.  (The
+			 * The executor can handle extra joinquals that are constants, but
+			 * not anything else, when doing right/full merge join.  (The
 			 * reason to support constants is so we can do FULL JOIN ON
 			 * FALSE.)
 			 */
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 3a332473080d8ae2257ce76838ed17ad1ddc13ed..e781ad5c1a841bd83b4f878f454081dcb85d2ee0 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.104 2010/01/02 16:57:47 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.105 2010/02/26 02:00:45 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -373,10 +373,10 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
 			continue;
 
 		/*
-		 * If it's a semijoin and we already joined the RHS to any other
-		 * rels within either input, then we must have unique-ified the RHS
-		 * at that point (see below).  Therefore the semijoin is no longer
-		 * relevant in this join path.
+		 * If it's a semijoin and we already joined the RHS to any other rels
+		 * within either input, then we must have unique-ified the RHS at that
+		 * point (see below).  Therefore the semijoin is no longer relevant in
+		 * this join path.
 		 */
 		if (sjinfo->jointype == JOIN_SEMI)
 		{
@@ -495,9 +495,9 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
 	}
 
 	/*
-	 * Fail if violated some SJ's RHS and didn't match to another SJ.
-	 * However, "matching" to a semijoin we are implementing by
-	 * unique-ification doesn't count (think: it's really an inner join).
+	 * Fail if violated some SJ's RHS and didn't match to another SJ. However,
+	 * "matching" to a semijoin we are implementing by unique-ification
+	 * doesn't count (think: it's really an inner join).
 	 */
 	if (!is_valid_inner &&
 		(match_sjinfo == NULL || unique_ified))
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 066cfbdb3563bf935358dfd7b66cb2c158d4029b..3f0c2fe90404c269d0511f7013ce1175e148f35c 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -11,7 +11,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.100 2010/01/02 16:57:47 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.101 2010/02/26 02:00:45 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -637,12 +637,12 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
 							0);
 
 				/*
-				 * Note: it might look funny to be setting sortref = 0 for
-				 * a reference to a volatile sub_eclass.  However, the
-				 * expression is *not* volatile in the outer query: it's
-				 * just a Var referencing whatever the subquery emitted.
-				 * (IOW, the outer query isn't going to re-execute the
-				 * volatile expression itself.)  So this is okay.
+				 * Note: it might look funny to be setting sortref = 0 for a
+				 * reference to a volatile sub_eclass.	However, the
+				 * expression is *not* volatile in the outer query: it's just
+				 * a Var referencing whatever the subquery emitted. (IOW, the
+				 * outer query isn't going to re-execute the volatile
+				 * expression itself.)	So this is okay.
 				 */
 				outer_ec =
 					get_eclass_for_sort_expr(root,
@@ -1000,7 +1000,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
 		 * It's possible that multiple matching clauses might have different
 		 * ECs on the other side, in which case the order we put them into our
 		 * result makes a difference in the pathkeys required for the other
-		 * input path.  However this routine hasn't got any info about which
+		 * input path.	However this routine hasn't got any info about which
 		 * order would be best, so we don't worry about that.
 		 *
 		 * It's also possible that the selected mergejoin clauses produce
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 5c35f77ec2d67d9c400c9a07724d5b6e36209bd0..db47054ecdcd4b2bba978bbfec6ad91af6afe6c0 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -10,7 +10,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.272 2010/02/19 21:49:10 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.273 2010/02/26 02:00:45 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1694,8 +1694,8 @@ create_mergejoin_plan(PlannerInfo *root,
 		innerpathkeys = best_path->jpath.innerjoinpath->pathkeys;
 
 	/*
-	 * If specified, add a materialize node to shield the inner plan from
-	 * the need to handle mark/restore.
+	 * If specified, add a materialize node to shield the inner plan from the
+	 * need to handle mark/restore.
 	 */
 	if (best_path->materialize_inner)
 	{
@@ -1754,9 +1754,9 @@ create_mergejoin_plan(PlannerInfo *root,
 		Assert(ieclass != NULL);
 
 		/*
-		 * For debugging purposes, we check that the eclasses match the
-		 * paths' pathkeys.  In typical cases the merge clauses are one-to-one
-		 * with the pathkeys, but when dealing with partially redundant query
+		 * For debugging purposes, we check that the eclasses match the paths'
+		 * pathkeys.  In typical cases the merge clauses are one-to-one with
+		 * the pathkeys, but when dealing with partially redundant query
 		 * conditions, we might have clauses that re-reference earlier path
 		 * keys.  The case that we need to reject is where a pathkey is
 		 * entirely skipped over.
@@ -1861,9 +1861,9 @@ create_mergejoin_plan(PlannerInfo *root,
 	}
 
 	/*
-	 * Note: it is not an error if we have additional pathkey elements
-	 * (i.e., lop or lip isn't NULL here).  The input paths might be
-	 * better-sorted than we need for the current mergejoin.
+	 * Note: it is not an error if we have additional pathkey elements (i.e.,
+	 * lop or lip isn't NULL here).  The input paths might be better-sorted
+	 * than we need for the current mergejoin.
 	 */
 
 	/*
@@ -3751,7 +3751,7 @@ make_result(PlannerInfo *root,
  *	  Build a ModifyTable plan node
  *
  * Currently, we don't charge anything extra for the actual table modification
- * work, nor for the RETURNING expressions if any.  It would only be window
+ * work, nor for the RETURNING expressions if any.	It would only be window
  * dressing, since these are always top-level nodes and there is no way for
  * the costs to change any higher-level planning choices.  But we might want
  * to make it look better sometime.
@@ -3781,7 +3781,7 @@ make_modifytable(CmdType operation, List *resultRelations,
 	{
 		Plan	   *subplan = (Plan *) lfirst(subnode);
 
-		if (subnode == list_head(subplans))	/* first node? */
+		if (subnode == list_head(subplans))		/* first node? */
 			plan->startup_cost = subplan->startup_cost;
 		plan->total_cost += subplan->total_cost;
 		plan->plan_rows += subplan->plan_rows;
@@ -3798,8 +3798,8 @@ make_modifytable(CmdType operation, List *resultRelations,
 
 	/*
 	 * Set up the visible plan targetlist as being the same as the first
-	 * RETURNING list.  This is for the use of EXPLAIN; the executor won't
-	 * pay any attention to the targetlist.
+	 * RETURNING list.	This is for the use of EXPLAIN; the executor won't pay
+	 * any attention to the targetlist.
 	 */
 	if (returningLists)
 		node->plan.targetlist = copyObject(linitial(returningLists));
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index a3e709357034238f8c375131c511e258a9b6ffd3..f8e1d523bb2e9a2ab71f469fc5da5eb547e2f1a2 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.157 2010/01/02 16:57:47 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.158 2010/02/26 02:00:45 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -560,8 +560,8 @@ make_outerjoininfo(PlannerInfo *root,
 	 * FOR UPDATE applied to a view.  Only after rewriting and flattening do
 	 * we know whether the view contains an outer join.
 	 *
-	 * We use the original RowMarkClause list here; the PlanRowMark list
-	 * would list everything.
+	 * We use the original RowMarkClause list here; the PlanRowMark list would
+	 * list everything.
 	 */
 	foreach(l, root->parse->rowMarks)
 	{
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 77e9d65ae7e30c4440821a1f80c5c9238ed9a9b0..356fe17df4bbce126a28d5604c01fbdb019e7408 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.265 2010/02/12 17:33:20 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.266 2010/02/26 02:00:45 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -370,7 +370,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
 	}
 
 	/*
-	 * Preprocess RowMark information.  We need to do this after subquery
+	 * Preprocess RowMark information.	We need to do this after subquery
 	 * pullup (so that all non-inherited RTEs are present) and before
 	 * inheritance expansion (so that the info is available for
 	 * expand_inherited_tables to examine and modify).
@@ -525,8 +525,8 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
 		/* If it's not SELECT, we need a ModifyTable node */
 		if (parse->commandType != CMD_SELECT)
 		{
-			List   *returningLists;
-			List   *rowMarks;
+			List	   *returningLists;
+			List	   *rowMarks;
 
 			/*
 			 * Deal with the RETURNING clause if any.  It's convenient to pass
@@ -542,7 +542,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
 				rlist = set_returning_clause_references(root->glob,
 														parse->returningList,
 														plan,
-														parse->resultRelation);
+													  parse->resultRelation);
 				returningLists = list_make1(rlist);
 			}
 			else
@@ -559,7 +559,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
 				rowMarks = root->rowMarks;
 
 			plan = (Plan *) make_modifytable(parse->commandType,
-											 copyObject(root->resultRelations),
+										   copyObject(root->resultRelations),
 											 list_make1(plan),
 											 returningLists,
 											 rowMarks,
@@ -614,11 +614,11 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
 	 * Simplify constant expressions.
 	 *
 	 * Note: an essential effect of this is to convert named-argument function
-	 * calls to positional notation and insert the current actual values
-	 * of any default arguments for functions.	To ensure that happens, we
-	 * *must* process all expressions here.  Previous PG versions sometimes
-	 * skipped const-simplification if it didn't seem worth the trouble, but
-	 * we can't do that anymore.
+	 * calls to positional notation and insert the current actual values of
+	 * any default arguments for functions.  To ensure that happens, we *must*
+	 * process all expressions here.  Previous PG versions sometimes skipped
+	 * const-simplification if it didn't seem worth the trouble, but we can't
+	 * do that anymore.
 	 *
 	 * Note: this also flattens nested AND and OR expressions into N-argument
 	 * form.  All processing of a qual expression after this point must be
@@ -783,7 +783,7 @@ inheritance_planner(PlannerInfo *root)
 			List	   *rlist;
 
 			rlist = set_returning_clause_references(root->glob,
-													subroot.parse->returningList,
+												subroot.parse->returningList,
 													subplan,
 													appinfo->child_relid);
 			returningLists = lappend(returningLists, rlist);
@@ -796,8 +796,8 @@ inheritance_planner(PlannerInfo *root)
 	root->query_pathkeys = NIL;
 
 	/*
-	 * If we managed to exclude every child rel, return a dummy plan;
-	 * it doesn't even need a ModifyTable node.
+	 * If we managed to exclude every child rel, return a dummy plan; it
+	 * doesn't even need a ModifyTable node.
 	 */
 	if (subplans == NIL)
 	{
@@ -825,9 +825,9 @@ inheritance_planner(PlannerInfo *root)
 	parse->rtable = rtable;
 
 	/*
-	 * If there was a FOR UPDATE/SHARE clause, the LockRows node will
-	 * have dealt with fetching non-locked marked rows, else we need
-	 * to have ModifyTable do that.
+	 * If there was a FOR UPDATE/SHARE clause, the LockRows node will have
+	 * dealt with fetching non-locked marked rows, else we need to have
+	 * ModifyTable do that.
 	 */
 	if (parse->rowMarks)
 		rowMarks = NIL;
@@ -837,7 +837,7 @@ inheritance_planner(PlannerInfo *root)
 	/* And last, tack on a ModifyTable node to do the UPDATE/DELETE work */
 	return (Plan *) make_modifytable(parse->commandType,
 									 copyObject(root->resultRelations),
-									 subplans, 
+									 subplans,
 									 returningLists,
 									 rowMarks,
 									 SS_assign_special_param(root));
@@ -1121,8 +1121,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 		}
 		else
 		{
-			path_rows = 1;				/* assume non-set result */
-			path_width = 100;			/* arbitrary */
+			path_rows = 1;		/* assume non-set result */
+			path_width = 100;	/* arbitrary */
 		}
 
 		if (parse->groupClause)
@@ -1424,8 +1424,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 			 * WindowFuncs.  It's probably not worth trying to optimize that
 			 * though.)  We also need any volatile sort expressions, because
 			 * make_sort_from_pathkeys won't add those on its own, and anyway
-			 * we want them evaluated only once at the bottom of the stack.
-			 * As we climb up the stack, we add outputs for the WindowFuncs
+			 * we want them evaluated only once at the bottom of the stack. As
+			 * we climb up the stack, we add outputs for the WindowFuncs
 			 * computed at each level.	Also, each input tlist has to present
 			 * all the columns needed to sort the data for the next WindowAgg
 			 * step.  That's handled internally by make_sort_from_pathkeys,
@@ -1659,16 +1659,17 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 	}
 
 	/*
-	 * If there is a FOR UPDATE/SHARE clause, add the LockRows node.
-	 * (Note: we intentionally test parse->rowMarks not root->rowMarks here.
-	 * If there are only non-locking rowmarks, they should be handled by
-	 * the ModifyTable node instead.)
+	 * If there is a FOR UPDATE/SHARE clause, add the LockRows node. (Note: we
+	 * intentionally test parse->rowMarks not root->rowMarks here. If there
+	 * are only non-locking rowmarks, they should be handled by the
+	 * ModifyTable node instead.)
 	 */
 	if (parse->rowMarks)
 	{
 		result_plan = (Plan *) make_lockrows(result_plan,
 											 root->rowMarks,
 											 SS_assign_special_param(root));
+
 		/*
 		 * The result can no longer be assumed sorted, since locking might
 		 * cause the sort key columns to be replaced with new values.
@@ -1811,9 +1812,9 @@ preprocess_rowmarks(PlannerInfo *root)
 	}
 
 	/*
-	 * We need to have rowmarks for all base relations except the target.
-	 * We make a bitmapset of all base rels and then remove the items we
-	 * don't need or have FOR UPDATE/SHARE marks for.
+	 * We need to have rowmarks for all base relations except the target. We
+	 * make a bitmapset of all base rels and then remove the items we don't
+	 * need or have FOR UPDATE/SHARE marks for.
 	 */
 	rels = get_base_rel_indexes((Node *) parse->jointree);
 	if (parse->resultRelation)
@@ -1831,16 +1832,16 @@ preprocess_rowmarks(PlannerInfo *root)
 
 		/*
 		 * Currently, it is syntactically impossible to have FOR UPDATE
-		 * applied to an update/delete target rel.  If that ever becomes
+		 * applied to an update/delete target rel.	If that ever becomes
 		 * possible, we should drop the target from the PlanRowMark list.
 		 */
 		Assert(rc->rti != parse->resultRelation);
 
 		/*
-		 * Ignore RowMarkClauses for subqueries; they aren't real tables
-		 * and can't support true locking.  Subqueries that got flattened
-		 * into the main query should be ignored completely.  Any that didn't
-		 * will get ROW_MARK_COPY items in the next loop.
+		 * Ignore RowMarkClauses for subqueries; they aren't real tables and
+		 * can't support true locking.  Subqueries that got flattened into the
+		 * main query should be ignored completely.  Any that didn't will get
+		 * ROW_MARK_COPY items in the next loop.
 		 */
 		if (rte->rtekind != RTE_RELATION)
 			continue;
@@ -1883,7 +1884,7 @@ preprocess_rowmarks(PlannerInfo *root)
 			newrc->markType = ROW_MARK_REFERENCE;
 		else
 			newrc->markType = ROW_MARK_COPY;
-		newrc->noWait = false;			/* doesn't matter */
+		newrc->noWait = false;	/* doesn't matter */
 		newrc->isParent = false;
 		/* attnos will be assigned in preprocess_targetlist */
 		newrc->ctidAttNo = InvalidAttrNumber;
@@ -2196,7 +2197,7 @@ choose_hashed_grouping(PlannerInfo *root,
 
 	/*
 	 * Executor doesn't support hashed aggregation with DISTINCT or ORDER BY
-	 * aggregates.  (Doing so would imply storing *all* the input values in
+	 * aggregates.	(Doing so would imply storing *all* the input values in
 	 * the hash table, and/or running many sorts in parallel, either of which
 	 * seems like a certain loser.)
 	 */
@@ -2364,8 +2365,8 @@ choose_hashed_distinct(PlannerInfo *root,
 	Path		sorted_p;
 
 	/*
-	 * If we have a sortable DISTINCT ON clause, we always use sorting.
-	 * This enforces the expected behavior of DISTINCT ON.
+	 * If we have a sortable DISTINCT ON clause, we always use sorting. This
+	 * enforces the expected behavior of DISTINCT ON.
 	 */
 	can_sort = grouping_is_sortable(parse->distinctClause);
 	if (can_sort && parse->hasDistinctOn)
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index d1d875d3cf0a8e03c7bc3cdf2d5fd02bc1fb84ac..70be2e66f2d20d7dcd7e64cc45fc98e9f37fcfa9 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.159 2010/02/14 18:42:15 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.160 2010/02/26 02:00:45 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -431,8 +431,8 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
 
 				/*
 				 * Like the plan types above, LockRows doesn't evaluate its
-				 * tlist or quals.  But we have to fix up the RT indexes
-				 * in its rowmarks.
+				 * tlist or quals.	But we have to fix up the RT indexes in
+				 * its rowmarks.
 				 */
 				set_dummy_tlist_references(plan, rtoffset);
 				Assert(splan->plan.qual == NIL);
@@ -471,7 +471,7 @@ set_plan_refs(PlannerGlobal *glob, Plan *plan, int rtoffset)
 			break;
 		case T_WindowAgg:
 			{
-				WindowAgg	   *wplan = (WindowAgg *) plan;
+				WindowAgg  *wplan = (WindowAgg *) plan;
 
 				set_upper_references(glob, plan, rtoffset);
 
@@ -1514,7 +1514,7 @@ search_indexed_tlist_for_sortgroupref(Node *node,
 							 exprType((Node *) tle->expr),
 							 exprTypmod((Node *) tle->expr),
 							 0);
-			newvar->varnoold = 0;	/* wasn't ever a plain Var */
+			newvar->varnoold = 0;		/* wasn't ever a plain Var */
 			newvar->varoattno = 0;
 			return newvar;
 		}
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 202243c35b9ac2fc830e48a8c90f753cf1025b23..16dbc3ad44381a98f35c5cc412244cf59356e277 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.160 2010/02/14 18:42:15 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.161 2010/02/26 02:00:46 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1766,8 +1766,8 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan, bool attach_initplans)
 	 * output parameters of any initPlans.	(We do not include output
 	 * parameters of regular subplans.	Those should only appear within the
 	 * testexpr of SubPlan nodes, and are taken care of locally within
-	 * finalize_primnode.  Likewise, special parameters that are generated
-	 * by nodes such as ModifyTable are handled within finalize_plan.)
+	 * finalize_primnode.  Likewise, special parameters that are generated by
+	 * nodes such as ModifyTable are handled within finalize_plan.)
 	 *
 	 * Note: this is a bit overly generous since some parameters of upper
 	 * query levels might belong to query subtrees that don't include this
@@ -1944,14 +1944,14 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params,
 				 * You might think we should add the node's cteParam to
 				 * paramids, but we shouldn't because that param is just a
 				 * linkage mechanism for multiple CteScan nodes for the same
-				 * CTE; it is never used for changed-param signaling.  What
-				 * we have to do instead is to find the referenced CTE plan
-				 * and incorporate its external paramids, so that the correct
+				 * CTE; it is never used for changed-param signaling.  What we
+				 * have to do instead is to find the referenced CTE plan and
+				 * incorporate its external paramids, so that the correct
 				 * things will happen if the CTE references outer-level
 				 * variables.  See test cases for bug #4902.
 				 */
-				int		plan_id = ((CteScan *) plan)->ctePlanId;
-				Plan   *cteplan;
+				int			plan_id = ((CteScan *) plan)->ctePlanId;
+				Plan	   *cteplan;
 
 				/* so, do this ... */
 				if (plan_id < 1 || plan_id > list_length(root->glob->subplans))
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 914703227f44e13750de059dbccf0ba6c4f9f82c..bcc1fe2be33954e2417a6672f73cae6a25af7183 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -16,7 +16,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.70 2010/01/02 16:57:47 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.71 2010/02/26 02:00:46 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -39,13 +39,13 @@
 typedef struct pullup_replace_vars_context
 {
 	PlannerInfo *root;
-	List	   *targetlist;			/* tlist of subquery being pulled up */
-	RangeTblEntry *target_rte;		/* RTE of subquery */
-	bool	   *outer_hasSubLinks;	/* -> outer query's hasSubLinks */
-	int			varno;				/* varno of subquery */
-	bool		need_phvs;			/* do we need PlaceHolderVars? */
-	bool		wrap_non_vars;		/* do we need 'em on *all* non-Vars? */
-	Node	  **rv_cache;			/* cache for results with PHVs */
+	List	   *targetlist;		/* tlist of subquery being pulled up */
+	RangeTblEntry *target_rte;	/* RTE of subquery */
+	bool	   *outer_hasSubLinks;		/* -> outer query's hasSubLinks */
+	int			varno;			/* varno of subquery */
+	bool		need_phvs;		/* do we need PlaceHolderVars? */
+	bool		wrap_non_vars;	/* do we need 'em on *all* non-Vars? */
+	Node	  **rv_cache;		/* cache for results with PHVs */
 } pullup_replace_vars_context;
 
 typedef struct reduce_outer_joins_state
@@ -79,7 +79,7 @@ static void replace_vars_in_jointree(Node *jtnode,
 						 pullup_replace_vars_context *context,
 						 JoinExpr *lowest_outer_join);
 static Node *pullup_replace_vars(Node *expr,
-								 pullup_replace_vars_context *context);
+					pullup_replace_vars_context *context);
 static Node *pullup_replace_vars_callback(Var *var,
 							 replace_rte_variables_context *context);
 static reduce_outer_joins_state *reduce_outer_joins_pass1(Node *jtnode);
@@ -708,7 +708,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
 	 * insert into the top query, but if we are under an outer join then
 	 * non-nullable items may have to be turned into PlaceHolderVars.  If we
 	 * are dealing with an appendrel member then anything that's not a simple
-	 * Var has to be turned into a PlaceHolderVar.  Set up appropriate context
+	 * Var has to be turned into a PlaceHolderVar.	Set up appropriate context
 	 * data for pullup_replace_vars.
 	 */
 	rvcontext.root = root;
@@ -729,7 +729,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
 	 * replace any of the jointree structure. (This'd be a lot cleaner if we
 	 * could use query_tree_mutator.)  We have to use PHVs in the targetList,
 	 * returningList, and havingQual, since those are certainly above any
-	 * outer join.  replace_vars_in_jointree tracks its location in the
+	 * outer join.	replace_vars_in_jointree tracks its location in the
 	 * jointree and uses PHVs or not appropriately.
 	 */
 	parse->targetList = (List *)
@@ -751,7 +751,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
 	foreach(lc, root->append_rel_list)
 	{
 		AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc);
-		bool	save_need_phvs = rvcontext.need_phvs;
+		bool		save_need_phvs = rvcontext.need_phvs;
 
 		if (appinfo == containing_appendrel)
 			rvcontext.need_phvs = false;
@@ -796,9 +796,8 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
 	 * We also have to fix the relid sets of any PlaceHolderVar nodes in the
 	 * parent query.  (This could perhaps be done by pullup_replace_vars(),
 	 * but it seems cleaner to use two passes.)  Note in particular that any
-	 * PlaceHolderVar nodes just created by pullup_replace_vars()
-	 * will be adjusted, so having created them with the subquery's varno is
-	 * correct.
+	 * PlaceHolderVar nodes just created by pullup_replace_vars() will be
+	 * adjusted, so having created them with the subquery's varno is correct.
 	 *
 	 * Likewise, relids appearing in AppendRelInfo nodes have to be fixed. We
 	 * already checked that this won't require introducing multiple subrelids
@@ -1033,8 +1032,8 @@ is_simple_subquery(Query *subquery)
 	 *
 	 * We also don't pull up a subquery that has explicit FOR UPDATE/SHARE
 	 * clauses, because pullup would cause the locking to occur semantically
-	 * higher than it should.  Implicit FOR UPDATE/SHARE is okay because
-	 * in that case the locking was originally declared in the upper query
+	 * higher than it should.  Implicit FOR UPDATE/SHARE is okay because in
+	 * that case the locking was originally declared in the upper query
 	 * anyway.
 	 */
 	if (subquery->hasAggs ||
@@ -1227,7 +1226,7 @@ replace_vars_in_jointree(Node *jtnode,
 	else if (IsA(jtnode, JoinExpr))
 	{
 		JoinExpr   *j = (JoinExpr *) jtnode;
-		bool	save_need_phvs = context->need_phvs;
+		bool		save_need_phvs = context->need_phvs;
 
 		if (j == lowest_outer_join)
 		{
@@ -1310,7 +1309,7 @@ pullup_replace_vars_callback(Var *var,
 		 * expansion with varlevelsup = 0, and then adjust if needed.
 		 */
 		expandRTE(rcon->target_rte,
-				  var->varno, 0 /* not varlevelsup */, var->location,
+				  var->varno, 0 /* not varlevelsup */ , var->location,
 				  (var->vartype != RECORDOID),
 				  &colnames, &fields);
 		/* Adjust the generated per-field Vars, but don't insert PHVs */
@@ -1327,11 +1326,11 @@ pullup_replace_vars_callback(Var *var,
 		newnode = (Node *) rowexpr;
 
 		/*
-		 * Insert PlaceHolderVar if needed.  Notice that we are wrapping
-		 * one PlaceHolderVar around the whole RowExpr, rather than putting
-		 * one around each element of the row.  This is because we need
-		 * the expression to yield NULL, not ROW(NULL,NULL,...) when it
-		 * is forced to null by an outer join.
+		 * Insert PlaceHolderVar if needed.  Notice that we are wrapping one
+		 * PlaceHolderVar around the whole RowExpr, rather than putting one
+		 * around each element of the row.	This is because we need the
+		 * expression to yield NULL, not ROW(NULL,NULL,...) when it is forced
+		 * to null by an outer join.
 		 */
 		if (rcon->need_phvs)
 		{
@@ -1359,7 +1358,7 @@ pullup_replace_vars_callback(Var *var,
 		/* Insert PlaceHolderVar if needed */
 		if (rcon->need_phvs)
 		{
-			bool	wrap;
+			bool		wrap;
 
 			if (newnode && IsA(newnode, Var) &&
 				((Var *) newnode)->varlevelsup == 0)
@@ -1402,8 +1401,8 @@ pullup_replace_vars_callback(Var *var,
 
 			/*
 			 * Cache it if possible (ie, if the attno is in range, which it
-			 * probably always should be).  We can cache the value even if
-			 * we decided we didn't need a PHV, since this result will be
+			 * probably always should be).	We can cache the value even if we
+			 * decided we didn't need a PHV, since this result will be
 			 * suitable for any request that has need_phvs.
 			 */
 			if (varattno > InvalidAttrNumber &&
@@ -1837,7 +1836,7 @@ reduce_outer_joins_pass2(Node *jtnode,
  * top query could (yet) contain such a reference.
  *
  * NOTE: although this has the form of a walker, we cheat and modify the
- * nodes in-place.  This should be OK since the tree was copied by
+ * nodes in-place.	This should be OK since the tree was copied by
  * pullup_replace_vars earlier.  Avoid scribbling on the original values of
  * the bitmapsets, though, because expression_tree_mutator doesn't copy those.
  */
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index cf51fce481ebb5c4f01cb2e8b4b57a0e1294700b..abbf42cb625a0993e4e1308a6d9fb3e297240668 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -17,7 +17,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.99 2010/01/02 16:57:47 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.100 2010/02/26 02:00:46 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -109,11 +109,10 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
 	}
 
 	/*
-	 * Add necessary junk columns for rowmarked rels.  These values are
-	 * needed for locking of rels selected FOR UPDATE/SHARE, and to do
-	 * EvalPlanQual rechecking.  While we are at it, store these junk attnos
-	 * in the PlanRowMark list so that we don't have to redetermine them
-	 * at runtime.
+	 * Add necessary junk columns for rowmarked rels.  These values are needed
+	 * for locking of rels selected FOR UPDATE/SHARE, and to do EvalPlanQual
+	 * rechecking.	While we are at it, store these junk attnos in the
+	 * PlanRowMark list so that we don't have to redetermine them at runtime.
 	 */
 	foreach(lc, root->rowMarks)
 	{
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 560afaaa240211afc8b38839c07eab2dc9724610..562006e13bbc83fa9ee4e93930ca52fc038044f2 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -22,7 +22,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.180 2010/02/01 19:28:56 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.181 2010/02/26 02:00:46 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1196,8 +1196,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
 
 	/*
 	 * If parent relation is selected FOR UPDATE/SHARE, we need to mark its
-	 * PlanRowMark as isParent = true, and generate a new PlanRowMark for
-	 * each child.
+	 * PlanRowMark as isParent = true, and generate a new PlanRowMark for each
+	 * child.
 	 */
 	if (oldrc)
 		oldrc->isParent = true;
@@ -1244,7 +1244,8 @@ expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte, Index rti)
 		childrte = copyObject(rte);
 		childrte->relid = childOID;
 		childrte->inh = false;
-		childrte->requiredPerms = 0; /* do not require permissions on child tables */
+		childrte->requiredPerms = 0;	/* do not require permissions on child
+										 * tables */
 		parse->rtable = lappend(parse->rtable, childrte);
 		childRTindex = list_length(parse->rtable);
 
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 91bdb3537e77138a272d357fcda2b96cd7daabd5..cb1735725a718c388931220fb7973a567fd773ae 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.285 2010/02/14 18:42:15 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.286 2010/02/26 02:00:46 momjian Exp $
  *
  * HISTORY
  *	  AUTHOR			DATE			MAJOR EVENT
@@ -106,7 +106,7 @@ static List *add_function_defaults(List *args, Oid result_type,
 					  eval_const_expressions_context *context);
 static List *fetch_function_defaults(HeapTuple func_tuple);
 static void recheck_cast_function_args(List *args, Oid result_type,
-									   HeapTuple func_tuple);
+						   HeapTuple func_tuple);
 static Expr *evaluate_function(Oid funcid,
 				  Oid result_type, int32 result_typmod, List *args,
 				  HeapTuple func_tuple,
@@ -2127,14 +2127,14 @@ eval_const_expressions_mutator(Node *node,
 		ListCell   *lc;
 
 		/*
-		 * Reduce constants in the FuncExpr's arguments, and check to see
-		 * if there are any named args.
+		 * Reduce constants in the FuncExpr's arguments, and check to see if
+		 * there are any named args.
 		 */
 		args = NIL;
 		has_named_args = false;
 		foreach(lc, expr->args)
 		{
-			Node   *arg = (Node *) lfirst(lc);
+			Node	   *arg = (Node *) lfirst(lc);
 
 			arg = eval_const_expressions_mutator(arg, context);
 			if (IsA(arg, NamedArgExpr))
@@ -2158,8 +2158,8 @@ eval_const_expressions_mutator(Node *node,
 		/*
 		 * The expression cannot be simplified any further, so build and
 		 * return a replacement FuncExpr node using the possibly-simplified
-		 * arguments.  Note that we have also converted the argument list
-		 * to positional notation.
+		 * arguments.  Note that we have also converted the argument list to
+		 * positional notation.
 		 */
 		newexpr = makeNode(FuncExpr);
 		newexpr->funcid = expr->funcid;
@@ -3219,16 +3219,16 @@ simplify_boolean_equality(Oid opno, List *args)
 		if (opno == BooleanEqualOperator)
 		{
 			if (DatumGetBool(((Const *) leftop)->constvalue))
-				return rightop;		/* true = foo */
+				return rightop; /* true = foo */
 			else
-				return make_notclause(rightop);		/* false = foo */
+				return make_notclause(rightop); /* false = foo */
 		}
 		else
 		{
 			if (DatumGetBool(((Const *) leftop)->constvalue))
-				return make_notclause(rightop);		/* true <> foo */
+				return make_notclause(rightop); /* true <> foo */
 			else
-				return rightop;		/* false <> foo */
+				return rightop; /* false <> foo */
 		}
 	}
 	if (rightop && IsA(rightop, Const))
@@ -3237,16 +3237,16 @@ simplify_boolean_equality(Oid opno, List *args)
 		if (opno == BooleanEqualOperator)
 		{
 			if (DatumGetBool(((Const *) rightop)->constvalue))
-				return leftop;		/* foo = true */
+				return leftop;	/* foo = true */
 			else
-				return make_notclause(leftop);		/* foo = false */
+				return make_notclause(leftop);	/* foo = false */
 		}
 		else
 		{
 			if (DatumGetBool(((Const *) rightop)->constvalue))
-				return make_notclause(leftop);		/* foo <> true */
+				return make_notclause(leftop);	/* foo <> true */
 			else
-				return leftop;		/* foo <> false */
+				return leftop;	/* foo <> false */
 		}
 	}
 	return NULL;
@@ -3340,7 +3340,7 @@ reorder_function_arguments(List *args, Oid result_type, HeapTuple func_tuple,
 	i = 0;
 	foreach(lc, args)
 	{
-		Node   *arg = (Node *) lfirst(lc);
+		Node	   *arg = (Node *) lfirst(lc);
 
 		if (!IsA(arg, NamedArgExpr))
 		{
@@ -3358,13 +3358,13 @@ reorder_function_arguments(List *args, Oid result_type, HeapTuple func_tuple,
 	}
 
 	/*
-	 * Fetch default expressions, if needed, and insert into array at
-	 * proper locations (they aren't necessarily consecutive or all used)
+	 * Fetch default expressions, if needed, and insert into array at proper
+	 * locations (they aren't necessarily consecutive or all used)
 	 */
 	defargnumbers = NULL;
 	if (nargsprovided < pronargs)
 	{
-		List   *defaults = fetch_function_defaults(func_tuple);
+		List	   *defaults = fetch_function_defaults(func_tuple);
 
 		i = pronargs - funcform->pronargdefaults;
 		foreach(lc, defaults)
@@ -3390,10 +3390,10 @@ reorder_function_arguments(List *args, Oid result_type, HeapTuple func_tuple,
 	recheck_cast_function_args(args, result_type, func_tuple);
 
 	/*
-	 * Lastly, we have to recursively simplify the defaults we just added
-	 * (but don't recurse on the args passed in, as we already did those).
-	 * This isn't merely an optimization, it's *necessary* since there could
-	 * be functions with named or defaulted arguments down in there.
+	 * Lastly, we have to recursively simplify the defaults we just added (but
+	 * don't recurse on the args passed in, as we already did those). This
+	 * isn't merely an optimization, it's *necessary* since there could be
+	 * functions with named or defaulted arguments down in there.
 	 *
 	 * Note that we do this last in hopes of simplifying any typecasts that
 	 * were added by recheck_cast_function_args --- there shouldn't be any new
@@ -3448,10 +3448,10 @@ add_function_defaults(List *args, Oid result_type, HeapTuple func_tuple,
 	recheck_cast_function_args(args, result_type, func_tuple);
 
 	/*
-	 * Lastly, we have to recursively simplify the defaults we just added
-	 * (but don't recurse on the args passed in, as we already did those).
-	 * This isn't merely an optimization, it's *necessary* since there could
-	 * be functions with named or defaulted arguments down in there.
+	 * Lastly, we have to recursively simplify the defaults we just added (but
+	 * don't recurse on the args passed in, as we already did those). This
+	 * isn't merely an optimization, it's *necessary* since there could be
+	 * functions with named or defaulted arguments down in there.
 	 *
 	 * Note that we do this last in hopes of simplifying any typecasts that
 	 * were added by recheck_cast_function_args --- there shouldn't be any new
@@ -4191,11 +4191,11 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
 	oldcxt = MemoryContextSwitchTo(mycxt);
 
 	/*
-	 * Run eval_const_expressions on the function call.  This is necessary
-	 * to ensure that named-argument notation is converted to positional
-	 * notation and any default arguments are inserted.  It's a bit of
-	 * overkill for the arguments, since they'll get processed again later,
-	 * but no harm will be done.
+	 * Run eval_const_expressions on the function call.  This is necessary to
+	 * ensure that named-argument notation is converted to positional notation
+	 * and any default arguments are inserted.	It's a bit of overkill for the
+	 * arguments, since they'll get processed again later, but no harm will be
+	 * done.
 	 */
 	fexpr = (FuncExpr *) eval_const_expressions(root, (Node *) fexpr);
 
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 3f6aec023da13dc60630c5b0a2ea034a534baee5..a2ebe0d8ed352e1564d7614f1487294edfa632da 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.156 2010/01/02 16:57:48 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.157 2010/02/26 02:00:47 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1224,7 +1224,7 @@ create_noop_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath)
 {
 	NoOpPath   *pathnode = makeNode(NoOpPath);
 
-	pathnode->path.pathtype = T_Join;			/* by convention */
+	pathnode->path.pathtype = T_Join;	/* by convention */
 	pathnode->path.parent = rel;
 	pathnode->path.startup_cost = subpath->startup_cost;
 	pathnode->path.total_cost = subpath->total_cost;
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index 97d7e94f3266b513407225c5541a5bc34beab6db..66d3a7498fe916c4f1ba93da257281a0b9431b23 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.32 2010/02/25 20:59:53 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.33 2010/02/26 02:00:47 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -657,10 +657,10 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate)
 			/*
 			 * If A is a strong NOT-clause, A R=> B if B equals A's arg
 			 *
-			 * We cannot make the stronger conclusion that B is refuted if
-			 * B implies A's arg; that would only prove that B is not-TRUE,
-			 * not that it's not NULL either.  Hence use equal() rather than
-			 * predicate_implied_by_recurse().  We could do the latter if we
+			 * We cannot make the stronger conclusion that B is refuted if B
+			 * implies A's arg; that would only prove that B is not-TRUE, not
+			 * that it's not NULL either.  Hence use equal() rather than
+			 * predicate_implied_by_recurse().	We could do the latter if we
 			 * ever had a need for the weak form of refutation.
 			 */
 			not_arg = extract_strong_not_arg(clause);
@@ -1678,7 +1678,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
 		else if (OidIsValid(clause_op_negator))
 		{
 			clause_tuple = SearchSysCache2(AMOPOPID,
-										   ObjectIdGetDatum(clause_op_negator),
+										 ObjectIdGetDatum(clause_op_negator),
 										   ObjectIdGetDatum(opfamily_id));
 			if (HeapTupleIsValid(clause_tuple))
 			{
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index ea27058766f03e632f2bdd5faf2dccffc9321e70..f99d0ad1fb1e173decbc795032a2dd98c6894828 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.97 2010/01/02 16:57:48 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.98 2010/02/26 02:00:47 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -404,9 +404,9 @@ build_join_rel(PlannerInfo *root,
 
 	/*
 	 * Also, if dynamic-programming join search is active, add the new joinrel
-	 * to the appropriate sublist.  Note: you might think the Assert on
-	 * number of members should be for equality, but some of the level 1
-	 * rels might have been joinrels already, so we can only assert <=.
+	 * to the appropriate sublist.	Note: you might think the Assert on number
+	 * of members should be for equality, but some of the level 1 rels might
+	 * have been joinrels already, so we can only assert <=.
 	 */
 	if (root->join_rel_level)
 	{
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 16504d90111c045616817a0fc46d60f36a07168a..caa3cd77c0df944bea3f029f283c013018b95340 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.62 2010/01/02 16:57:48 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.63 2010/02/26 02:00:49 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -292,15 +292,15 @@ make_restrictinfos_from_actual_clauses(PlannerInfo *root,
 
 	foreach(l, clause_list)
 	{
-		Expr   *clause = (Expr *) lfirst(l);
-		bool	pseudoconstant;
+		Expr	   *clause = (Expr *) lfirst(l);
+		bool		pseudoconstant;
 		RestrictInfo *rinfo;
 
 		/*
 		 * It's pseudoconstant if it contains no Vars and no volatile
 		 * functions.  We probably can't see any sublinks here, so
-		 * contain_var_clause() would likely be enough, but for safety
-		 * use contain_vars_of_level() instead.
+		 * contain_var_clause() would likely be enough, but for safety use
+		 * contain_vars_of_level() instead.
 		 */
 		pseudoconstant =
 			!contain_vars_of_level((Node *) clause, 0) &&
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 027cb972ee7b6c7c9aa9bbc15a6883aebadf9f54..1576613bc1a6193f155ae8752e4f4562f72170df 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -17,7 +17,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- *	$PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.401 2010/02/12 22:48:56 tgl Exp $
+ *	$PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.402 2010/02/26 02:00:49 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -53,7 +53,7 @@ static Query *transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt);
 static Node *transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
 						  bool isTopLevel, List **colInfo);
 static void determineRecursiveColTypes(ParseState *pstate,
-									   Node *larg, List *lcolinfo);
+						   Node *larg, List *lcolinfo);
 static void applyColumnNames(List *dst, List *src);
 static Query *transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt);
 static List *transformReturningList(ParseState *pstate, List *returningList);
@@ -62,7 +62,7 @@ static Query *transformDeclareCursorStmt(ParseState *pstate,
 static Query *transformExplainStmt(ParseState *pstate,
 					 ExplainStmt *stmt);
 static void transformLockingClause(ParseState *pstate, Query *qry,
-								   LockingClause *lc, bool pushedDown);
+					   LockingClause *lc, bool pushedDown);
 
 
 /*
@@ -823,14 +823,14 @@ transformSelectStmt(ParseState *pstate, SelectStmt *stmt)
 	qry->sortClause = transformSortClause(pstate,
 										  stmt->sortClause,
 										  &qry->targetList,
-										  true /* fix unknowns */,
-										  false /* allow SQL92 rules */);
+										  true /* fix unknowns */ ,
+										  false /* allow SQL92 rules */ );
 
 	qry->groupClause = transformGroupClause(pstate,
 											stmt->groupClause,
 											&qry->targetList,
 											qry->sortClause,
-											false /* allow SQL92 rules */);
+											false /* allow SQL92 rules */ );
 
 	if (stmt->distinctClause == NIL)
 	{
@@ -1040,8 +1040,8 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
 	qry->sortClause = transformSortClause(pstate,
 										  stmt->sortClause,
 										  &qry->targetList,
-										  true /* fix unknowns */,
-										  false /* allow SQL92 rules */);
+										  true /* fix unknowns */ ,
+										  false /* allow SQL92 rules */ );
 
 	qry->limitOffset = transformLimitClause(pstate, stmt->limitOffset,
 											"OFFSET");
@@ -1294,8 +1294,8 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
 	qry->sortClause = transformSortClause(pstate,
 										  sortClause,
 										  &qry->targetList,
-										  false /* no unknowns expected */,
-										  false /* allow SQL92 rules */);
+										  false /* no unknowns expected */ ,
+										  false /* allow SQL92 rules */ );
 
 	pstate->p_rtable = list_truncate(pstate->p_rtable, sv_rtable_length);
 	pstate->p_relnamespace = sv_relnamespace;
@@ -1494,8 +1494,8 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
 											 &lcolinfo);
 
 		/*
-		 * If we are processing a recursive union query, now is the time
-		 * to examine the non-recursive term's output columns and mark the
+		 * If we are processing a recursive union query, now is the time to
+		 * examine the non-recursive term's output columns and mark the
 		 * containing CTE as having those result columns.  We should do this
 		 * only at the topmost setop of the CTE, of course.
 		 */
@@ -1552,25 +1552,25 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
 				rescoltypmod = -1;
 
 			/*
-			 * Verify the coercions are actually possible.  If not, we'd
-			 * fail later anyway, but we want to fail now while we have
-			 * sufficient context to produce an error cursor position.
+			 * Verify the coercions are actually possible.	If not, we'd fail
+			 * later anyway, but we want to fail now while we have sufficient
+			 * context to produce an error cursor position.
 			 *
 			 * The if-tests might look wrong, but they are correct: we should
 			 * verify if the input is non-UNKNOWN *or* if it is an UNKNOWN
 			 * Const (to verify the literal is valid for the target data type)
 			 * or Param (to possibly resolve the Param's type).  We should do
 			 * nothing if the input is say an UNKNOWN Var, which can happen in
-			 * some cases.  The planner is sometimes able to fold the Var to a
+			 * some cases.	The planner is sometimes able to fold the Var to a
 			 * constant before it has to coerce the type, so failing now would
 			 * just break cases that might work.
 			 */
 			if (lcoltype != UNKNOWNOID ||
-				IsA(lcolnode, Const) || IsA(lcolnode, Param))
+				IsA(lcolnode, Const) ||IsA(lcolnode, Param))
 				(void) coerce_to_common_type(pstate, lcolnode,
 											 rescoltype, context);
 			if (rcoltype != UNKNOWNOID ||
-				IsA(rcolnode, Const) || IsA(rcolnode, Param))
+				IsA(rcolnode, Const) ||IsA(rcolnode, Param))
 				(void) coerce_to_common_type(pstate, rcolnode,
 											 rescoltype, context);
 
@@ -1647,8 +1647,8 @@ determineRecursiveColTypes(ParseState *pstate, Node *larg, List *lcolinfo)
 	Assert(leftmostQuery != NULL);
 
 	/*
-	 * Generate dummy targetlist using column names of leftmost select
-	 * and dummy result expressions of the non-recursive term.
+	 * Generate dummy targetlist using column names of leftmost select and
+	 * dummy result expressions of the non-recursive term.
 	 */
 	targetList = NIL;
 	left_tlist = list_head(leftmostQuery->targetList);
@@ -2095,12 +2095,13 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
 				case RTE_SUBQUERY:
 					applyLockingClause(qry, i,
 									   lc->forUpdate, lc->noWait, pushedDown);
+
 					/*
 					 * FOR UPDATE/SHARE of subquery is propagated to all of
-					 * subquery's rels, too.  We could do this later (based
-					 * on the marking of the subquery RTE) but it is convenient
-					 * to have local knowledge in each query level about
-					 * which rels need to be opened with RowShareLock.
+					 * subquery's rels, too.  We could do this later (based on
+					 * the marking of the subquery RTE) but it is convenient
+					 * to have local knowledge in each query level about which
+					 * rels need to be opened with RowShareLock.
 					 */
 					transformLockingClause(pstate, rte->subquery,
 										   allrels, true);
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index e883e283e0be028c71f23436e9f3e9f6fa778c7d..d30d01261f77a584cd9cbc4f732a6bc60d22af54 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/parser/parse_agg.c,v 1.91 2010/02/12 17:33:20 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/parser/parse_agg.c,v 1.92 2010/02/26 02:00:49 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -51,7 +51,7 @@ static bool check_ungrouped_columns_walker(Node *node,
  *
  * Here we convert the args list into a targetlist by inserting TargetEntry
  * nodes, and then transform the aggorder and agg_distinct specifications to
- * produce lists of SortGroupClause nodes.  (That might also result in adding
+ * produce lists of SortGroupClause nodes.	(That might also result in adding
  * resjunk expressions to the targetlist.)
  *
  * We must also determine which query level the aggregate actually belongs to,
@@ -61,11 +61,11 @@ static bool check_ungrouped_columns_walker(Node *node,
 void
 transformAggregateCall(ParseState *pstate, Aggref *agg, bool agg_distinct)
 {
-	List       *tlist;
-	List       *torder;
-	List       *tdistinct = NIL;
-	AttrNumber  attno;
-	int         save_next_resno;
+	List	   *tlist;
+	List	   *torder;
+	List	   *tdistinct = NIL;
+	AttrNumber	attno;
+	int			save_next_resno;
 	int			min_varlevel;
 	ListCell   *lc;
 
@@ -77,7 +77,7 @@ transformAggregateCall(ParseState *pstate, Aggref *agg, bool agg_distinct)
 	attno = 1;
 	foreach(lc, agg->args)
 	{
-		Expr        *arg  = (Expr *) lfirst(lc);
+		Expr	   *arg = (Expr *) lfirst(lc);
 		TargetEntry *tle = makeTargetEntry(arg, attno++, NULL, false);
 
 		tlist = lappend(tlist, tle);
@@ -98,8 +98,8 @@ transformAggregateCall(ParseState *pstate, Aggref *agg, bool agg_distinct)
 	torder = transformSortClause(pstate,
 								 agg->aggorder,
 								 &tlist,
-								 true /* fix unknowns */,
-								 true /* force SQL99 rules */);
+								 true /* fix unknowns */ ,
+								 true /* force SQL99 rules */ );
 
 	/*
 	 * If we have DISTINCT, transform that to produce a distinctList.
@@ -118,12 +118,12 @@ transformAggregateCall(ParseState *pstate, Aggref *agg, bool agg_distinct)
 
 			if (!OidIsValid(sortcl->sortop))
 			{
-				Node   *expr = get_sortgroupclause_expr(sortcl, tlist);
+				Node	   *expr = get_sortgroupclause_expr(sortcl, tlist);
 
 				ereport(ERROR,
 						(errcode(ERRCODE_UNDEFINED_FUNCTION),
-						 errmsg("could not identify an ordering operator for type %s",
-								format_type_be(exprType(expr))),
+				errmsg("could not identify an ordering operator for type %s",
+					   format_type_be(exprType(expr))),
 						 errdetail("Aggregates with DISTINCT must be able to sort their inputs."),
 						 parser_errposition(pstate, exprLocation(expr))));
 			}
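
The errdetail above, "Aggregates with DISTINCT must be able to sort their inputs.", reflects how the duplicates are eliminated: sort the inputs, then skip adjacent equal values. A standalone sketch over ints, with an ordinary comparator standing in for the type's sort operator:

#include <stdio.h>
#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
    int ia = *(const int *) a;
    int ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

static long
sum_distinct(int *vals, int n)
{
    long sum = 0;
    int i;

    /* sort first, then adjacent duplicates are easy to skip */
    qsort(vals, n, sizeof(int), cmp_int);
    for (i = 0; i < n; i++)
        if (i == 0 || vals[i] != vals[i - 1])
            sum += vals[i];
    return sum;
}

int
main(void)
{
    int vals[] = {3, 1, 3, 2, 1};

    printf("%ld\n", sum_distinct(vals, 5));   /* 6, i.e. 1 + 2 + 3 */
    return 0;
}
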
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 54bb867631eeee76d7421957480052cdc3c381ac..182181f3a60e1d73bf38e68b3ef0dda399a17240 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.197 2010/02/12 17:33:20 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.198 2010/02/26 02:00:50 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -73,7 +73,7 @@ static Node *transformFromClauseItem(ParseState *pstate, Node *n,
 static Node *buildMergedJoinVar(ParseState *pstate, JoinType jointype,
 				   Var *l_colvar, Var *r_colvar);
 static void checkExprIsVarFree(ParseState *pstate, Node *n,
-							   const char *constructName);
+				   const char *constructName);
 static TargetEntry *findTargetlistEntrySQL92(ParseState *pstate, Node *node,
 						 List **tlist, int clause);
 static TargetEntry *findTargetlistEntrySQL99(ParseState *pstate, Node *node,
@@ -88,7 +88,7 @@ static List *addTargetToGroupList(ParseState *pstate, TargetEntry *tle,
 					 bool resolveUnknown);
 static WindowClause *findWindowClause(List *wclist, const char *name);
 static Node *transformFrameOffset(ParseState *pstate, int frameOptions,
-								  Node *clause);
+					 Node *clause);
 
 
 /*
@@ -802,7 +802,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
 			ListCell   *lx,
 					   *rx;
 
-			Assert(j->usingClause == NIL);	/* shouldn't have USING() too */
+			Assert(j->usingClause == NIL);		/* shouldn't have USING() too */
 
 			foreach(lx, l_colnames)
 			{
@@ -1245,9 +1245,9 @@ checkExprIsVarFree(ParseState *pstate, Node *n, const char *constructName)
  *
  * This function supports the old SQL92 ORDER BY interpretation, where the
  * expression is an output column name or number.  If we fail to find a
- * match of that sort, we fall through to the SQL99 rules.  For historical
+ * match of that sort, we fall through to the SQL99 rules.	For historical
  * reasons, Postgres also allows this interpretation for GROUP BY, though
- * the standard never did.  However, for GROUP BY we prefer a SQL99 match.
+ * the standard never did.	However, for GROUP BY we prefer a SQL99 match.
  * This function is *not* used for WINDOW definitions.
  *
  * node		the ORDER BY, GROUP BY, or DISTINCT ON expression to be matched
@@ -1421,7 +1421,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist)
 	/*
 	 * Convert the untransformed node to a transformed expression, and search
 	 * for a match in the tlist.  NOTE: it doesn't really matter whether there
-	 * is more than one match.  Also, we are willing to match an existing
+	 * is more than one match.	Also, we are willing to match an existing
 	 * resjunk target here, though the SQL92 cases above must ignore resjunk
 	 * targets.
 	 */
@@ -1617,13 +1617,13 @@ transformWindowDefinitions(ParseState *pstate,
 		orderClause = transformSortClause(pstate,
 										  windef->orderClause,
 										  targetlist,
-										  true /* fix unknowns */,
-										  true /* force SQL99 rules */);
+										  true /* fix unknowns */ ,
+										  true /* force SQL99 rules */ );
 		partitionClause = transformGroupClause(pstate,
 											   windef->partitionClause,
 											   targetlist,
 											   orderClause,
-											   true /* force SQL99 rules */);
+											   true /* force SQL99 rules */ );
 
 		/*
 		 * And prepare the new WindowClause.
@@ -2220,8 +2220,8 @@ transformFrameOffset(ParseState *pstate, int frameOptions, Node *clause)
 	else if (frameOptions & FRAMEOPTION_RANGE)
 	{
 		/*
-		 * this needs a lot of thought to decide how to support in the
-		 * context of Postgres' extensible datatype framework
+		 * this needs a lot of thought to decide how to support in the context
+		 * of Postgres' extensible datatype framework
 		 */
 		constructName = "RANGE";
 		/* error was already thrown by gram.y, this is just a backstop */
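
The SQL92 lookup described above tries an output-column number first, then an output-column name, and only then falls through to the SQL99 expression path. A toy standalone version, with the target list reduced to an array of column names rather than the real TargetEntry list:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
find_sort_target(const char *item, const char *const *tlist, int ncols)
{
    char *end;
    long n = strtol(item, &end, 10);
    int i;

    if (*item != '\0' && *end == '\0')      /* ORDER BY 2 */
        return (n >= 1 && n <= ncols) ? (int) n : -1;

    for (i = 0; i < ncols; i++)             /* ORDER BY output_column_name */
        if (strcmp(item, tlist[i]) == 0)
            return i + 1;

    return 0;                               /* fall through to the SQL99 rules */
}

int
main(void)
{
    const char *const tlist[] = {"id", "name", "created"};

    printf("%d\n", find_sort_target("2", tlist, 3));        /* 2 */
    printf("%d\n", find_sort_target("name", tlist, 3));     /* 2 */
    printf("%d\n", find_sort_target("id + 1", tlist, 3));   /* 0: SQL99 path */
    return 0;
}
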
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 66ce032cee6214b64621b38615bca43b8df6bdd3..2000dfb93a9b12f411b805bbb8434b8520404f45 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.180 2010/02/14 18:42:15 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.181 2010/02/26 02:00:52 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -263,8 +263,8 @@ coerce_type(ParseState *pstate, Node *node,
 		pstate != NULL && pstate->p_coerce_param_hook != NULL)
 	{
 		/*
-		 * Allow the CoerceParamHook to decide what happens.  It can return
-		 * a transformed node (very possibly the same Param node), or return
+		 * Allow the CoerceParamHook to decide what happens.  It can return a
+		 * transformed node (very possibly the same Param node), or return
 		 * NULL to indicate we should proceed with normal coercion.
 		 */
 		result = (*pstate->p_coerce_param_hook) (pstate,
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 72cb64a63d02f2cc0fcdb0fa8d2e49dd287d05ef..12c93e15f41fb03ca20edb37058c60b9c22224bc 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.253 2010/01/02 16:57:49 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.254 2010/02/26 02:00:52 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -61,7 +61,7 @@ static Node *transformBooleanTest(ParseState *pstate, BooleanTest *b);
 static Node *transformCurrentOfExpr(ParseState *pstate, CurrentOfExpr *cexpr);
 static Node *transformColumnRef(ParseState *pstate, ColumnRef *cref);
 static Node *transformWholeRowRef(ParseState *pstate, RangeTblEntry *rte,
-								  int location);
+					 int location);
 static Node *transformIndirection(ParseState *pstate, Node *basenode,
 					 List *indirection);
 static Node *transformTypeCast(ParseState *pstate, TypeCast *tc);
@@ -172,8 +172,8 @@ transformExpr(ParseState *pstate, Node *expr)
 						 * not a domain, transformTypeCast is a no-op.
 						 */
 						targetType = getBaseTypeAndTypmod(targetType,
-														 &targetTypmod);
-							
+														  &targetTypmod);
+
 						tc = copyObject(tc);
 						tc->arg = transformArrayExpr(pstate,
 													 (A_ArrayExpr *) tc->arg,
@@ -466,7 +466,8 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
 	char	   *colname = NULL;
 	RangeTblEntry *rte;
 	int			levels_up;
-	enum {
+	enum
+	{
 		CRERR_NO_COLUMN,
 		CRERR_NO_RTE,
 		CRERR_WRONG_DB,
@@ -474,7 +475,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
 	}			crerr = CRERR_NO_COLUMN;
 
 	/*
-	 * Give the PreParseColumnRefHook, if any, first shot.  If it returns
+	 * Give the PreParseColumnRefHook, if any, first shot.	If it returns
 	 * non-null then that's all, folks.
 	 */
 	if (pstate->p_pre_columnref_hook != NULL)
@@ -708,22 +709,22 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
 				break;
 			}
 		default:
-			crerr = CRERR_TOO_MANY;			/* too many dotted names */
+			crerr = CRERR_TOO_MANY;		/* too many dotted names */
 			break;
 	}
 
 	/*
 	 * Now give the PostParseColumnRefHook, if any, a chance.  We pass the
 	 * translation-so-far so that it can throw an error if it wishes in the
-	 * case that it has a conflicting interpretation of the ColumnRef.
-	 * (If it just translates anyway, we'll throw an error, because we can't
-	 * undo whatever effects the preceding steps may have had on the pstate.)
-	 * If it returns NULL, use the standard translation, or throw a suitable
-	 * error if there is none.
+	 * case that it has a conflicting interpretation of the ColumnRef. (If it
+	 * just translates anyway, we'll throw an error, because we can't undo
+	 * whatever effects the preceding steps may have had on the pstate.) If it
+	 * returns NULL, use the standard translation, or throw a suitable error
+	 * if there is none.
 	 */
 	if (pstate->p_post_columnref_hook != NULL)
 	{
-		Node   *hookresult;
+		Node	   *hookresult;
 
 		hookresult = (*pstate->p_post_columnref_hook) (pstate, cref, node);
 		if (node == NULL)
@@ -765,15 +766,15 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
 			case CRERR_WRONG_DB:
 				ereport(ERROR,
 						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-						 errmsg("cross-database references are not implemented: %s",
-								NameListToString(cref->fields)),
+				  errmsg("cross-database references are not implemented: %s",
+						 NameListToString(cref->fields)),
 						 parser_errposition(pstate, cref->location)));
 				break;
 			case CRERR_TOO_MANY:
 				ereport(ERROR,
 						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("improper qualified name (too many dotted names): %s",
-								NameListToString(cref->fields)),
+				errmsg("improper qualified name (too many dotted names): %s",
+					   NameListToString(cref->fields)),
 						 parser_errposition(pstate, cref->location)));
 				break;
 		}
@@ -788,7 +789,7 @@ transformParamRef(ParseState *pstate, ParamRef *pref)
 	Node	   *result;
 
 	/*
-	 * The core parser knows nothing about Params.  If a hook is supplied,
+	 * The core parser knows nothing about Params.	If a hook is supplied,
 	 * call it.  If not, or if the hook returns NULL, throw a generic error.
 	 */
 	if (pstate->p_paramref_hook != NULL)
@@ -1972,10 +1973,10 @@ transformCurrentOfExpr(ParseState *pstate, CurrentOfExpr *cexpr)
 
 	/*
 	 * Check to see if the cursor name matches a parameter of type REFCURSOR.
-	 * If so, replace the raw name reference with a parameter reference.
-	 * (This is a hack for the convenience of plpgsql.)
+	 * If so, replace the raw name reference with a parameter reference. (This
+	 * is a hack for the convenience of plpgsql.)
 	 */
-	if (cexpr->cursor_name != NULL)			/* in case already transformed */
+	if (cexpr->cursor_name != NULL)		/* in case already transformed */
 	{
 		ColumnRef  *cref = makeNode(ColumnRef);
 		Node	   *node = NULL;
@@ -1991,13 +1992,13 @@ transformCurrentOfExpr(ParseState *pstate, CurrentOfExpr *cexpr)
 			node = (*pstate->p_post_columnref_hook) (pstate, cref, NULL);
 
 		/*
-		 * XXX Should we throw an error if we get a translation that isn't
-		 * a refcursor Param?  For now it seems best to silently ignore
-		 * false matches.
+		 * XXX Should we throw an error if we get a translation that isn't a
+		 * refcursor Param?  For now it seems best to silently ignore false
+		 * matches.
 		 */
 		if (node != NULL && IsA(node, Param))
 		{
-			Param  *p = (Param *) node;
+			Param	   *p = (Param *) node;
 
 			if (p->paramkind == PARAM_EXTERN &&
 				p->paramtype == REFCURSOROID)
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index df34711af6ef76cc9a3d1a29b5eace2fe868c47d..dffc24c09e39ea38f4aff7720573c09d799ebaff 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.221 2010/02/14 18:42:15 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.222 2010/02/26 02:00:52 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -125,13 +125,13 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
 	 *
 	 * We allow mixed notation (some named and some not), but only with all
 	 * the named parameters after all the unnamed ones.  So the name list
-	 * corresponds to the last N actual parameters and we don't need any
-	 * extra bookkeeping to match things up.
+	 * corresponds to the last N actual parameters and we don't need any extra
+	 * bookkeeping to match things up.
 	 */
 	argnames = NIL;
 	foreach(l, fargs)
 	{
-		Node   *arg = lfirst(l);
+		Node	   *arg = lfirst(l);
 
 		if (IsA(arg, NamedArgExpr))
 		{
@@ -144,8 +144,8 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
 				if (strcmp(na->name, (char *) lfirst(lc)) == 0)
 					ereport(ERROR,
 							(errcode(ERRCODE_SYNTAX_ERROR),
-							 errmsg("argument name \"%s\" used more than once",
-									na->name),
+						   errmsg("argument name \"%s\" used more than once",
+								  na->name),
 							 parser_errposition(pstate, na->location)));
 			}
 			argnames = lappend(argnames, na->name);
@@ -155,7 +155,7 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
 			if (argnames != NIL)
 				ereport(ERROR,
 						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("positional argument cannot follow named argument"),
+				  errmsg("positional argument cannot follow named argument"),
 						 parser_errposition(pstate, exprLocation(arg))));
 		}
 	}
@@ -246,8 +246,8 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
 		if (agg_order != NIL)
 			ereport(ERROR,
 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-			   errmsg("ORDER BY specified, but %s is not an aggregate function",
-					  NameListToString(funcname)),
+			errmsg("ORDER BY specified, but %s is not an aggregate function",
+				   NameListToString(funcname)),
 					 parser_errposition(pstate, location)));
 		if (over)
 			ereport(ERROR,
@@ -262,8 +262,8 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
 		/*
 		 * Oops.  Time to die.
 		 *
-		 * If we are dealing with the attribute notation rel.function,
-		 * let the caller handle failure.
+		 * If we are dealing with the attribute notation rel.function, let the
+		 * caller handle failure.
 		 */
 		if (is_column)
 			return NULL;
@@ -408,9 +408,9 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
 
 		/*
 		 * Currently it's not possible to define an aggregate with named
-		 * arguments, so this case should be impossible.  Check anyway
-		 * because the planner and executor wouldn't cope with NamedArgExprs
-		 * in an Aggref node.
+		 * arguments, so this case should be impossible.  Check anyway because
+		 * the planner and executor wouldn't cope with NamedArgExprs in an
+		 * Aggref node.
 		 */
 		if (argnames != NIL)
 			ereport(ERROR,
@@ -481,9 +481,9 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
 					 parser_errposition(pstate, location)));
 
 		/*
-		 * We might want to support this later, but for now reject it
-		 * because the planner and executor wouldn't cope with NamedArgExprs
-		 * in a WindowFunc node.
+		 * We might want to support this later, but for now reject it because
+		 * the planner and executor wouldn't cope with NamedArgExprs in a
+		 * WindowFunc node.
 		 */
 		if (argnames != NIL)
 			ereport(ERROR,
@@ -1075,10 +1075,9 @@ func_get_detail(List *funcname,
 			return FUNCDETAIL_MULTIPLE;
 
 		/*
-		 * We disallow VARIADIC with named arguments unless the last
-		 * argument (the one with VARIADIC attached) actually matched the
-		 * variadic parameter.  This is mere pedantry, really, but some
-		 * folks insisted.
+		 * We disallow VARIADIC with named arguments unless the last argument
+		 * (the one with VARIADIC attached) actually matched the variadic
+		 * parameter.  This is mere pedantry, really, but some folks insisted.
 		 */
 		if (fargnames != NIL && !expand_variadic && nargs > 0 &&
 			best_candidate->argnumbers[nargs - 1] != nargs - 1)
@@ -1142,17 +1141,17 @@ func_get_detail(List *funcname,
 			{
 				/*
 				 * This is a bit tricky in named notation, since the supplied
-				 * arguments could replace any subset of the defaults.  We
+				 * arguments could replace any subset of the defaults.	We
 				 * work by making a bitmapset of the argnumbers of defaulted
 				 * arguments, then scanning the defaults list and selecting
 				 * the needed items.  (This assumes that defaulted arguments
 				 * should be supplied in their positional order.)
 				 */
-				Bitmapset *defargnumbers;
-				int	   *firstdefarg;
-				List   *newdefaults;
-				ListCell *lc;
-				int		i;
+				Bitmapset  *defargnumbers;
+				int		   *firstdefarg;
+				List	   *newdefaults;
+				ListCell   *lc;
+				int			i;
 
 				defargnumbers = NULL;
 				firstdefarg = &best_candidate->argnumbers[best_candidate->nargs - best_candidate->ndargs];
@@ -1174,8 +1173,8 @@ func_get_detail(List *funcname,
 			else
 			{
 				/*
-				 * Defaults for positional notation are lots easier;
-				 * just remove any unwanted ones from the front.
+				 * Defaults for positional notation are lots easier; just
+				 * remove any unwanted ones from the front.
 				 */
 				int			ndelete;
 
@@ -1226,11 +1225,11 @@ make_fn_arguments(ParseState *pstate,
 		/* types don't match? then force coercion using a function call... */
 		if (actual_arg_types[i] != declared_arg_types[i])
 		{
-			Node   *node = (Node *) lfirst(current_fargs);
+			Node	   *node = (Node *) lfirst(current_fargs);
 
 			/*
-			 * If arg is a NamedArgExpr, coerce its input expr instead ---
-			 * we want the NamedArgExpr to stay at the top level of the list.
+			 * If arg is a NamedArgExpr, coerce its input expr instead --- we
+			 * want the NamedArgExpr to stay at the top level of the list.
 			 */
 			if (IsA(node, NamedArgExpr))
 			{
@@ -1364,7 +1363,7 @@ ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg,
  *		The result is something like "foo(integer)".
  *
  * If argnames isn't NIL, it is a list of C strings representing the actual
- * arg names for the last N arguments.  This must be considered part of the
+ * arg names for the last N arguments.	This must be considered part of the
  * function signature too, when dealing with named-notation function calls.
  *
  * This is typically used in the construction of function-not-found error
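
The two notation rules enforced above (every positional argument must precede every named one, and no argument name may repeat) can be restated outside the parser. A minimal standalone sketch in plain C; the struct and function names here are illustrative, not backend code:

#include <stdio.h>
#include <string.h>

struct arg
{
    const char *name;           /* NULL for a positional argument */
    int value;
};

/* Return 0 if the notation rules hold, -1 otherwise. */
static int
check_call_notation(const struct arg *args, int nargs)
{
    int i, j;

    for (i = 0; i < nargs; i++)
    {
        if (args[i].name == NULL)
        {
            /* a positional argument cannot follow a named argument */
            for (j = 0; j < i; j++)
                if (args[j].name != NULL)
                    return -1;
        }
        else
        {
            /* an argument name must not be used more than once */
            for (j = 0; j < i; j++)
                if (args[j].name && strcmp(args[j].name, args[i].name) == 0)
                    return -1;
        }
    }
    return 0;
}

int
main(void)
{
    struct arg ok[] = {{NULL, 1}, {"b", 2}, {"c", 3}};
    struct arg bad[] = {{"b", 2}, {NULL, 1}};

    printf("%d %d\n", check_call_notation(ok, 3),
           check_call_notation(bad, 2));    /* 0 -1 */
    return 0;
}
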
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index daa9e2341fae0af69449ff5d60d82f7be5101c36..f40f9af348e4d5954a07bbaa56f23f9084dfb02d 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.112 2010/02/14 18:42:15 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.113 2010/02/26 02:00:52 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -229,11 +229,12 @@ get_sort_group_operators(Oid argtype,
 				lt_opr = gt_opr = InvalidOid;
 			}
 #else
+
 			/*
 			 * ... but for the moment we have to do this.  This is because
 			 * anyarray has sorting but not hashing support.  So, if the
-			 * element type is only hashable, there is nothing we can do
-			 * with the array type.
+			 * element type is only hashable, there is nothing we can do with
+			 * the array type.
 			 */
 			if (!OidIsValid(typentry->lt_opr) ||
 				!OidIsValid(typentry->eq_opr) ||
diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c
index b1282a14626679be7c6832b2faf2689452ca1006..ba91028c891754c15ce5077cdccc94a74937b5b4 100644
--- a/src/backend/parser/parse_param.c
+++ b/src/backend/parser/parse_param.c
@@ -17,7 +17,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/parser/parse_param.c,v 2.3 2010/01/13 01:17:07 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/parser/parse_param.c,v 2.4 2010/02/26 02:00:52 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -53,8 +53,8 @@ typedef struct VarParamState
 static Node *fixed_paramref_hook(ParseState *pstate, ParamRef *pref);
 static Node *variable_paramref_hook(ParseState *pstate, ParamRef *pref);
 static Node *variable_coerce_param_hook(ParseState *pstate, Param *param,
-										Oid targetTypeId, int32 targetTypeMod,
-										int location);
+						   Oid targetTypeId, int32 targetTypeMod,
+						   int location);
 static bool check_parameter_resolution_walker(Node *node, ParseState *pstate);
 
 
@@ -245,7 +245,7 @@ variable_coerce_param_hook(ParseState *pstate, Param *param,
  * of parsing with parse_variable_parameters.
  *
  * Note: this code intentionally does not check that all parameter positions
- * were used, nor that all got non-UNKNOWN types assigned.  Caller of parser
+ * were used, nor that all got non-UNKNOWN types assigned.	Caller of parser
  * should enforce that if it's important.
  */
 void
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 34f2dc410bd4943995cdc9e084225d7dc4e8771a..9fdcd83d75ba99501fbe60b8494d2a80b26b079a 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.149 2010/02/14 18:42:15 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.150 2010/02/26 02:00:52 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -88,11 +88,11 @@ refnameRangeTblEntry(ParseState *pstate,
 
 		/*
 		 * We can use LookupNamespaceNoError() here because we are only
-		 * interested in finding existing RTEs.  Checking USAGE permission
-		 * on the schema is unnecessary since it would have already been
-		 * checked when the RTE was made.  Furthermore, we want to report
-		 * "RTE not found", not "no permissions for schema", if the name
-		 * happens to match a schema name the user hasn't got access to.
+		 * interested in finding existing RTEs.  Checking USAGE permission on
+		 * the schema is unnecessary since it would have already been checked
+		 * when the RTE was made.  Furthermore, we want to report "RTE not
+		 * found", not "no permissions for schema", if the name happens to
+		 * match a schema name the user hasn't got access to.
 		 */
 		namespaceId = LookupNamespaceNoError(schemaname);
 		if (!OidIsValid(relId))
@@ -2369,8 +2369,8 @@ errorMissingRTE(ParseState *pstate, RangeVar *relation)
 
 	/*
 	 * Check to see if there are any potential matches in the query's
-	 * rangetable.  (Note: cases involving a bad schema name in the
-	 * RangeVar will throw error immediately here.  That seems OK.)
+	 * rangetable.	(Note: cases involving a bad schema name in the RangeVar
+	 * will throw error immediately here.  That seems OK.)
 	 */
 	rte = searchRangeTable(pstate, relation);
 
@@ -2394,11 +2394,11 @@ errorMissingRTE(ParseState *pstate, RangeVar *relation)
 	if (rte)
 		ereport(ERROR,
 				(errcode(ERRCODE_UNDEFINED_TABLE),
-				 errmsg("invalid reference to FROM-clause entry for table \"%s\"",
-						relation->relname),
+			errmsg("invalid reference to FROM-clause entry for table \"%s\"",
+				   relation->relname),
 				 (badAlias ?
-				  errhint("Perhaps you meant to reference the table alias \"%s\".",
-						  badAlias) :
+			errhint("Perhaps you meant to reference the table alias \"%s\".",
+					badAlias) :
 				  errhint("There is an entry for table \"%s\", but it cannot be referenced from this part of the query.",
 						  rte->eref->aliasname)),
 				 parser_errposition(pstate, relation->location)));
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index dce6f4d01c90d047f45f62cd9e606bbc11a58156..7e04a947830cbaf0afa730e594712472df7e297b 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.176 2010/01/02 16:57:50 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.177 2010/02/26 02:00:52 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -49,9 +49,9 @@ static List *ExpandAllTables(ParseState *pstate, int location);
 static List *ExpandIndirectionStar(ParseState *pstate, A_Indirection *ind,
 					  bool targetlist);
 static List *ExpandSingleTable(ParseState *pstate, RangeTblEntry *rte,
-							   int location, bool targetlist);
+				  int location, bool targetlist);
 static List *ExpandRowReference(ParseState *pstate, Node *expr,
-								bool targetlist);
+				   bool targetlist);
 static int	FigureColnameInternal(Node *node, char **name);
 
 
@@ -884,12 +884,12 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
 		 *
 		 * (e.g., SELECT emp.*, dname FROM emp, dept)
 		 *
-		 * Note: this code is a lot like transformColumnRef; it's tempting
-		 * to call that instead and then replace the resulting whole-row Var
-		 * with a list of Vars.  However, that would leave us with the
-		 * RTE's selectedCols bitmap showing the whole row as needing
-		 * select permission, as well as the individual columns.  That would
-		 * be incorrect (since columns added later shouldn't need select
+		 * Note: this code is a lot like transformColumnRef; it's tempting to
+		 * call that instead and then replace the resulting whole-row Var with
+		 * a list of Vars.	However, that would leave us with the RTE's
+		 * selectedCols bitmap showing the whole row as needing select
+		 * permission, as well as the individual columns.  That would be
+		 * incorrect (since columns added later shouldn't need select
 		 * permissions).  We could try to remove the whole-row permission bit
 		 * after the fact, but duplicating code is less messy.
 		 */
@@ -897,14 +897,15 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
 		char	   *relname = NULL;
 		RangeTblEntry *rte = NULL;
 		int			levels_up;
-		enum {
+		enum
+		{
 			CRSERR_NO_RTE,
 			CRSERR_WRONG_DB,
 			CRSERR_TOO_MANY
 		}			crserr = CRSERR_NO_RTE;
 
 		/*
-		 * Give the PreParseColumnRefHook, if any, first shot.  If it returns
+		 * Give the PreParseColumnRefHook, if any, first shot.	If it returns
 		 * non-null then we should use that expression.
 		 */
 		if (pstate->p_pre_columnref_hook != NULL)
@@ -932,35 +933,35 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
 										   &levels_up);
 				break;
 			case 4:
-			{
-				char	   *catname = strVal(linitial(fields));
-
-				/*
-				 * We check the catalog name and then ignore it.
-				 */
-				if (strcmp(catname, get_database_name(MyDatabaseId)) != 0)
 				{
-					crserr = CRSERR_WRONG_DB;
+					char	   *catname = strVal(linitial(fields));
+
+					/*
+					 * We check the catalog name and then ignore it.
+					 */
+					if (strcmp(catname, get_database_name(MyDatabaseId)) != 0)
+					{
+						crserr = CRSERR_WRONG_DB;
+						break;
+					}
+					nspname = strVal(lsecond(fields));
+					relname = strVal(lthird(fields));
+					rte = refnameRangeTblEntry(pstate, nspname, relname,
+											   cref->location,
+											   &levels_up);
 					break;
 				}
-				nspname = strVal(lsecond(fields));
-				relname = strVal(lthird(fields));
-				rte = refnameRangeTblEntry(pstate, nspname, relname,
-										   cref->location,
-										   &levels_up);
-				break;
-			}
 			default:
 				crserr = CRSERR_TOO_MANY;
 				break;
 		}
 
 		/*
-		 * Now give the PostParseColumnRefHook, if any, a chance.
-		 * We cheat a bit by passing the RangeTblEntry, not a Var,
-		 * as the planned translation.  (A single Var wouldn't be
-		 * strictly correct anyway.  This convention allows hooks
-		 * that really care to know what is happening.)
+		 * Now give the PostParseColumnRefHook, if any, a chance. We cheat a
+		 * bit by passing the RangeTblEntry, not a Var, as the planned
+		 * translation.  (A single Var wouldn't be strictly correct anyway.
+		 * This convention allows hooks that really care to know what is
+		 * happening.)
 		 */
 		if (pstate->p_post_columnref_hook != NULL)
 		{
@@ -1111,9 +1112,9 @@ ExpandSingleTable(ParseState *pstate, RangeTblEntry *rte,
 				  NULL, &vars);
 
 		/*
-		 * Require read access to the table.  This is normally redundant
-		 * with the markVarForSelectPriv calls below, but not if the table
-		 * has zero columns.
+		 * Require read access to the table.  This is normally redundant with
+		 * the markVarForSelectPriv calls below, but not if the table has zero
+		 * columns.
 		 */
 		rte->requiredPerms |= ACL_SELECT;
 
@@ -1147,7 +1148,7 @@ ExpandRowReference(ParseState *pstate, Node *expr,
 
 	/*
 	 * If the rowtype expression is a whole-row Var, we can expand the fields
-	 * as simple Vars.  Note: if the RTE is a relation, this case leaves us
+	 * as simple Vars.	Note: if the RTE is a relation, this case leaves us
 	 * with the RTE's selectedCols bitmap showing the whole row as needing
 	 * select permission, as well as the individual columns.  However, we can
 	 * only get here for weird notations like (table.*).*, so it's not worth
@@ -1165,8 +1166,8 @@ ExpandRowReference(ParseState *pstate, Node *expr,
 	}
 
 	/*
-	 * Otherwise we have to do it the hard way.  Our current implementation
-	 * is to generate multiple copies of the expression and do FieldSelects.
+	 * Otherwise we have to do it the hard way.  Our current implementation is
+	 * to generate multiple copies of the expression and do FieldSelects.
 	 * (This can be pretty inefficient if the expression involves nontrivial
 	 * computation :-(.)
 	 *
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 4a5a5725455a6898a9348944355c8af82b841523..1657096c6d83c47bfddd56cb6f8b0ecead97ced5 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -19,7 +19,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- *	$PostgreSQL: pgsql/src/backend/parser/parse_utilcmd.c,v 2.39 2010/02/14 18:42:15 rhaas Exp $
+ *	$PostgreSQL: pgsql/src/backend/parser/parse_utilcmd.c,v 2.40 2010/02/26 02:00:53 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -106,7 +106,7 @@ static void transformTableConstraint(ParseState *pstate,
 static void transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
 					 InhRelation *inhrelation);
 static void transformOfType(ParseState *pstate, CreateStmtContext *cxt,
-					 TypeName *ofTypename);
+				TypeName *ofTypename);
 static char *chooseIndexName(const RangeVar *relation, IndexStmt *index_stmt);
 static IndexStmt *generateClonedIndexStmt(CreateStmtContext *cxt,
 						Relation parent_index, AttrNumber *attmap);
@@ -186,7 +186,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
 	cxt.pkey = NULL;
 	cxt.hasoids = interpretOidsOption(stmt->options);
 
-	Assert(!stmt->ofTypename || !stmt->inhRelations); /* grammar enforces */
+	Assert(!stmt->ofTypename || !stmt->inhRelations);	/* grammar enforces */
 
 	if (stmt->ofTypename)
 		transformOfType(pstate, &cxt, stmt->ofTypename);
@@ -486,6 +486,7 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
 				break;
 
 			case CONSTR_FOREIGN:
+
 				/*
 				 * Fill in the current attribute's name and throw it into the
 				 * list of FK constraints to be processed later.
@@ -760,11 +761,11 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
 
 				if (comment != NULL)
 				{
-					CommentStmt	   *stmt;
+					CommentStmt *stmt;
 
 					/*
-					 * We have to assign the index a name now, so that we
-					 * can reference it in CommentStmt.
+					 * We have to assign the index a name now, so that we can
+					 * reference it in CommentStmt.
 					 */
 					if (index_stmt->idxname == NULL)
 						index_stmt->idxname = chooseIndexName(cxt->relation,
@@ -811,7 +812,7 @@ transformOfType(ParseState *pstate, CreateStmtContext *cxt, TypeName *ofTypename
 	tuple = typenameType(NULL, ofTypename, NULL);
 	typ = (Form_pg_type) GETSTRUCT(tuple);
 	ofTypeId = HeapTupleGetOid(tuple);
-	ofTypename->typeOid = ofTypeId; /* cached for later */
+	ofTypename->typeOid = ofTypeId;		/* cached for later */
 
 	if (typ->typtype != TYPTYPE_COMPOSITE)
 		ereport(ERROR,
@@ -823,7 +824,7 @@ transformOfType(ParseState *pstate, CreateStmtContext *cxt, TypeName *ofTypename
 	for (i = 0; i < tupdesc->natts; i++)
 	{
 		Form_pg_attribute attr = tupdesc->attrs[i];
-		ColumnDef *n = makeNode(ColumnDef);
+		ColumnDef  *n = makeNode(ColumnDef);
 
 		n->colname = pstrdup(NameStr(attr->attname));
 		n->typeName = makeTypeNameFromOid(attr->atttypid, attr->atttypmod);
@@ -934,7 +935,7 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx,
 	 */
 	if (index->primary || index->unique || idxrelrec->relhasexclusion)
 	{
-		Oid		constraintId = get_index_constraint(source_relid);
+		Oid			constraintId = get_index_constraint(source_relid);
 
 		if (OidIsValid(constraintId))
 		{
@@ -942,7 +943,7 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx,
 			Form_pg_constraint conrec;
 
 			ht_constr = SearchSysCache1(CONSTROID,
-									    ObjectIdGetDatum(constraintId));
+										ObjectIdGetDatum(constraintId));
 			if (!HeapTupleIsValid(ht_constr))
 				elog(ERROR, "cache lookup failed for constraint %u",
 					 constraintId);
@@ -955,9 +956,9 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx,
 			/* If it's an exclusion constraint, we need the operator names */
 			if (idxrelrec->relhasexclusion)
 			{
-				Datum  *elems;
-				int		nElems;
-				int		i;
+				Datum	   *elems;
+				int			nElems;
+				int			i;
 
 				Assert(conrec->contype == CONSTRAINT_EXCLUSION);
 				/* Extract operator OIDs from the pg_constraint tuple */
@@ -1310,17 +1311,17 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
 	index->concurrent = false;
 
 	/*
-	 * If it's an EXCLUDE constraint, the grammar returns a list of pairs
-	 * of IndexElems and operator names.  We have to break that apart into
+	 * If it's an EXCLUDE constraint, the grammar returns a list of pairs of
+	 * IndexElems and operator names.  We have to break that apart into
 	 * separate lists.
 	 */
 	if (constraint->contype == CONSTR_EXCLUSION)
 	{
 		foreach(lc, constraint->exclusions)
 		{
-			List	*pair = (List *) lfirst(lc);
-			IndexElem *elem;
-			List   *opname;
+			List	   *pair = (List *) lfirst(lc);
+			IndexElem  *elem;
+			List	   *opname;
 
 			Assert(list_length(pair) == 2);
 			elem = (IndexElem *) linitial(pair);
diff --git a/src/backend/port/win32/mingwcompat.c b/src/backend/port/win32/mingwcompat.c
index 5520ec1864c022884583bd3808d9d1590dedfe23..4088b2049234e07b19decec7a8383c3306600e10 100644
--- a/src/backend/port/win32/mingwcompat.c
+++ b/src/backend/port/win32/mingwcompat.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/port/win32/mingwcompat.c,v 1.10 2010/02/09 20:22:20 mha Exp $
+ *	  $PostgreSQL: pgsql/src/backend/port/win32/mingwcompat.c,v 1.11 2010/02/26 02:00:53 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -19,7 +19,7 @@
  * in any library. It's trivial enough that we can safely define it
  * ourselves.
  */
-const struct in6_addr in6addr_any = {{{0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}}};
+const struct in6_addr in6addr_any = {{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}};
 
 
 /*
diff --git a/src/backend/port/win32/signal.c b/src/backend/port/win32/signal.c
index 1ffc55a5c4dbb733b45c37eeefaeb4f239b9c27f..35959ae01555ed5f70e8fe6fb3a856deac9afae1 100644
--- a/src/backend/port/win32/signal.c
+++ b/src/backend/port/win32/signal.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/port/win32/signal.c,v 1.24 2010/01/31 17:16:23 mha Exp $
+ *	  $PostgreSQL: pgsql/src/backend/port/win32/signal.c,v 1.25 2010/02/26 02:00:53 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -275,31 +275,34 @@ pg_signal_thread(LPVOID param)
 		fConnected = ConnectNamedPipe(pipe, NULL) ? TRUE : (GetLastError() == ERROR_PIPE_CONNECTED);
 		if (fConnected)
 		{
-			HANDLE newpipe;
+			HANDLE		newpipe;
 
 			/*
-			 * We have a connected pipe. Pass this off to a separate thread that will do the actual
-			 * processing of the pipe.
+			 * We have a connected pipe. Pass this off to a separate thread
+			 * that will do the actual processing of the pipe.
 			 *
-			 * We must also create a new instance of the pipe *before* we start running the new
-			 * thread. If we don't, there is a race condition whereby the dispatch thread might
-			 * run CloseHandle() before we have created a new instance, thereby causing a small
+			 * We must also create a new instance of the pipe *before* we
+			 * start running the new thread. If we don't, there is a race
+			 * condition whereby the dispatch thread might run CloseHandle()
+			 * before we have created a new instance, thereby causing a small
 			 * window of time where we will miss incoming requests.
 			 */
 			newpipe = CreateNamedPipe(pipename, PIPE_ACCESS_DUPLEX,
-									   PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
-									   PIPE_UNLIMITED_INSTANCES, 16, 16, 1000, NULL);
+					   PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
+							   PIPE_UNLIMITED_INSTANCES, 16, 16, 1000, NULL);
 			if (newpipe == INVALID_HANDLE_VALUE)
 			{
 				/*
-				 * This really should never fail. Just retry in case it does, even though we have
-				 * a small race window in that case. There is nothing else we can do other than
-				 * abort the whole process which will be even worse.
+				 * This really should never fail. Just retry in case it does,
+				 * even though we have a small race window in that case. There
+				 * is nothing else we can do other than abort the whole
+				 * process which will be even worse.
 				 */
 				write_stderr("could not create signal listener pipe: error code %d; retrying\n", (int) GetLastError());
+
 				/*
-				 * Keep going so we at least dispatch this signal. Hopefully, the call will succeed
-				 * when retried in the loop soon after.
+				 * Keep going so we at least dispatch this signal. Hopefully,
+				 * the call will succeed when retried in the loop soon after.
 				 */
 			}
 			hThread = CreateThread(NULL, 0,
@@ -312,8 +315,9 @@ pg_signal_thread(LPVOID param)
 				CloseHandle(hThread);
 
 			/*
-			 * Background thread is running with our instance of the pipe. So replace our reference
-			 * with the newly created one and loop back up for another run.
+			 * Background thread is running with our instance of the pipe. So
+			 * replace our reference with the newly created one and loop back
+			 * up for another run.
 			 */
 			pipe = newpipe;
 		}
@@ -322,8 +326,8 @@ pg_signal_thread(LPVOID param)
 			/*
 			 * Connection failed. Cleanup and try again.
 			 *
-			 * This should never happen. If it does, we have a small race condition until we loop
-			 * up and re-create the pipe.
+			 * This should never happen. If it does, we have a small race
+			 * condition until we loop up and re-create the pipe.
 			 */
 			CloseHandle(pipe);
 			pipe = INVALID_HANDLE_VALUE;
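
The point of the comment above is the ordering: the replacement pipe instance is created before the connected one is handed to a worker, so there is never a moment with no listener. A platform-neutral standalone sketch, with stub functions standing in for CreateNamedPipe() and CreateThread():

#include <stdio.h>
#include <stdlib.h>

typedef struct listener
{
    int id;
} listener;

static int next_id = 0;

static listener *
listener_create(void)           /* stands in for CreateNamedPipe() */
{
    listener *l = malloc(sizeof(listener));

    l->id = next_id++;
    printf("listening on instance %d\n", l->id);
    return l;
}

static void
start_worker(listener *conn)    /* stands in for CreateThread() + dispatch */
{
    printf("worker takes over instance %d\n", conn->id);
    free(conn);                 /* the worker closes its instance when done */
}

int
main(void)
{
    listener *lsn = listener_create();
    int i;

    for (i = 0; i < 3; i++)     /* pretend three clients connect */
    {
        /*
         * Create the replacement *before* handing this instance off; doing
         * it afterwards would leave a window with no listening instance.
         */
        listener *replacement = listener_create();

        start_worker(lsn);
        lsn = replacement;
    }
    free(lsn);
    return 0;
}
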
diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c
index e05048681b155089d1f1b03cc22b010206e6c730..3355606e450d6671d1d841c4bac8f2b1f5087087 100644
--- a/src/backend/port/win32/socket.c
+++ b/src/backend/port/win32/socket.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.25 2010/02/17 05:51:40 mha Exp $
+ *	  $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.26 2010/02/26 02:00:53 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -24,7 +24,7 @@
  * This flag changes the behaviour *globally* for all socket operations,
  * so it should only be set for very short periods of time.
  */
-int	pgwin32_noblock = 0;
+int			pgwin32_noblock = 0;
 
 #undef socket
 #undef accept
@@ -326,8 +326,8 @@ pgwin32_recv(SOCKET s, char *buf, int len, int f)
 	if (pgwin32_noblock)
 	{
 		/*
-		 * No data received, and we are in "emulated non-blocking mode", so return
-		 * indicating that we'd block if we were to continue.
+		 * No data received, and we are in "emulated non-blocking mode", so
+		 * return indicating that we'd block if we were to continue.
 		 */
 		errno = EWOULDBLOCK;
 		return -1;
diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c
index 18917bff1e3be49069cfe59c427f8bb974b321a0..ffbc20736c7f75c482f5e8522e1d5d3587b71001 100644
--- a/src/backend/port/win32_shmem.c
+++ b/src/backend/port/win32_shmem.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/port/win32_shmem.c,v 1.15 2010/01/04 01:06:21 itagaki Exp $
+ *	  $PostgreSQL: pgsql/src/backend/port/win32_shmem.c,v 1.16 2010/02/26 02:00:53 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -159,8 +159,8 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
 		hmap = CreateFileMapping(INVALID_HANDLE_VALUE,	/* Use the pagefile */
 								 NULL,	/* Default security attrs */
 								 PAGE_READWRITE,		/* Memory is Read/Write */
-								 size_high,	/* Size Upper 32 Bits	*/
-								 size_low,	/* Size Lower 32 bits */
+								 size_high,		/* Size Upper 32 Bits	*/
+								 size_low,		/* Size Lower 32 bits */
 								 szShareMem);
 
 		if (!hmap)
@@ -341,14 +341,15 @@ pgwin32_SharedMemoryDelete(int status, Datum shmId)
 int
 pgwin32_ReserveSharedMemoryRegion(HANDLE hChild)
 {
-	void *address;
+	void	   *address;
 
 	Assert(UsedShmemSegAddr != NULL);
 	Assert(UsedShmemSegSize != 0);
 
 	address = VirtualAllocEx(hChild, UsedShmemSegAddr, UsedShmemSegSize,
-								MEM_RESERVE, PAGE_READWRITE);
-	if (address == NULL) {
+							 MEM_RESERVE, PAGE_READWRITE);
+	if (address == NULL)
+	{
 		/* Don't use FATAL since we're running in the postmaster */
 		elog(LOG, "could not reserve shared memory region (addr=%p) for child %p: %lu",
 			 UsedShmemSegAddr, hChild, GetLastError());
@@ -357,12 +358,12 @@ pgwin32_ReserveSharedMemoryRegion(HANDLE hChild)
 	if (address != UsedShmemSegAddr)
 	{
 		/*
-		 * Should never happen - in theory if allocation granularity causes strange
-		 * effects it could, so check just in case.
+		 * Should never happen - in theory if allocation granularity causes
+		 * strange effects it could, so check just in case.
 		 *
 		 * Don't use FATAL since we're running in the postmaster.
 		 */
-	    elog(LOG, "reserved shared memory region got incorrect address %p, expected %p",
+		elog(LOG, "reserved shared memory region got incorrect address %p, expected %p",
 			 address, UsedShmemSegAddr);
 		VirtualFreeEx(hChild, address, 0, MEM_RELEASE);
 		return false;
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 8d7282b5121b4f9c91254b24bcb0d2364c3ceaed..8de6d878dddc4c17242c5e36b45a303d04b74494 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -55,7 +55,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.108 2010/02/14 18:42:15 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.109 2010/02/26 02:00:55 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -653,7 +653,7 @@ AutoVacLauncherMain(int argc, char *argv[])
 				 * of a worker will continue to fail in the same way.
 				 */
 				AutoVacuumShmem->av_signal[AutoVacForkFailed] = false;
-				pg_usleep(1000000L);		/* 1s */
+				pg_usleep(1000000L);	/* 1s */
 				SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_WORKER);
 				continue;
 			}
@@ -1770,7 +1770,7 @@ get_database_list(void)
 	/*
 	 * Start a transaction so we can access pg_database, and get a snapshot.
 	 * We don't have a use for the snapshot itself, but we're interested in
-	 * the secondary effect that it sets RecentGlobalXmin.  (This is critical
+	 * the secondary effect that it sets RecentGlobalXmin.	(This is critical
 	 * for anything that reads heap pages, because HOT may decide to prune
 	 * them even if the process doesn't attempt to modify any tuples.)
 	 */
@@ -1786,7 +1786,7 @@ get_database_list(void)
 	while (HeapTupleIsValid(tup = heap_getnext(scan, ForwardScanDirection)))
 	{
 		Form_pg_database pgdatabase = (Form_pg_database) GETSTRUCT(tup);
-		avw_dbase   *avdb;
+		avw_dbase  *avdb;
 
 		avdb = (avw_dbase *) palloc(sizeof(avw_dbase));
 
@@ -2428,15 +2428,15 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map,
 		vac_cost_delay = (avopts && avopts->vacuum_cost_delay >= 0)
 			? avopts->vacuum_cost_delay
 			: (autovacuum_vac_cost_delay >= 0)
-				? autovacuum_vac_cost_delay
-				: VacuumCostDelay;
+			? autovacuum_vac_cost_delay
+			: VacuumCostDelay;
 
 		/* 0 or -1 in autovac setting means use plain vacuum_cost_limit */
 		vac_cost_limit = (avopts && avopts->vacuum_cost_limit > 0)
 			? avopts->vacuum_cost_limit
 			: (autovacuum_vac_cost_limit > 0)
-				? autovacuum_vac_cost_limit
-				: VacuumCostLimit;
+			? autovacuum_vac_cost_limit
+			: VacuumCostLimit;
 
 		/* these do not have autovacuum-specific settings */
 		freeze_min_age = (avopts && avopts->freeze_min_age >= 0)
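
The reindented conditionals above encode a three-level fallback: a per-table storage parameter wins, then the autovacuum GUC, then the plain VACUUM setting. A standalone restatement with bare ints, using the same negative-means-unset convention:

#include <stdio.h>

static int
effective_setting(int per_table, int autovac_guc, int plain_guc)
{
    return (per_table >= 0) ? per_table :
        (autovac_guc >= 0) ? autovac_guc :
        plain_guc;
}

int
main(void)
{
    printf("%d\n", effective_setting(5, 20, 0));    /* 5: per-table option wins */
    printf("%d\n", effective_setting(-1, 20, 0));   /* 20: autovacuum GUC */
    printf("%d\n", effective_setting(-1, -1, 0));   /* 0: plain VACUUM setting */
    return 0;
}
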
diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c
index 91ef9de021443e5e9db494cc01f5e4498e51888b..207ba8f0b2ca664b3a6a7da97dbd5693338750be 100644
--- a/src/backend/postmaster/fork_process.c
+++ b/src/backend/postmaster/fork_process.c
@@ -7,7 +7,7 @@
  * Copyright (c) 1996-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/postmaster/fork_process.c,v 1.11 2010/01/11 18:39:32 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/postmaster/fork_process.c,v 1.12 2010/02/26 02:00:55 momjian Exp $
  */
 #include "postgres.h"
 #include "postmaster/fork_process.h"
@@ -69,31 +69,31 @@ fork_process(void)
 		 * stupid, but the kernel hackers seem uninterested in improving it.)
 		 * Therefore it's often a good idea to protect the postmaster by
 		 * setting its oom_adj value negative (which has to be done in a
-		 * root-owned startup script).  If you just do that much, all child
+		 * root-owned startup script).	If you just do that much, all child
 		 * processes will also be protected against OOM kill, which might not
 		 * be desirable.  You can then choose to build with LINUX_OOM_ADJ
-		 * #defined to 0, or some other value that you want child processes
-		 * to adopt here.
+		 * #defined to 0, or some other value that you want child processes to
+		 * adopt here.
 		 */
 #ifdef LINUX_OOM_ADJ
 		{
 			/*
-			 * Use open() not stdio, to ensure we control the open flags.
-			 * Some Linux security environments reject anything but O_WRONLY.
+			 * Use open() not stdio, to ensure we control the open flags. Some
+			 * Linux security environments reject anything but O_WRONLY.
 			 */
-			int		fd = open("/proc/self/oom_adj", O_WRONLY, 0);
+			int			fd = open("/proc/self/oom_adj", O_WRONLY, 0);
 
 			/* We ignore all errors */
 			if (fd >= 0)
 			{
-				char	buf[16];
+				char		buf[16];
 
 				snprintf(buf, sizeof(buf), "%d\n", LINUX_OOM_ADJ);
 				(void) write(fd, buf, strlen(buf));
 				close(fd);
 			}
 		}
-#endif /* LINUX_OOM_ADJ */
+#endif   /* LINUX_OOM_ADJ */
 	}
 
 	return result;
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index a302d8fa0599716394833ba7e6b42c7751f720ad..0d7f62965f484009c4df7a17a13cdf6895d61252 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -13,7 +13,7 @@
  *
  *	Copyright (c) 2001-2010, PostgreSQL Global Development Group
  *
- *	$PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.200 2010/01/31 17:39:34 mha Exp $
+ *	$PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.201 2010/02/26 02:00:55 momjian Exp $
  * ----------
  */
 #include "postgres.h"
@@ -248,7 +248,7 @@ static void pgstat_sighup_handler(SIGNAL_ARGS);
 
 static PgStat_StatDBEntry *pgstat_get_db_entry(Oid databaseid, bool create);
 static PgStat_StatTabEntry *pgstat_get_tab_entry(PgStat_StatDBEntry *dbentry,
-												 Oid tableoid, bool create);
+					 Oid tableoid, bool create);
 static void pgstat_write_statsfile(bool permanent);
 static HTAB *pgstat_read_statsfile(Oid onlydb, bool permanent);
 static void backend_read_statsfile(void);
@@ -1036,7 +1036,7 @@ pgstat_vacuum_stat(void)
  *
  *	Collect the OIDs of all objects listed in the specified system catalog
  *	into a temporary hash table.  Caller should hash_destroy the result
- *	when done with it.  (However, we make the table in CurrentMemoryContext
+ *	when done with it.	(However, we make the table in CurrentMemoryContext
  *	so that it will be freed properly in event of an error.)
  * ----------
  */
@@ -1194,7 +1194,8 @@ pgstat_reset_shared_counters(const char *target)
  *	Tell the statistics collector to reset a single counter.
  * ----------
  */
-void pgstat_reset_single_counter(Oid objoid, PgStat_Single_Reset_Type type)
+void
+pgstat_reset_single_counter(Oid objoid, PgStat_Single_Reset_Type type)
 {
 	PgStat_MsgResetsinglecounter msg;
 
@@ -1832,8 +1833,8 @@ AtEOSubXact_PgStat(bool isCommit, int nestDepth)
 			else
 			{
 				/*
-				 * On abort, update top-level tabstat counts, then forget
-				 * the subtransaction
+				 * On abort, update top-level tabstat counts, then forget the
+				 * subtransaction
 				 */
 
 				/* count attempted actions regardless of commit/abort */
@@ -2353,8 +2354,8 @@ pgstat_beshutdown_hook(int code, Datum arg)
 	volatile PgBackendStatus *beentry = MyBEEntry;
 
 	/*
-	 * If we got as far as discovering our own database ID, we can report
-	 * what we did to the collector.  Otherwise, we'd be sending an invalid
+	 * If we got as far as discovering our own database ID, we can report what
+	 * we did to the collector.  Otherwise, we'd be sending an invalid
 	 * database ID, so forget it.  (This means that accesses to pg_database
 	 * during failed backend starts might never get counted.)
 	 */
@@ -2977,14 +2978,14 @@ PgstatCollectorMain(int argc, char *argv[])
 
 				case PGSTAT_MTYPE_RESETSHAREDCOUNTER:
 					pgstat_recv_resetsharedcounter(
-											 (PgStat_MsgResetsharedcounter *) &msg,
-											 len);
+									   (PgStat_MsgResetsharedcounter *) &msg,
+												   len);
 					break;
 
 				case PGSTAT_MTYPE_RESETSINGLECOUNTER:
 					pgstat_recv_resetsinglecounter(
-											 (PgStat_MsgResetsinglecounter *) &msg,
-											 len);
+									   (PgStat_MsgResetsinglecounter *) &msg,
+												   len);
 					break;
 
 				case PGSTAT_MTYPE_AUTOVAC_START:
@@ -3752,7 +3753,7 @@ pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len)
 		PgStat_TableEntry *tabmsg = &(msg->m_entry[i]);
 
 		tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
-													   (void *) &(tabmsg->t_id),
+													(void *) &(tabmsg->t_id),
 													   HASH_ENTER, &found);
 
 		if (!found)
@@ -3949,7 +3950,7 @@ pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len)
 static void
 pgstat_recv_resetsharedcounter(PgStat_MsgResetsharedcounter *msg, int len)
 {
-    if (msg->m_resettarget==RESET_BGWRITER)
+	if (msg->m_resettarget == RESET_BGWRITER)
 	{
 		/* Reset the global background writer statistics for the cluster. */
 		memset(&globalStats, 0, sizeof(globalStats));
@@ -3982,7 +3983,7 @@ pgstat_recv_resetsinglecounter(PgStat_MsgResetsinglecounter *msg, int len)
 	if (msg->m_resettype == RESET_TABLE)
 		(void) hash_search(dbentry->tables, (void *) &(msg->m_objectid), HASH_REMOVE, NULL);
 	else if (msg->m_resettype == RESET_FUNCTION)
-		(void) hash_search(dbentry->functions, (void *)&(msg->m_objectid), HASH_REMOVE, NULL);
+		(void) hash_search(dbentry->functions, (void *) &(msg->m_objectid), HASH_REMOVE, NULL);
 }
 
 /* ----------
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 6df11b8a7402703ff03788d8950942c3fb7c7508..a672da0d4598c3dce58bd054096fc0df2612ce9f 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -37,7 +37,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.602 2010/01/27 15:27:50 heikki Exp $
+ *	  $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.603 2010/02/26 02:00:56 momjian Exp $
  *
  * NOTES
  *
@@ -232,7 +232,7 @@ static bool RecoveryError = false;		/* T if WAL recovery failed */
  * state and the startup process is launched. The startup process begins by
  * reading the control file and other preliminary initialization steps.
  * In a normal startup, or after crash recovery, the startup process exits
- * with exit code 0 and we switch to PM_RUN state.  However, archive recovery
+ * with exit code 0 and we switch to PM_RUN state.	However, archive recovery
  * is handled specially since it takes much longer and we would like to support
  * hot standby during archive recovery.
  *
@@ -280,7 +280,8 @@ typedef enum
 	PM_WAIT_BACKUP,				/* waiting for online backup mode to end */
 	PM_WAIT_BACKENDS,			/* waiting for live backends to exit */
 	PM_SHUTDOWN,				/* waiting for bgwriter to do shutdown ckpt */
-	PM_SHUTDOWN_2,				/* waiting for archiver and walsenders to finish */
+	PM_SHUTDOWN_2,				/* waiting for archiver and walsenders to
+								 * finish */
 	PM_WAIT_DEAD_END,			/* waiting for dead_end children to exit */
 	PM_NO_CHILDREN				/* all important children have exited */
 } PMState;
@@ -294,8 +295,9 @@ bool		redirection_done = false;	/* stderr redirected for syslogger? */
 
 /* received START_AUTOVAC_LAUNCHER signal */
 static volatile sig_atomic_t start_autovac_launcher = false;
+
 /* the launcher needs to be signalled to communicate some condition */
-static volatile bool		avlauncher_needs_signal = false;
+static volatile bool avlauncher_needs_signal = false;
 
 /*
  * State for assigning random salts and cancel keys.
@@ -352,7 +354,7 @@ static void RandomSalt(char *md5Salt);
 static void signal_child(pid_t pid, int signal);
 static bool SignalSomeChildren(int signal, int targets);
 
-#define SignalChildren(sig)            SignalSomeChildren(sig, BACKEND_TYPE_ALL)
+#define SignalChildren(sig)			   SignalSomeChildren(sig, BACKEND_TYPE_ALL)
 #define SignalAutovacWorkers(sig)  SignalSomeChildren(sig, BACKEND_TYPE_AUTOVAC)
 
 /*
@@ -394,8 +396,8 @@ static pid_t internal_forkexec(int argc, char *argv[], Port *port);
 #ifdef WIN32
 typedef struct
 {
-	SOCKET		origsocket;		/* Original socket value, or PGINVALID_SOCKET if not a
-								 * socket */
+	SOCKET		origsocket;		/* Original socket value, or PGINVALID_SOCKET
+								 * if not a socket */
 	WSAPROTOCOL_INFO wsainfo;
 } InheritableSocket;
 #else
@@ -418,7 +420,7 @@ typedef struct
 #ifndef WIN32
 	unsigned long UsedShmemSegID;
 #else
-	 HANDLE		UsedShmemSegID;
+	HANDLE		UsedShmemSegID;
 #endif
 	void	   *UsedShmemSegAddr;
 	slock_t    *ShmemLock;
@@ -898,12 +900,13 @@ PostmasterMain(int argc, char *argv[])
 		if (err != kDNSServiceErr_NoError)
 			elog(LOG, "DNSServiceRegister() failed: error code %ld",
 				 (long) err);
+
 		/*
-		 * We don't bother to read the mDNS daemon's reply, and we expect
-		 * that it will automatically terminate our registration when the
-		 * socket is closed at postmaster termination.  So there's nothing
-		 * more to be done here.  However, the bonjour_sdref is kept around
-		 * so that forked children can close their copies of the socket.
+		 * We don't bother to read the mDNS daemon's reply, and we expect that
+		 * it will automatically terminate our registration when the socket is
+		 * closed at postmaster termination.  So there's nothing more to be
+		 * done here.  However, the bonjour_sdref is kept around so that
+		 * forked children can close their copies of the socket.
 		 */
 	}
 #endif
@@ -1293,7 +1296,8 @@ pmdaemonize(void)
 
 	/*
 	 * Some systems use setsid() to dissociate from the TTY's process group,
-	 * while on others it depends on stdin/stdout/stderr.  Do both if possible.
+	 * while on others it depends on stdin/stdout/stderr.  Do both if
+	 * possible.
 	 */
 #ifdef HAVE_SETSID
 	if (setsid() < 0)
@@ -1306,17 +1310,20 @@ pmdaemonize(void)
 
 	/*
 	 * Reassociate stdin/stdout/stderr.  fork_process() cleared any pending
-	 * output, so this should be safe.  The only plausible error is EINTR,
+	 * output, so this should be safe.	The only plausible error is EINTR,
 	 * which just means we should retry.
 	 */
-	do {
+	do
+	{
 		res = dup2(dvnull, 0);
 	} while (res < 0 && errno == EINTR);
 	close(dvnull);
-	do {
+	do
+	{
 		res = dup2(pmlog, 1);
 	} while (res < 0 && errno == EINTR);
-	do {
+	do
+	{
 		res = dup2(pmlog, 2);
 	} while (res < 0 && errno == EINTR);
 	close(pmlog);
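
The three do/while blocks above are the standard retry-on-EINTR idiom for dup2(). A minimal, self-contained sketch of that idiom (the descriptor names here are illustrative, not taken from the patch):

    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Retry dup2() while it is interrupted by a signal (EINTR). */
    static int
    dup2_retry(int fd, int target)
    {
        int         res;

        do
        {
            res = dup2(fd, target);
        } while (res < 0 && errno == EINTR);

        return res;
    }

    int
    main(void)
    {
        int         devnull = open("/dev/null", O_RDWR);

        if (devnull < 0 || dup2_retry(devnull, 0) < 0)  /* reattach stdin */
            return 1;
        close(devnull);
        return 0;
    }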
@@ -1654,8 +1661,8 @@ retry1:
 	/*
 	 * Now fetch parameters out of startup packet and save them into the Port
 	 * structure.  All data structures attached to the Port struct must be
-	 * allocated in TopMemoryContext so that they will remain available in
-	 * a running backend (even after PostmasterContext is destroyed).  We need
+	 * allocated in TopMemoryContext so that they will remain available in a
+	 * running backend (even after PostmasterContext is destroyed).  We need
 	 * not worry about leaking this storage on failure, since we aren't in the
 	 * postmaster process anymore.
 	 */
@@ -1693,7 +1700,7 @@ retry1:
 				port->cmdline_options = pstrdup(valptr);
 			else if (strcmp(nameptr, "replication") == 0)
 			{
-				if(!parse_bool(valptr, &am_walsender))
+				if (!parse_bool(valptr, &am_walsender))
 					ereport(FATAL,
 							(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 							 errmsg("invalid value for boolean option \"replication\"")));
@@ -1897,7 +1904,8 @@ static enum CAC_state
 canAcceptConnections(void)
 {
 	/*
-	 * Can't start backends when in startup/shutdown/inconsistent recovery state.
+	 * Can't start backends when in startup/shutdown/inconsistent recovery
+	 * state.
 	 *
 	 * In state PM_WAIT_BACKUP only superusers can connect (this must be
 	 * allowed so that a superuser can end online backup mode); we return
@@ -1914,8 +1922,8 @@ canAcceptConnections(void)
 			 pmState == PM_RECOVERY))
 			return CAC_STARTUP; /* normal startup */
 		if (!FatalError &&
-			 pmState == PM_RECOVERY_CONSISTENT)
-			return CAC_OK; /* connection OK during recovery */
+			pmState == PM_RECOVERY_CONSISTENT)
+			return CAC_OK;		/* connection OK during recovery */
 		return CAC_RECOVERY;	/* else must be crash recovery */
 	}
 
@@ -2169,9 +2177,9 @@ pmdie(SIGNAL_ARGS)
 			}
 
 			/*
-			 * Now wait for online backup mode to end and backends to exit.
-			 * If that is already the case, PostmasterStateMachine will take
-			 * the next step.
+			 * Now wait for online backup mode to end and backends to exit. If
+			 * that is already the case, PostmasterStateMachine will take the
+			 * next step.
 			 */
 			PostmasterStateMachine();
 			break;
@@ -2208,7 +2216,7 @@ pmdie(SIGNAL_ARGS)
 						(errmsg("aborting any active transactions")));
 				/* shut down all backends and autovac workers */
 				SignalSomeChildren(SIGTERM,
-								   BACKEND_TYPE_NORMAL | BACKEND_TYPE_AUTOVAC);
+								 BACKEND_TYPE_NORMAL | BACKEND_TYPE_AUTOVAC);
 				/* and the autovac launcher too */
 				if (AutoVacPID != 0)
 					signal_child(AutoVacPID, SIGTERM);
@@ -2478,10 +2486,9 @@ reaper(SIGNAL_ARGS)
 		/*
 		 * Was it the archiver?  If so, just try to start a new one; no need
 		 * to force reset of the rest of the system.  (If fail, we'll try
-		 * again in future cycles of the main loop.).  Unless we were
-		 * waiting for it to shut down; don't restart it in that case, and
-		 * and PostmasterStateMachine() will advance to the next shutdown
-		 * step.
+		 * again in future cycles of the main loop.)  Unless we were waiting
+		 * for it to shut down; don't restart it in that case, and
+		 * PostmasterStateMachine() will advance to the next shutdown step.
 		 */
 		if (pid == PgArchPID)
 		{
@@ -2919,8 +2926,8 @@ PostmasterStateMachine(void)
 		/*
 		 * PM_SHUTDOWN_2 state ends when there's no other children than
 		 * dead_end children left. There shouldn't be any regular backends
-		 * left by now anyway; what we're really waiting for is walsenders
-		 * and archiver.
+		 * left by now anyway; what we're really waiting for is walsenders and
+		 * archiver.
 		 *
 		 * Walreceiver should normally be dead by now, but not when a fast
 		 * shutdown is performed during recovery.
@@ -3262,9 +3269,9 @@ BackendInitialize(Port *port)
 	/*
 	 * PreAuthDelay is a debugging aid for investigating problems in the
 	 * authentication cycle: it can be set in postgresql.conf to allow time to
-	 * attach to the newly-forked backend with a debugger.  (See also
-	 * PostAuthDelay, which we allow clients to pass through PGOPTIONS, but
-	 * it is not honored until after authentication.)
+	 * attach to the newly-forked backend with a debugger.	(See also
+	 * PostAuthDelay, which we allow clients to pass through PGOPTIONS, but it
+	 * is not honored until after authentication.)
 	 */
 	if (PreAuthDelay > 0)
 		pg_usleep(PreAuthDelay * 1000000L);
@@ -3298,8 +3305,8 @@ BackendInitialize(Port *port)
 #endif
 
 	/*
-	 * We arrange for a simple exit(1) if we receive SIGTERM or SIGQUIT
-	 * or timeout while trying to collect the startup packet.  Otherwise the
+	 * We arrange for a simple exit(1) if we receive SIGTERM or SIGQUIT or
+	 * timeout while trying to collect the startup packet.	Otherwise the
 	 * postmaster cannot shutdown the database FAST or IMMED cleanly if a
 	 * buggy client fails to send the packet promptly.
 	 */
@@ -3360,7 +3367,7 @@ BackendInitialize(Port *port)
 	status = ProcessStartupPacket(port, false);
 
 	/*
-	 * Stop here if it was bad or a cancel packet.  ProcessStartupPacket
+	 * Stop here if it was bad or a cancel packet.	ProcessStartupPacket
 	 * already did any appropriate error reporting.
 	 */
 	if (status != STATUS_OK)
@@ -3372,7 +3379,7 @@ BackendInitialize(Port *port)
 	 *
 	 * For a walsender, the ps display is set in the following form:
 	 *
-	 *     postgres: wal sender process <user> <host> <activity>
+	 * postgres: wal sender process <user> <host> <activity>
 	 *
 	 * To achieve that, we pass "wal sender process" as username and username
 	 * as dbname to init_ps_display(). XXX: should add a new variant of
@@ -3728,8 +3735,8 @@ internal_forkexec(int argc, char *argv[], Port *port)
 			 (int) GetLastError());
 
 	/*
-	 * Reserve the memory region used by our main shared memory segment before we
-	 * resume the child process.
+	 * Reserve the memory region used by our main shared memory segment before
+	 * we resume the child process.
 	 */
 	if (!pgwin32_ReserveSharedMemoryRegion(pi.hProcess))
 	{
@@ -3743,7 +3750,8 @@ internal_forkexec(int argc, char *argv[], Port *port)
 									 (int) GetLastError())));
 		CloseHandle(pi.hProcess);
 		CloseHandle(pi.hThread);
-		return -1;			/* logging done made by pgwin32_ReserveSharedMemoryRegion() */
+		return -1;				/* logging already done by
+								 * pgwin32_ReserveSharedMemoryRegion() */
 	}
 
 	/*
@@ -4113,7 +4121,7 @@ sigusr1_handler(SIGNAL_ARGS)
 		PgStatPID = pgstat_start();
 
 		ereport(LOG,
-				 (errmsg("database system is ready to accept read only connections")));
+		(errmsg("database system is ready to accept read only connections")));
 
 		pmState = PM_RECOVERY_CONSISTENT;
 	}
@@ -4588,8 +4596,8 @@ save_backend_variables(BackendParameters *param, Port *port,
 #ifdef WIN32
 	param->PostmasterHandle = PostmasterHandle;
 	if (!write_duplicated_handle(&param->initial_signal_pipe,
-							pgwin32_create_signal_listener(childPid),
-							childProcess))
+								 pgwin32_create_signal_listener(childPid),
+								 childProcess))
 		return false;
 #endif
 
diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c
index 62caf54e85b96eabf7a62d8860abb445c9ceac5f..cb2c687aa0954de9ab44a2438262ec73fc26c506 100644
--- a/src/backend/regex/regcomp.c
+++ b/src/backend/regex/regcomp.c
@@ -28,7 +28,7 @@
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $PostgreSQL: pgsql/src/backend/regex/regcomp.c,v 1.47 2010/01/30 04:18:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regcomp.c,v 1.48 2010/02/26 02:00:57 momjian Exp $
  *
  */
 
@@ -1028,8 +1028,8 @@ parseqatom(struct vars * v,
 	/*----------
 	 * Prepare a general-purpose state skeleton.
 	 *
-	 *    ---> [s] ---prefix---> [begin] ---atom---> [end] ----rest---> [rp]
-	 *   /                                            /
+	 *	  ---> [s] ---prefix---> [begin] ---atom---> [end] ----rest---> [rp]
+	 *	 /											  /
 	 * [lp] ----> [s2] ----bypass---------------------
 	 *
 	 * where bypass is an empty, and prefix is some repetitions of atom
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index 875dbafa110e9f7aa55f5d9eebe245e9962dee37..49cf7b597f977d1e059962b2d79c8f2ab887c67d 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -10,7 +10,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c,v 1.4 2010/02/25 07:31:40 heikki Exp $
+ *	  $PostgreSQL: pgsql/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c,v 1.5 2010/02/26 02:00:58 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -49,7 +49,7 @@ static char *recvBuf = NULL;
 /* Prototypes for interface functions */
 static bool libpqrcv_connect(char *conninfo, XLogRecPtr startpoint);
 static bool libpqrcv_receive(int timeout, unsigned char *type,
-							 char **buffer, int *len);
+				 char **buffer, int *len);
 static void libpqrcv_disconnect(void);
 
 /* Prototypes for private functions */
@@ -94,22 +94,23 @@ libpqrcv_connect(char *conninfo, XLogRecPtr startpoint)
 						PQerrorMessage(streamConn))));
 
 	/*
-	 * Get the system identifier and timeline ID as a DataRow message
-	 * from the primary server.
+	 * Get the system identifier and timeline ID as a DataRow message from the
+	 * primary server.
 	 */
 	res = PQexec(streamConn, "IDENTIFY_SYSTEM");
 	if (PQresultStatus(res) != PGRES_TUPLES_OK)
-    {
+	{
 		PQclear(res);
 		ereport(ERROR,
 				(errmsg("could not receive the SYSID and timeline ID from "
 						"the primary server: %s",
 						PQerrorMessage(streamConn))));
-    }
+	}
 	if (PQnfields(res) != 2 || PQntuples(res) != 1)
 	{
-		int ntuples = PQntuples(res);
-		int nfields = PQnfields(res);
+		int			ntuples = PQntuples(res);
+		int			nfields = PQnfields(res);
+
 		PQclear(res);
 		ereport(ERROR,
 				(errmsg("invalid response from primary server"),
@@ -120,8 +121,7 @@ libpqrcv_connect(char *conninfo, XLogRecPtr startpoint)
 	primary_tli = pg_atoi(PQgetvalue(res, 0, 1), 4, 0);
 
 	/*
-	 * Confirm that the system identifier of the primary is the same
-	 * as ours.
+	 * Confirm that the system identifier of the primary is the same as ours.
 	 */
 	snprintf(standby_sysid, sizeof(standby_sysid), UINT64_FORMAT,
 			 GetSystemIdentifier());
@@ -135,8 +135,8 @@ libpqrcv_connect(char *conninfo, XLogRecPtr startpoint)
 	}
 
 	/*
-	 * Confirm that the current timeline of the primary is the same
-	 * as the recovery target timeline.
+	 * Confirm that the current timeline of the primary is the same as the
+	 * recovery target timeline.
 	 */
 	standby_tli = GetRecoveryTargetTLI();
 	PQclear(res);
@@ -172,7 +172,7 @@ libpqrcv_connect(char *conninfo, XLogRecPtr startpoint)
 static bool
 libpq_select(int timeout_ms)
 {
-	int	ret;
+	int			ret;
 
 	Assert(streamConn != NULL);
 	if (PQsocket(streamConn) < 0)
@@ -197,15 +197,15 @@ libpq_select(int timeout_ms)
 		struct timeval *ptr_timeout;
 
 		FD_ZERO(&input_mask);
-		FD_SET(PQsocket(streamConn), &input_mask);
+		FD_SET		(PQsocket(streamConn), &input_mask);
 
 		if (timeout_ms < 0)
 			ptr_timeout = NULL;
 		else
 		{
-			timeout.tv_sec	= timeout_ms / 1000;
-			timeout.tv_usec	= (timeout_ms % 1000) * 1000;
-			ptr_timeout		= &timeout;
+			timeout.tv_sec = timeout_ms / 1000;
+			timeout.tv_usec = (timeout_ms % 1000) * 1000;
+			ptr_timeout = &timeout;
 		}
 
 		ret = select(PQsocket(streamConn) + 1, &input_mask,
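
The hunk above only re-aligns the millisecond-to-timeval conversion, but the pattern it covers is worth spelling out: a negative timeout maps to a NULL timeval (block indefinitely), otherwise the milliseconds are split into seconds and microseconds for select(). A self-contained sketch of the same helper, with the socket passed in directly:

    #include <sys/select.h>
    #include <sys/time.h>

    /*
     * Wait for read-readiness on "sock" for at most "timeout_ms" milliseconds;
     * a negative timeout waits indefinitely.  Returns select()'s result.
     */
    static int
    wait_readable(int sock, int timeout_ms)
    {
        fd_set          input_mask;
        struct timeval  timeout;
        struct timeval *ptr_timeout;

        FD_ZERO(&input_mask);
        FD_SET(sock, &input_mask);

        if (timeout_ms < 0)
            ptr_timeout = NULL;                 /* block until readable */
        else
        {
            timeout.tv_sec = timeout_ms / 1000;             /* whole seconds */
            timeout.tv_usec = (timeout_ms % 1000) * 1000;   /* remainder in usec */
            ptr_timeout = &timeout;
        }

        return select(sock + 1, &input_mask, NULL, NULL, ptr_timeout);
    }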
@@ -239,12 +239,12 @@ libpqrcv_disconnect(void)
  *
  * Returns:
  *
- *   True if data was received. *type, *buffer and *len are set to
- *   the type of the received data, buffer holding it, and length,
- *   respectively.
+ *	 True if data was received. *type, *buffer and *len are set to
+ *	 the type of the received data, buffer holding it, and length,
+ *	 respectively.
  *
- *   False if no data was available within timeout, or wait was interrupted
- *   by signal.
+ *	 False if no data was available within timeout, or wait was interrupted
+ *	 by signal.
  *
  * The buffer returned is only valid until the next call of this function or
  * libpq_connect/disconnect.
@@ -261,10 +261,10 @@ libpqrcv_receive(int timeout, unsigned char *type, char **buffer, int *len)
 	recvBuf = NULL;
 
 	/*
-	 * If the caller requested to block, wait for data to arrive. But if
-	 * this is the first call after connecting, don't wait, because
-	 * there might already be some data in libpq buffer that we haven't
-	 * returned to caller.
+	 * If the caller requested to block, wait for data to arrive. But if this
+	 * is the first call after connecting, don't wait, because there might
+	 * already be some data in libpq buffer that we haven't returned to
+	 * caller.
 	 */
 	if (timeout > 0 && !justconnected)
 	{
@@ -280,11 +280,11 @@ libpqrcv_receive(int timeout, unsigned char *type, char **buffer, int *len)
 
 	/* Receive CopyData message */
 	rawlen = PQgetCopyData(streamConn, &recvBuf, 1);
-	if (rawlen == 0)	/* no data available yet, then return */
+	if (rawlen == 0)			/* no data available yet, then return */
 		return false;
-	if (rawlen == -1)	/* end-of-streaming or error */
+	if (rawlen == -1)			/* end-of-streaming or error */
 	{
-		PGresult	*res;
+		PGresult   *res;
 
 		res = PQgetResult(streamConn);
 		if (PQresultStatus(res) == PGRES_COMMAND_OK)
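
The comment rewrap above covers the walreceiver's use of PQgetCopyData() in asynchronous mode: a return of 0 means no data is available yet, and -1 means the copy stream ended and PQgetResult() must be drained. A sketch of that dispatch, with process_message() as a hypothetical stand-in for XLogWalRcvProcessMsg():

    #include <stdbool.h>
    #include <libpq-fe.h>

    /*
     * Fetch one CopyData message in async mode.  process_message() is a
     * hypothetical callback, not part of the patch.
     */
    static bool
    receive_one(PGconn *conn, void (*process_message) (char *buf, int len))
    {
        char       *buf = NULL;
        int         rawlen = PQgetCopyData(conn, &buf, 1);      /* async = 1 */

        if (rawlen == 0)                /* no data available yet */
            return false;
        if (rawlen == -1)               /* end-of-streaming: drain the result */
        {
            PGresult   *res = PQgetResult(conn);

            PQclear(res);
            return false;
        }
        if (rawlen < -1)                /* -2: error reported by libpq */
            return false;

        process_message(buf, rawlen);
        PQfreemem(buf);                 /* buffer was allocated by libpq */
        return true;
    }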
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index 3f82693dcea0ff8997ff5ec5c1c28b3667313a70..9f86b0645d0e246f5abdb0fdd71b1e8519b6cd05 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -29,7 +29,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/replication/walreceiver.c,v 1.5 2010/02/19 10:51:04 heikki Exp $
+ *	  $PostgreSQL: pgsql/src/backend/replication/walreceiver.c,v 1.6 2010/02/26 02:00:57 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -51,7 +51,7 @@
 #include "utils/resowner.h"
 
 /* Global variable to indicate if this process is a walreceiver process */
-bool am_walreceiver;
+bool		am_walreceiver;
 
 /* libpqreceiver hooks to these when loaded */
 walrcv_connect_type walrcv_connect = NULL;
@@ -102,9 +102,9 @@ static void
 ProcessWalRcvInterrupts(void)
 {
 	/*
-	 * Although walreceiver interrupt handling doesn't use the same scheme
-	 * as regular backends, call CHECK_FOR_INTERRUPTS() to make sure we
-	 * receive any incoming signals on Win32.
+	 * Although walreceiver interrupt handling doesn't use the same scheme as
+	 * regular backends, call CHECK_FOR_INTERRUPTS() to make sure we receive
+	 * any incoming signals on Win32.
 	 */
 	CHECK_FOR_INTERRUPTS();
 
@@ -148,37 +148,38 @@ static void XLogWalRcvFlush(void);
  */
 static struct
 {
-	XLogRecPtr	Write;	/* last byte + 1 written out in the standby */
-	XLogRecPtr	Flush;	/* last byte + 1 flushed in the standby */
-} LogstreamResult;
+	XLogRecPtr	Write;			/* last byte + 1 written out in the standby */
+	XLogRecPtr	Flush;			/* last byte + 1 flushed in the standby */
+}	LogstreamResult;
 
 /* Main entry point for walreceiver process */
 void
 WalReceiverMain(void)
 {
-	char conninfo[MAXCONNINFO];
-	XLogRecPtr startpoint;
+	char		conninfo[MAXCONNINFO];
+	XLogRecPtr	startpoint;
+
 	/* use volatile pointer to prevent code rearrangement */
 	volatile WalRcvData *walrcv = WalRcv;
 
 	am_walreceiver = true;
 
 	/*
-	 * WalRcv should be set up already (if we are a backend, we inherit
-	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
+	 * WalRcv should be set up already (if we are a backend, we inherit this
+	 * by fork() or EXEC_BACKEND mechanism from the postmaster).
 	 */
 	Assert(walrcv != NULL);
 
 	/*
 	 * Mark walreceiver as running in shared memory.
 	 *
-	 * Do this as early as possible, so that if we fail later on, we'll
-	 * set state to STOPPED. If we die before this, the startup process
-	 * will keep waiting for us to start up, until it times out.
+	 * Do this as early as possible, so that if we fail later on, we'll set
+	 * state to STOPPED. If we die before this, the startup process will keep
+	 * waiting for us to start up, until it times out.
 	 */
 	SpinLockAcquire(&walrcv->mutex);
 	Assert(walrcv->pid == 0);
-	switch(walrcv->walRcvState)
+	switch (walrcv->walRcvState)
 	{
 		case WALRCV_STOPPING:
 			/* If we've already been requested to stop, don't start up. */
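
The comment above describes the walreceiver claiming its shared-memory slot as early as possible, under the spinlock, so a later failure still shows up as a state change rather than a silent hang. A rough sketch of that claim step, using a pthread mutex and a simplified struct as stand-ins for the slock_t-protected WalRcvData (these types are assumptions for illustration, not the backend's own):

    #include <pthread.h>
    #include <stdbool.h>

    /* Simplified stand-ins for WalRcvState / WalRcvData. */
    typedef enum { RCV_STOPPED, RCV_STARTING, RCV_STREAMING, RCV_STOPPING } rcv_state;

    typedef struct
    {
        pthread_mutex_t mutex;          /* stands in for the spinlock */
        int             pid;
        rcv_state       state;
    } shared_rcv;

    /*
     * Claim the receiver slot: refuse if a stop was already requested,
     * otherwise advertise this process as streaming.
     */
    static bool
    claim_receiver_slot(shared_rcv *rcv, int mypid)
    {
        bool        ok;

        pthread_mutex_lock(&rcv->mutex);
        if (rcv->state == RCV_STOPPING || rcv->state == RCV_STOPPED)
            ok = false;
        else
        {
            rcv->pid = mypid;
            rcv->state = RCV_STREAMING;
            ok = true;
        }
        pthread_mutex_unlock(&rcv->mutex);
        return ok;
    }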
@@ -222,7 +223,8 @@ WalReceiverMain(void)
 #endif
 
 	/* Properly accept or ignore signals the postmaster might send us */
-	pqsignal(SIGHUP, WalRcvSigHupHandler);		/* set flag to read config file */
+	pqsignal(SIGHUP, WalRcvSigHupHandler);		/* set flag to read config
+												 * file */
 	pqsignal(SIGINT, SIG_IGN);
 	pqsignal(SIGTERM, WalRcvShutdownHandler);	/* request shutdown */
 	pqsignal(SIGQUIT, WalRcvQuickDieHandler);	/* hard crash time */
@@ -264,9 +266,9 @@ WalReceiverMain(void)
 	/* Loop until end-of-streaming or error */
 	for (;;)
 	{
-		unsigned char	type;
-		char   *buf;
-		int		len;
+		unsigned char type;
+		char	   *buf;
+		int			len;
 
 		/*
 		 * Emergency bailout if postmaster has died.  This is to avoid the
@@ -299,12 +301,12 @@ WalReceiverMain(void)
 			XLogWalRcvProcessMsg(type, buf, len);
 
 			/* Receive any more data we can without sleeping */
-			while(walrcv_receive(0, &type, &buf, &len))
+			while (walrcv_receive(0, &type, &buf, &len))
 				XLogWalRcvProcessMsg(type, buf, len);
 
 			/*
-			 * If we've written some records, flush them to disk and
-			 * let the startup process know about them.
+			 * If we've written some records, flush them to disk and let the
+			 * startup process know about them.
 			 */
 			XLogWalRcvFlush();
 		}
@@ -375,8 +377,8 @@ WalRcvQuickDieHandler(SIGNAL_ARGS)
 	 * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
 	 * backend.  This is necessary precisely because we don't clean up our
 	 * shared memory state.  (The "dead man switch" mechanism in pmsignal.c
-	 * should ensure the postmaster sees this as a crash, too, but no harm
-	 * in being doubly sure.)
+	 * should ensure the postmaster sees this as a crash, too, but no harm in
+	 * being doubly sure.)
 	 */
 	exit(2);
 }
@@ -389,20 +391,20 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
 {
 	switch (type)
 	{
-		case 'w':	/* WAL records */
-		{
-			XLogRecPtr	recptr;
+		case 'w':				/* WAL records */
+			{
+				XLogRecPtr	recptr;
 
-			if (len < sizeof(XLogRecPtr))
-				ereport(ERROR,
-						(errmsg("invalid WAL message received from primary")));
+				if (len < sizeof(XLogRecPtr))
+					ereport(ERROR,
+					  (errmsg("invalid WAL message received from primary")));
 
-			recptr = *((XLogRecPtr *) buf);
-			buf += sizeof(XLogRecPtr);
-			len -= sizeof(XLogRecPtr);
-			XLogWalRcvWrite(buf, len, recptr);
-			break;
-		}
+				recptr = *((XLogRecPtr *) buf);
+				buf += sizeof(XLogRecPtr);
+				len -= sizeof(XLogRecPtr);
+				XLogWalRcvWrite(buf, len, recptr);
+				break;
+			}
 		default:
 			ereport(ERROR,
 					(errcode(ERRCODE_PROTOCOL_VIOLATION),
@@ -417,20 +419,20 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
 static void
 XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
 {
-	int		startoff;
-	int		byteswritten;
+	int			startoff;
+	int			byteswritten;
 
 	while (nbytes > 0)
 	{
-		int		segbytes;
+		int			segbytes;
 
 		if (recvFile < 0 || !XLByteInSeg(recptr, recvId, recvSeg))
 		{
-			bool	use_existent;
+			bool		use_existent;
 
 			/*
-			 * fsync() and close current file before we switch to next one.
-			 * We would otherwise have to reopen this file to fsync it later
+			 * fsync() and close current file before we switch to next one. We
+			 * would otherwise have to reopen this file to fsync it later
 			 */
 			if (recvFile >= 0)
 			{
@@ -444,8 +446,8 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
 				if (close(recvFile) != 0)
 					ereport(PANIC,
 							(errcode_for_file_access(),
-							 errmsg("could not close log file %u, segment %u: %m",
-									recvId, recvSeg)));
+						errmsg("could not close log file %u, segment %u: %m",
+							   recvId, recvSeg)));
 			}
 			recvFile = -1;
 
@@ -500,14 +502,13 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
 		nbytes -= byteswritten;
 		buf += byteswritten;
 
-		LogstreamResult.Write	= recptr;
+		LogstreamResult.Write = recptr;
 
 		/*
-		 * XXX: Should we signal bgwriter to start a restartpoint
-		 * if we've consumed too much xlog since the last one, like
-		 * in normal processing? But this is not worth doing unless
-		 * a restartpoint can be created independently from a
-		 * checkpoint record.
+		 * XXX: Should we signal bgwriter to start a restartpoint if we've
+		 * consumed too much xlog since the last one, like in normal
+		 * processing? But this is not worth doing unless a restartpoint can
+		 * be created independently from a checkpoint record.
 		 */
 	}
 }
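
The earlier comment in this function notes that the current segment file is fsync()ed and closed before switching to the next one, so it never has to be reopened later just to flush it. A small sketch of that rotation rule, with a hypothetical file-naming scheme rather than real WAL segment names:

    #include <fcntl.h>
    #include <unistd.h>

    static int  current_fd = -1;        /* segment file currently being written */

    /*
     * Switch to the next segment file: flush and close the current one first,
     * so it never needs to be reopened just to fsync it.
     */
    static int
    switch_segment(const char *next_path)
    {
        if (current_fd >= 0)
        {
            if (fsync(current_fd) != 0 || close(current_fd) != 0)
                return -1;
            current_fd = -1;
        }

        current_fd = open(next_path, O_WRONLY | O_CREAT, 0600);
        return (current_fd < 0) ? -1 : 0;
    }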
@@ -520,7 +521,7 @@ XLogWalRcvFlush(void)
 	{
 		/* use volatile pointer to prevent code rearrangement */
 		volatile WalRcvData *walrcv = WalRcv;
-		char	activitymsg[50];
+		char		activitymsg[50];
 
 		issue_xlog_fsync(recvFile, recvId, recvSeg);
 
diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c
index 4fb132dcd4e5b8ca5da8eca0e7419b978496d81a..be305790fd3ad8867b233ce8297adae21fa7a82d 100644
--- a/src/backend/replication/walreceiverfuncs.c
+++ b/src/backend/replication/walreceiverfuncs.c
@@ -10,7 +10,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/replication/walreceiverfuncs.c,v 1.3 2010/01/27 15:27:51 heikki Exp $
+ *	  $PostgreSQL: pgsql/src/backend/replication/walreceiverfuncs.c,v 1.4 2010/02/26 02:00:57 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -42,7 +42,7 @@ WalRcvData *WalRcv = NULL;
 Size
 WalRcvShmemSize(void)
 {
-	Size size = 0;
+	Size		size = 0;
 
 	size = add_size(size, sizeof(WalRcvData));
 
@@ -53,7 +53,7 @@ WalRcvShmemSize(void)
 void
 WalRcvShmemInit(void)
 {
-	bool	found;
+	bool		found;
 
 	WalRcv = (WalRcvData *)
 		ShmemInitStruct("Wal Receiver Ctl", WalRcvShmemSize(), &found);
@@ -78,7 +78,7 @@ WalRcvInProgress(void)
 	/* use volatile pointer to prevent code rearrangement */
 	volatile WalRcvData *walrcv = WalRcv;
 	WalRcvState state;
-	pg_time_t startTime;
+	pg_time_t	startTime;
 
 	SpinLockAcquire(&walrcv->mutex);
 
@@ -88,14 +88,14 @@ WalRcvInProgress(void)
 	SpinLockRelease(&walrcv->mutex);
 
 	/*
-	 * If it has taken too long for walreceiver to start up, give up.
-	 * Setting the state to STOPPED ensures that if walreceiver later
-	 * does start up after all, it will see that it's not supposed to be
-	 * running and die without doing anything.
+	 * If it has taken too long for walreceiver to start up, give up. Setting
+	 * the state to STOPPED ensures that if walreceiver later does start up
+	 * after all, it will see that it's not supposed to be running and die
+	 * without doing anything.
 	 */
 	if (state == WALRCV_STARTING)
 	{
-		pg_time_t now = (pg_time_t) time(NULL);
+		pg_time_t	now = (pg_time_t) time(NULL);
 
 		if ((now - startTime) > WALRCV_STARTUP_TIMEOUT)
 		{
@@ -122,7 +122,7 @@ ShutdownWalRcv(void)
 {
 	/* use volatile pointer to prevent code rearrangement */
 	volatile WalRcvData *walrcv = WalRcv;
-	pid_t walrcvpid = 0;
+	pid_t		walrcvpid = 0;
 
 	/*
 	 * Request walreceiver to stop. Walreceiver will switch to WALRCV_STOPPED
@@ -130,7 +130,7 @@ ShutdownWalRcv(void)
 	 * restart itself.
 	 */
 	SpinLockAcquire(&walrcv->mutex);
-	switch(walrcv->walRcvState)
+	switch (walrcv->walRcvState)
 	{
 		case WALRCV_STOPPED:
 			break;
@@ -180,14 +180,13 @@ RequestXLogStreaming(XLogRecPtr recptr, const char *conninfo)
 {
 	/* use volatile pointer to prevent code rearrangement */
 	volatile WalRcvData *walrcv = WalRcv;
-	pg_time_t now = (pg_time_t) time(NULL);
+	pg_time_t	now = (pg_time_t) time(NULL);
 
 	/*
-	 * We always start at the beginning of the segment.
-	 * That prevents a broken segment (i.e., with no records in the
-	 * first half of a segment) from being created by XLOG streaming,
-	 * which might cause trouble later on if the segment is e.g
-	 * archived.
+	 * We always start at the beginning of the segment. That prevents a broken
+	 * segment (i.e., with no records in the first half of a segment) from
+	 * being created by XLOG streaming, which might cause trouble later on if
+	 * the segment is, e.g., archived.
 	 */
 	if (recptr.xrecoff % XLogSegSize != 0)
 		recptr.xrecoff -= recptr.xrecoff % XLogSegSize;
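
The modulo arithmetic above rounds the requested start position down to an XLOG segment boundary, for the reason the reflowed comment gives. A short worked example, using a 16 MB constant as a stand-in for XLogSegSize:

    #include <stdio.h>

    #define SEG_SIZE (16 * 1024 * 1024)         /* stand-in for XLogSegSize */

    /* Round a WAL byte offset down to the start of its segment. */
    static unsigned int
    round_down_to_segment(unsigned int xrecoff)
    {
        if (xrecoff % SEG_SIZE != 0)
            xrecoff -= xrecoff % SEG_SIZE;
        return xrecoff;
    }

    int
    main(void)
    {
        /* 0x0123ABCD falls inside the segment starting at 0x01000000 */
        printf("%#010x -> %#010x\n", 0x0123ABCDu,
               round_down_to_segment(0x0123ABCDu));
        return 0;
    }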
@@ -225,4 +224,3 @@ GetWalRcvWriteRecPtr(void)
 
 	return recptr;
 }
-
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index a8706ab06f6cbd5e93492f3a0e3d07df55869aef..2a2765645e4803a91e73a6db2a28cd0ac6af9419 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -30,7 +30,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/replication/walsender.c,v 1.8 2010/02/25 07:31:40 heikki Exp $
+ *	  $PostgreSQL: pgsql/src/backend/replication/walsender.c,v 1.9 2010/02/26 02:00:58 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -61,11 +61,11 @@ WalSndCtlData *WalSndCtl = NULL;
 static WalSnd *MyWalSnd = NULL;
 
 /* Global state */
-bool	am_walsender	= false;	/* Am I a walsender process ? */
+bool		am_walsender = false;		/* Am I a walsender process? */
 
 /* User-settable parameters for walsender */
-int	MaxWalSenders = 0;		/* the maximum number of concurrent walsenders */
-int	WalSndDelay	= 200;		/* max sleep time between some actions */
+int			MaxWalSenders = 0;	/* the maximum number of concurrent walsenders */
+int			WalSndDelay = 200;	/* max sleep time between some actions */
 
 #define NAPTIME_PER_CYCLE 100	/* max sleep time between cycles (100ms) */
 
@@ -96,9 +96,9 @@ static void WalSndQuickDieHandler(SIGNAL_ARGS);
 
 /* Prototypes for private functions */
 static int	WalSndLoop(void);
-static void	InitWalSnd(void);
-static void	WalSndHandshake(void);
-static void	WalSndKill(int code, Datum arg);
+static void InitWalSnd(void);
+static void WalSndHandshake(void);
+static void WalSndKill(int code, Datum arg);
 static void XLogRead(char *buf, XLogRecPtr recptr, Size nbytes);
 static bool XLogSend(StringInfo outMsg);
 static void CheckClosedConnection(void);
@@ -155,13 +155,13 @@ static void
 WalSndHandshake(void)
 {
 	StringInfoData input_message;
-	bool replication_started = false;
+	bool		replication_started = false;
 
 	initStringInfo(&input_message);
 
 	while (!replication_started)
 	{
-		int firstchar;
+		int			firstchar;
 
 		/* Wait for a command to arrive */
 		firstchar = pq_getbyte();
@@ -183,99 +183,99 @@ WalSndHandshake(void)
 			 * blocking because we've been able to get message type code.
 			 */
 			if (pq_getmessage(&input_message, 0))
-				firstchar = EOF;		/* suitable message already logged */
+				firstchar = EOF;	/* suitable message already logged */
 		}
 
 		/* Handle the very limited subset of commands expected in this phase */
 		switch (firstchar)
 		{
-			case 'Q':	/* Query message */
-			{
-				const char *query_string;
-				XLogRecPtr	recptr;
-
-				query_string = pq_getmsgstring(&input_message);
-				pq_getmsgend(&input_message);
-
-				if (strcmp(query_string, "IDENTIFY_SYSTEM") == 0)
-				{
-					StringInfoData	buf;
-					char	sysid[32];
-					char	tli[11];
-
-					/*
-					 * Reply with a result set with one row, two columns.
-					 * First col is system ID, and second if timeline ID
-					 */
-
-					snprintf(sysid, sizeof(sysid), UINT64_FORMAT,
-							 GetSystemIdentifier());
-					snprintf(tli, sizeof(tli), "%u", ThisTimeLineID);
-
-					/* Send a RowDescription message */
-					pq_beginmessage(&buf, 'T');
-					pq_sendint(&buf, 2, 2); /* 2 fields */
-
-					/* first field */
-					pq_sendstring(&buf, "systemid"); /* col name */
-					pq_sendint(&buf, 0, 4);			/* table oid */
-					pq_sendint(&buf, 0, 2);			/* attnum */
-					pq_sendint(&buf, TEXTOID, 4);	/* type oid */
-					pq_sendint(&buf, -1, 2);		/* typlen */
-					pq_sendint(&buf, 0, 4);			/* typmod */
-					pq_sendint(&buf, 0, 2);			/* format code */
-
-					/* second field */
-					pq_sendstring(&buf, "timeline"); /* col name */
-					pq_sendint(&buf, 0, 4);			/* table oid */
-					pq_sendint(&buf, 0, 2);			/* attnum */
-					pq_sendint(&buf, INT4OID, 4);	/* type oid */
-					pq_sendint(&buf, 4, 2);			/* typlen */
-					pq_sendint(&buf, 0, 4);			/* typmod */
-					pq_sendint(&buf, 0, 2);			/* format code */
-					pq_endmessage(&buf);
-
-					/* Send a DataRow message */
-					pq_beginmessage(&buf, 'D');
-					pq_sendint(&buf, 2, 2);			/* # of columns */
-					pq_sendint(&buf, strlen(sysid), 4); /* col1 len */
-					pq_sendbytes(&buf, (char *) &sysid, strlen(sysid));
-					pq_sendint(&buf, strlen(tli), 4); /* col2 len */
-					pq_sendbytes(&buf, (char *) tli, strlen(tli));
-					pq_endmessage(&buf);
-
-					/* Send CommandComplete and ReadyForQuery messages */
-					EndCommand("SELECT", DestRemote);
-					ReadyForQuery(DestRemote);
-				}
-				else if (sscanf(query_string, "START_REPLICATION %X/%X",
-								&recptr.xlogid, &recptr.xrecoff) == 2)
+			case 'Q':			/* Query message */
 				{
-					StringInfoData	buf;
-
-					/* Send a CopyOutResponse message, and start streaming */
-					pq_beginmessage(&buf, 'H');
-					pq_sendbyte(&buf, 0);
-					pq_sendint(&buf, 0, 2);
-					pq_endmessage(&buf);
-
-					/*
-					 * Initialize position to the received one, then
-					 * the xlog records begin to be shipped from that position
-					 */
-					sentPtr	= recptr;
-
-					/* break out of the loop */
-					replication_started = true;
+					const char *query_string;
+					XLogRecPtr	recptr;
+
+					query_string = pq_getmsgstring(&input_message);
+					pq_getmsgend(&input_message);
+
+					if (strcmp(query_string, "IDENTIFY_SYSTEM") == 0)
+					{
+						StringInfoData buf;
+						char		sysid[32];
+						char		tli[11];
+
+						/*
+						 * Reply with a result set with one row, two columns.
+						 * First col is system ID, and second is timeline ID
+						 */
+
+						snprintf(sysid, sizeof(sysid), UINT64_FORMAT,
+								 GetSystemIdentifier());
+						snprintf(tli, sizeof(tli), "%u", ThisTimeLineID);
+
+						/* Send a RowDescription message */
+						pq_beginmessage(&buf, 'T');
+						pq_sendint(&buf, 2, 2); /* 2 fields */
+
+						/* first field */
+						pq_sendstring(&buf, "systemid");		/* col name */
+						pq_sendint(&buf, 0, 4); /* table oid */
+						pq_sendint(&buf, 0, 2); /* attnum */
+						pq_sendint(&buf, TEXTOID, 4);	/* type oid */
+						pq_sendint(&buf, -1, 2);		/* typlen */
+						pq_sendint(&buf, 0, 4); /* typmod */
+						pq_sendint(&buf, 0, 2); /* format code */
+
+						/* second field */
+						pq_sendstring(&buf, "timeline");		/* col name */
+						pq_sendint(&buf, 0, 4); /* table oid */
+						pq_sendint(&buf, 0, 2); /* attnum */
+						pq_sendint(&buf, INT4OID, 4);	/* type oid */
+						pq_sendint(&buf, 4, 2); /* typlen */
+						pq_sendint(&buf, 0, 4); /* typmod */
+						pq_sendint(&buf, 0, 2); /* format code */
+						pq_endmessage(&buf);
+
+						/* Send a DataRow message */
+						pq_beginmessage(&buf, 'D');
+						pq_sendint(&buf, 2, 2); /* # of columns */
+						pq_sendint(&buf, strlen(sysid), 4);		/* col1 len */
+						pq_sendbytes(&buf, (char *) &sysid, strlen(sysid));
+						pq_sendint(&buf, strlen(tli), 4);		/* col2 len */
+						pq_sendbytes(&buf, (char *) tli, strlen(tli));
+						pq_endmessage(&buf);
+
+						/* Send CommandComplete and ReadyForQuery messages */
+						EndCommand("SELECT", DestRemote);
+						ReadyForQuery(DestRemote);
+					}
+					else if (sscanf(query_string, "START_REPLICATION %X/%X",
+									&recptr.xlogid, &recptr.xrecoff) == 2)
+					{
+						StringInfoData buf;
+
+						/* Send a CopyOutResponse message, and start streaming */
+						pq_beginmessage(&buf, 'H');
+						pq_sendbyte(&buf, 0);
+						pq_sendint(&buf, 0, 2);
+						pq_endmessage(&buf);
+
+						/*
+						 * Initialize position to the received one, then the
+						 * xlog records begin to be shipped from that position
+						 */
+						sentPtr = recptr;
+
+						/* break out of the loop */
+						replication_started = true;
+					}
+					else
+					{
+						ereport(FATAL,
+								(errcode(ERRCODE_PROTOCOL_VIOLATION),
+								 errmsg("invalid standby query string: %s", query_string)));
+					}
+					break;
 				}
-				else
-				{
-					ereport(FATAL,
-							(errcode(ERRCODE_PROTOCOL_VIOLATION),
-							 errmsg("invalid standby query string: %s", query_string)));
-				}
-				break;
-			}
 
 			case 'X':
 				/* standby is closing the connection */
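
Within the reindented handshake, the START_REPLICATION start position is parsed with sscanf() as two hexadecimal fields separated by a slash. A minimal sketch of that parse, with a simplified struct standing in for XLogRecPtr:

    #include <stdio.h>

    /* Simplified stand-in for XLogRecPtr. */
    typedef struct
    {
        unsigned int xlogid;            /* high half of the WAL position */
        unsigned int xrecoff;           /* low half (byte offset) */
    } wal_ptr;

    int
    main(void)
    {
        const char *query_string = "START_REPLICATION 0/2000000";
        wal_ptr     recptr;

        if (sscanf(query_string, "START_REPLICATION %X/%X",
                   &recptr.xlogid, &recptr.xrecoff) == 2)
            printf("start streaming at %X/%X\n", recptr.xlogid, recptr.xrecoff);
        else
            fprintf(stderr, "invalid standby query string: %s\n", query_string);
        return 0;
    }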
@@ -303,7 +303,7 @@ static void
 CheckClosedConnection(void)
 {
 	unsigned char firstchar;
-	int r;
+	int			r;
 
 	r = pq_getbyte_if_available(&firstchar);
 	if (r < 0)
@@ -323,9 +323,9 @@ CheckClosedConnection(void)
 	/* Handle the very limited subset of commands expected in this phase */
 	switch (firstchar)
 	{
-		/*
-		 * 'X' means that the standby is closing down the socket.
-		 */
+			/*
+			 * 'X' means that the standby is closing down the socket.
+			 */
 		case 'X':
 			proc_exit(0);
 
@@ -348,7 +348,7 @@ WalSndLoop(void)
 	/* Loop forever */
 	for (;;)
 	{
-		int remain;	/* remaining time (ms) */
+		int			remain;		/* remaining time (ms) */
 
 		/*
 		 * Emergency bailout if postmaster has died.  This is to avoid the
@@ -416,15 +416,16 @@ WalSndLoop(void)
 	return 1;
 
 eof:
+
 	/*
-	 * Reset whereToSendOutput to prevent ereport from attempting
-	 * to send any more messages to the standby.
+	 * Reset whereToSendOutput to prevent ereport from attempting to send any
+	 * more messages to the standby.
 	 */
 	if (whereToSendOutput == DestRemote)
 		whereToSendOutput = DestNone;
 
 	proc_exit(0);
-	return 1;		/* keep the compiler quiet */
+	return 1;					/* keep the compiler quiet */
 }
 
 /* Initialize a per-walsender data structure for this walsender process */
@@ -432,7 +433,7 @@ static void
 InitWalSnd(void)
 {
 	/* use volatile pointer to prevent code rearrangement */
-	int		i;
+	int			i;
 
 	/*
 	 * WalSndCtl should be set up already (we inherit this by fork() or
@@ -497,13 +498,13 @@ WalSndKill(int code, Datum arg)
 void
 XLogRead(char *buf, XLogRecPtr recptr, Size nbytes)
 {
-	char path[MAXPGPATH];
-	uint32 startoff;
+	char		path[MAXPGPATH];
+	uint32		startoff;
 
 	while (nbytes > 0)
 	{
-		int segbytes;
-		int readbytes;
+		int			segbytes;
+		int			readbytes;
 
 		startoff = recptr.xrecoff % XLogSegSize;
 
@@ -518,7 +519,7 @@ XLogRead(char *buf, XLogRecPtr recptr, Size nbytes)
 
 			sendFile = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0);
 			if (sendFile < 0)
-				ereport(FATAL, /* XXX: Why FATAL? */
+				ereport(FATAL,	/* XXX: Why FATAL? */
 						(errcode_for_file_access(),
 						 errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
 								path, sendId, sendSeg)));
@@ -546,9 +547,9 @@ XLogRead(char *buf, XLogRecPtr recptr, Size nbytes)
 		if (readbytes <= 0)
 			ereport(FATAL,
 					(errcode_for_file_access(),
-					 errmsg("could not read from log file %u, segment %u, offset %u, "
-							"length %lu: %m",
-							sendId, sendSeg, sendOff, (unsigned long) segbytes)));
+			errmsg("could not read from log file %u, segment %u, offset %u, "
+				   "length %lu: %m",
+				   sendId, sendSeg, sendOff, (unsigned long) segbytes)));
 
 		/* Update state for read */
 		XLByteAdvance(recptr, readbytes);
@@ -569,7 +570,8 @@ static bool
 XLogSend(StringInfo outMsg)
 {
 	XLogRecPtr	SendRqstPtr;
-	char	activitymsg[50];
+	char		activitymsg[50];
+
 	/* use volatile pointer to prevent code rearrangement */
 	volatile WalSnd *walsnd = MyWalSnd;
 
@@ -581,15 +583,15 @@ XLogSend(StringInfo outMsg)
 		return true;
 
 	/*
-	 * We gather multiple records together by issuing just one XLogRead()
-	 * of a suitable size, and send them as one CopyData message. Repeat
-	 * until we've sent everything we can.
+	 * We gather multiple records together by issuing just one XLogRead() of a
+	 * suitable size, and send them as one CopyData message. Repeat until
+	 * we've sent everything we can.
 	 */
 	while (XLByteLT(sentPtr, SendRqstPtr))
 	{
-		XLogRecPtr startptr;
-		XLogRecPtr endptr;
-		Size	nbytes;
+		XLogRecPtr	startptr;
+		XLogRecPtr	endptr;
+		Size		nbytes;
 
 		/*
 		 * Figure out how much to send in one message. If there's less than
@@ -600,8 +602,8 @@ XLogSend(StringInfo outMsg)
 		 * relies on the fact that we never split a WAL record across two
 		 * messages. Since a long WAL record is split at page boundary into
 		 * continuation records, page boundary is always a safe cut-off point.
-		 * We also assume that SendRqstPtr never points in the middle of a
-		 * WAL record.
+		 * We also assume that SendRqstPtr never points in the middle of a WAL
+		 * record.
 		 */
 		startptr = sentPtr;
 		if (startptr.xrecoff >= XLogFileSize)
@@ -625,10 +627,10 @@ XLogSend(StringInfo outMsg)
 		/*
 		 * OK to read and send the slice.
 		 *
-		 * We don't need to convert the xlogid/xrecoff from host byte order
-		 * to network byte order because the both server can be expected to
-		 * have the same byte order. If they have different byte order, we
-		 * don't reach here.
+		 * We don't need to convert the xlogid/xrecoff from host byte order to
+		 * network byte order because both servers can be expected to have
+		 * the same byte order. If they have different byte order, we don't
+		 * reach here.
 		 */
 		pq_sendbyte(outMsg, 'w');
 		pq_sendbytes(outMsg, (char *) &startptr, sizeof(startptr));
@@ -644,8 +646,8 @@ XLogSend(StringInfo outMsg)
 		sentPtr = endptr;
 
 		/*
-		 * Read the log directly into the output buffer to prevent
-		 * extra memcpy calls.
+		 * Read the log directly into the output buffer to prevent extra
+		 * memcpy calls.
 		 */
 		enlargeStringInfo(outMsg, nbytes);
 
@@ -714,8 +716,8 @@ WalSndQuickDieHandler(SIGNAL_ARGS)
 	 * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
 	 * backend.  This is necessary precisely because we don't clean up our
 	 * shared memory state.  (The "dead man switch" mechanism in pmsignal.c
-	 * should ensure the postmaster sees this as a crash, too, but no harm
-	 * in being doubly sure.)
+	 * should ensure the postmaster sees this as a crash, too, but no harm in
+	 * being doubly sure.)
 	 */
 	exit(2);
 }
@@ -732,14 +734,16 @@ void
 WalSndSignals(void)
 {
 	/* Set up signal handlers */
-	pqsignal(SIGHUP, WalSndSigHupHandler);	/* set flag to read config file */
+	pqsignal(SIGHUP, WalSndSigHupHandler);		/* set flag to read config
+												 * file */
 	pqsignal(SIGINT, SIG_IGN);	/* not used */
 	pqsignal(SIGTERM, WalSndShutdownHandler);	/* request shutdown */
 	pqsignal(SIGQUIT, WalSndQuickDieHandler);	/* hard crash time */
 	pqsignal(SIGALRM, SIG_IGN);
 	pqsignal(SIGPIPE, SIG_IGN);
-	pqsignal(SIGUSR1, SIG_IGN);	/* not used */
-	pqsignal(SIGUSR2, WalSndLastCycleHandler);	/* request a last cycle and shutdown */
+	pqsignal(SIGUSR1, SIG_IGN); /* not used */
+	pqsignal(SIGUSR2, WalSndLastCycleHandler);	/* request a last cycle and
+												 * shutdown */
 
 	/* Reset some signals that are accepted by postmaster but not here */
 	pqsignal(SIGCHLD, SIG_DFL);
@@ -753,7 +757,7 @@ WalSndSignals(void)
 Size
 WalSndShmemSize(void)
 {
-	Size size = 0;
+	Size		size = 0;
 
 	size = offsetof(WalSndCtlData, walsnds);
 	size = add_size(size, mul_size(MaxWalSenders, sizeof(WalSnd)));
@@ -765,8 +769,8 @@ WalSndShmemSize(void)
 void
 WalSndShmemInit(void)
 {
-	bool	found;
-	int		i;
+	bool		found;
+	int			i;
 
 	WalSndCtl = (WalSndCtlData *)
 		ShmemInitStruct("Wal Sender Ctl", WalSndShmemSize(), &found);
@@ -783,7 +787,8 @@ WalSndShmemInit(void)
 
 	for (i = 0; i < MaxWalSenders; i++)
 	{
-		WalSnd	*walsnd = &WalSndCtl->walsnds[i];
+		WalSnd	   *walsnd = &WalSndCtl->walsnds[i];
+
 		SpinLockInit(&walsnd->mutex);
 	}
 }
@@ -795,15 +800,15 @@ WalSndShmemInit(void)
 XLogRecPtr
 GetOldestWALSendPointer(void)
 {
-	XLogRecPtr oldest = {0, 0};
-	int		i;
-	bool	found = false;
+	XLogRecPtr	oldest = {0, 0};
+	int			i;
+	bool		found = false;
 
 	for (i = 0; i < MaxWalSenders; i++)
 	{
 		/* use volatile pointer to prevent code rearrangement */
-		volatile WalSnd	*walsnd = &WalSndCtl->walsnds[i];
-		XLogRecPtr recptr;
+		volatile WalSnd *walsnd = &WalSndCtl->walsnds[i];
+		XLogRecPtr	recptr;
 
 		if (walsnd->pid == 0)
 			continue;
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 7d4e75c63c226bd02396a49791e3c56652dddc6c..a8016f4adf323743cfec223b7ce6ade01c70551d 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.193 2010/01/02 16:57:51 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.194 2010/02/26 02:00:58 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -56,7 +56,7 @@ static void markQueryForLocking(Query *qry, Node *jtnode,
 static List *matchLocks(CmdType event, RuleLock *rulelocks,
 		   int varno, Query *parsetree);
 static Query *fireRIRrules(Query *parsetree, List *activeRIRs,
-						   bool forUpdatePushedDown);
+			 bool forUpdatePushedDown);
 
 
 /*
@@ -214,7 +214,7 @@ AcquireRewriteLocks(Query *parsetree, bool forUpdatePushedDown)
 				 */
 				AcquireRewriteLocks(rte->subquery,
 									(forUpdatePushedDown ||
-									 get_parse_rowmark(parsetree, rt_index) != NULL));
+							get_parse_rowmark(parsetree, rt_index) != NULL));
 				break;
 
 			default:
@@ -1205,9 +1205,9 @@ ApplyRetrieveRule(Query *parsetree,
 	rte->modifiedCols = NULL;
 
 	/*
-	 * If FOR UPDATE/SHARE of view, mark all the contained tables as
-	 * implicit FOR UPDATE/SHARE, the same as the parser would have done
-	 * if the view's subquery had been written out explicitly.
+	 * If FOR UPDATE/SHARE of view, mark all the contained tables as implicit
+	 * FOR UPDATE/SHARE, the same as the parser would have done if the view's
+	 * subquery had been written out explicitly.
 	 *
 	 * Note: we don't consider forUpdatePushedDown here; such marks will be
 	 * made by recursing from the upper level in markQueryForLocking.
@@ -1350,7 +1350,7 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
 		{
 			rte->subquery = fireRIRrules(rte->subquery, activeRIRs,
 										 (forUpdatePushedDown ||
-										  get_parse_rowmark(parsetree, rt_index) != NULL));
+							get_parse_rowmark(parsetree, rt_index) != NULL));
 			continue;
 		}
 
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index 4a0b52eef8c6faf3c2218d4e3fa5fec17c304475..724e94b913fd4b952d02c77aff77fa721a4e58c3 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.126 2010/01/02 16:57:51 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.127 2010/02/26 02:00:59 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -940,9 +940,9 @@ getInsertSelectQuery(Query *parsetree, Query ***subquery_ptr)
 
 	/*
 	 * Currently, this is ONLY applied to rule-action queries, and so we
-	 * expect to find the OLD and NEW placeholder entries in the given
-	 * query.  If they're not there, it must be an INSERT/SELECT in which
-	 * they've been pushed down to the SELECT.
+	 * expect to find the OLD and NEW placeholder entries in the given query.
+	 * If they're not there, it must be an INSERT/SELECT in which they've been
+	 * pushed down to the SELECT.
 	 */
 	if (list_length(parsetree->rtable) >= 2 &&
 		strcmp(rt_fetch(PRS2_OLD_VARNO, parsetree->rtable)->eref->aliasname,
@@ -1236,11 +1236,11 @@ ResolveNew_callback(Var *var,
 		List	   *fields;
 
 		/*
-		 * If generating an expansion for a var of a named rowtype
-		 * (ie, this is a plain relation RTE), then we must include
-		 * dummy items for dropped columns.  If the var is RECORD (ie,
-		 * this is a JOIN), then omit dropped columns.	Either way,
-		 * attach column names to the RowExpr for use of ruleutils.c.
+		 * If generating an expansion for a var of a named rowtype (ie, this
+		 * is a plain relation RTE), then we must include dummy items for
+		 * dropped columns.  If the var is RECORD (ie, this is a JOIN), then
+		 * omit dropped columns.  Either way, attach column names to the
+		 * RowExpr for use of ruleutils.c.
 		 */
 		expandRTE(rcon->target_rte,
 				  var->varno, var->varlevelsup, var->location,
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 470800d5f47b9f9b9765087f4f93546c0ac1fb0e..3b6938135aca02ab4baa6bbedb8509c2522e9955 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.255 2010/01/23 16:37:12 sriggs Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.256 2010/02/26 02:00:59 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -2443,12 +2443,12 @@ LockBufferForCleanup(Buffer buffer)
 bool
 HoldingBufferPinThatDelaysRecovery(void)
 {
-	int		bufid = GetStartupBufferPinWaitBufId();
+	int			bufid = GetStartupBufferPinWaitBufId();
 
 	/*
-	 * If we get woken slowly then it's possible that the Startup process
-	 * was already woken by other backends before we got here. Also possible
-	 * that we get here by multiple interrupts or interrupts at inappropriate
+	 * If we get woken slowly then it's possible that the Startup process was
+	 * already woken by other backends before we got here. Also possible that
+	 * we get here by multiple interrupts or interrupts at inappropriate
 	 * times, so make sure we do nothing if the bufid is not set.
 	 */
 	if (bufid < 0)
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 06edf062c1b038445fd89af3820452744848613c..f68129a8b8751745040442447437bade88e3f529 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.155 2010/02/22 15:26:14 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.156 2010/02/26 02:00:59 momjian Exp $
  *
  * NOTES:
  *
@@ -395,7 +395,7 @@ count_usable_fds(int max_to_probe, int *usable_fds, int *already_open)
 #ifdef HAVE_GETRLIMIT
 #ifdef RLIMIT_NOFILE			/* most platforms use RLIMIT_NOFILE */
 	getrlimit_status = getrlimit(RLIMIT_NOFILE, &rlim);
-#else	/* but BSD doesn't ... */
+#else							/* but BSD doesn't ... */
 	getrlimit_status = getrlimit(RLIMIT_OFILE, &rlim);
 #endif   /* RLIMIT_NOFILE */
 	if (getrlimit_status != 0)
@@ -1798,9 +1798,9 @@ CleanupTempFiles(bool isProcExit)
 				/*
 				 * If we're in the process of exiting a backend process, close
 				 * all temporary files. Otherwise, only close temporary files
-				 * local to the current transaction. They should be closed
-				 * by the ResourceOwner mechanism already, so this is just
-				 * a debugging cross-check.
+				 * local to the current transaction. They should be closed by
+				 * the ResourceOwner mechanism already, so this is just a
+				 * debugging cross-check.
 				 */
 				if (isProcExit)
 					FileClose(i);
@@ -1860,7 +1860,7 @@ RemovePgTempFiles(void)
 			continue;
 
 		snprintf(temp_path, sizeof(temp_path), "pg_tblspc/%s/%s/%s",
-				 spc_de->d_name, TABLESPACE_VERSION_DIRECTORY, PG_TEMP_FILES_DIR);
+			spc_de->d_name, TABLESPACE_VERSION_DIRECTORY, PG_TEMP_FILES_DIR);
 		RemovePgTempFilesInDir(temp_path);
 	}
 
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index b584c1fe3801ac45a926fd7219fe2aa931fa2a3b..a872f1e78fb79bb78f479c842bbe8797e190e990 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.76 2010/02/09 21:43:30 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.77 2010/02/26 02:00:59 momjian Exp $
  *
  *
  * NOTES:
@@ -307,10 +307,10 @@ FreeSpaceMapTruncateRel(Relation rel, BlockNumber nblocks)
 
 	/*
 	 * We might as well update the local smgr_fsm_nblocks setting.
-	 * smgrtruncate sent an smgr cache inval message, which will cause
-	 * other backends to invalidate their copy of smgr_fsm_nblocks, and
-	 * this one too at the next command boundary.  But this ensures it
-	 * isn't outright wrong until then.
+	 * smgrtruncate sent an smgr cache inval message, which will cause other
+	 * backends to invalidate their copy of smgr_fsm_nblocks, and this one too
+	 * at the next command boundary.  But this ensures it isn't outright wrong
+	 * until then.
 	 */
 	if (rel->rd_smgr)
 		rel->rd_smgr->smgr_fsm_nblocks = new_nfsmblocks;
@@ -509,9 +509,9 @@ fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
 
 	/*
 	 * If we haven't cached the size of the FSM yet, check it first.  Also
-	 * recheck if the requested block seems to be past end, since our
-	 * cached value might be stale.  (We send smgr inval messages on
-	 * truncation, but not on extension.)
+	 * recheck if the requested block seems to be past end, since our cached
+	 * value might be stale.  (We send smgr inval messages on truncation, but
+	 * not on extension.)
 	 */
 	if (rel->rd_smgr->smgr_fsm_nblocks == InvalidBlockNumber ||
 		blkno >= rel->rd_smgr->smgr_fsm_nblocks)
@@ -575,8 +575,8 @@ fsm_extend(Relation rel, BlockNumber fsm_nblocks)
 	RelationOpenSmgr(rel);
 
 	/*
-	 * Create the FSM file first if it doesn't exist.  If smgr_fsm_nblocks
-	 * is positive then it must exist, no need for an smgrexists call.
+	 * Create the FSM file first if it doesn't exist.  If smgr_fsm_nblocks is
+	 * positive then it must exist, no need for an smgrexists call.
 	 */
 	if ((rel->rd_smgr->smgr_fsm_nblocks == 0 ||
 		 rel->rd_smgr->smgr_fsm_nblocks == InvalidBlockNumber) &&
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 7cd57f31405d1cc4747dd2602c1b41e0b494760e..baa2e43f50d659c7376a967e2633e20aeefa95b6 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -37,7 +37,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.59 2010/01/23 16:37:12 sriggs Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.60 2010/02/26 02:01:00 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -56,7 +56,7 @@
 #include "utils/builtins.h"
 #include "utils/snapmgr.h"
 
-static RunningTransactionsData	CurrentRunningXactsData;
+static RunningTransactionsData CurrentRunningXactsData;
 
 /* Our shared memory area */
 typedef struct ProcArrayStruct
@@ -64,13 +64,16 @@ typedef struct ProcArrayStruct
 	int			numProcs;		/* number of valid procs entries */
 	int			maxProcs;		/* allocated size of procs array */
 
-	int			numKnownAssignedXids;	/* current number of known assigned xids */
-	int			maxKnownAssignedXids;	/* allocated size of known assigned xids */
+	int			numKnownAssignedXids;	/* current number of known assigned
+										 * xids */
+	int			maxKnownAssignedXids;	/* allocated size of known assigned
+										 * xids */
+
 	/*
 	 * Highest subxid that overflowed KnownAssignedXids array. Similar to
 	 * overflowing cached subxids in PGPROC entries.
 	 */
-	TransactionId	lastOverflowedXid;
+	TransactionId lastOverflowedXid;
 
 	/*
 	 * We declare procs[] as 1 entry because C wants a fixed-size array, but
@@ -85,7 +88,7 @@ static ProcArrayStruct *procArray;
  * Bookkeeping for tracking emulated transactions in recovery
  */
 static HTAB *KnownAssignedXidsHash;
-static TransactionId	latestObservedXid = InvalidTransactionId;
+static TransactionId latestObservedXid = InvalidTransactionId;
 
 /*
  * If we're in STANDBY_SNAPSHOT_PENDING state, standbySnapshotPendingXmin is
@@ -135,9 +138,9 @@ static void DisplayXidCache(void);
 #endif   /* XIDCACHE_DEBUG */
 
 /* Primitives for KnownAssignedXids array handling for standby */
-static int  KnownAssignedXidsGet(TransactionId *xarray, TransactionId xmax);
-static int	KnownAssignedXidsGetAndSetXmin(TransactionId *xarray, TransactionId *xmin,
-											TransactionId xmax);
+static int	KnownAssignedXidsGet(TransactionId *xarray, TransactionId xmax);
+static int KnownAssignedXidsGetAndSetXmin(TransactionId *xarray, TransactionId *xmin,
+							   TransactionId xmax);
 static bool KnownAssignedXidsExist(TransactionId xid);
 static void KnownAssignedXidsAdd(TransactionId *xids, int nxids);
 static void KnownAssignedXidsRemove(TransactionId xid);
@@ -436,9 +439,9 @@ ProcArrayInitRecoveryInfo(TransactionId oldestActiveXid)
 void
 ProcArrayApplyRecoveryInfo(RunningTransactions running)
 {
-	int				xid_index;	/* main loop */
-	TransactionId	*xids;
-	int				nxids;
+	int			xid_index;		/* main loop */
+	TransactionId *xids;
+	int			nxids;
 
 	Assert(standbyState >= STANDBY_INITIALIZED);
 
@@ -455,14 +458,14 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
 		return;
 
 	/*
-	 * If our initial RunningXactData had an overflowed snapshot then we
-	 * knew we were missing some subxids from our snapshot. We can use
-	 * this data as an initial snapshot, but we cannot yet mark it valid.
-	 * We know that the missing subxids are equal to or earlier than
-	 * nextXid. After we initialise we continue to apply changes during
-	 * recovery, so once the oldestRunningXid is later than the nextXid
-	 * from the initial snapshot we know that we no longer have missing
-	 * information and can mark the snapshot as valid.
+	 * If our initial RunningXactData had an overflowed snapshot then we knew
+	 * we were missing some subxids from our snapshot. We can use this data as
+	 * an initial snapshot, but we cannot yet mark it valid. We know that the
+	 * missing subxids are equal to or earlier than nextXid. After we
+	 * initialise we continue to apply changes during recovery, so once the
+	 * oldestRunningXid is later than the nextXid from the initial snapshot we
+	 * know that we no longer have missing information and can mark the
+	 * snapshot as valid.
 	 */
 	if (standbyState == STANDBY_SNAPSHOT_PENDING)
 	{
@@ -471,9 +474,9 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
 		{
 			standbyState = STANDBY_SNAPSHOT_READY;
 			elog(trace_recovery(DEBUG2),
-					"running xact data now proven complete");
+				 "running xact data now proven complete");
 			elog(trace_recovery(DEBUG2),
-					"recovery snapshots are now enabled");
+				 "recovery snapshots are now enabled");
 		}
 		return;
 	}
@@ -485,9 +488,9 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
 	TransactionIdRetreat(latestObservedXid);
 
 	/*
-	 * If the snapshot overflowed, then we still initialise with what we
-	 * know, but the recovery snapshot isn't fully valid yet because we
-	 * know there are some subxids missing (ergo we don't know which ones)
+	 * If the snapshot overflowed, then we still initialise with what we know,
+	 * but the recovery snapshot isn't fully valid yet because we know there
+	 * are some subxids missing (ergo we don't know which ones)
 	 */
 	if (!running->subxid_overflow)
 	{
@@ -508,12 +511,12 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
 	KnownAssignedXidsDisplay(trace_recovery(DEBUG3));
 
 	/*
-	 * Scan through the incoming array of RunningXacts and collect xids.
-	 * We don't use SubtransSetParent because it doesn't matter yet. If
-	 * we aren't overflowed then all xids will fit in snapshot and so we
-	 * don't need subtrans. If we later overflow, an xid assignment record
-	 * will add xids to subtrans. If RunningXacts is overflowed then we
-	 * don't have enough information to correctly update subtrans anyway.
+	 * Scan through the incoming array of RunningXacts and collect xids. We
+	 * don't use SubtransSetParent because it doesn't matter yet. If we aren't
+	 * overflowed then all xids will fit in snapshot and so we don't need
+	 * subtrans. If we later overflow, an xid assignment record will add xids
+	 * to subtrans. If RunningXacts is overflowed then we don't have enough
+	 * information to correctly update subtrans anyway.
 	 */
 
 	/*
@@ -563,10 +566,10 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
 		ShmemVariableCache->nextXid = running->nextXid;
 
 	elog(trace_recovery(DEBUG2),
-		"running transaction data initialized");
+		 "running transaction data initialized");
 	if (standbyState == STANDBY_SNAPSHOT_READY)
 		elog(trace_recovery(DEBUG2),
-			"recovery snapshots are now enabled");
+			 "recovery snapshots are now enabled");
 }
 
 void
@@ -574,7 +577,7 @@ ProcArrayApplyXidAssignment(TransactionId topxid,
 							int nsubxids, TransactionId *subxids)
 {
 	TransactionId max_xid;
-	int		i;
+	int			i;
 
 	if (standbyState < STANDBY_SNAPSHOT_PENDING)
 		return;
@@ -592,15 +595,15 @@ ProcArrayApplyXidAssignment(TransactionId topxid,
 	RecordKnownAssignedTransactionIds(max_xid);
 
 	/*
-	 * Notice that we update pg_subtrans with the top-level xid, rather
-	 * than the parent xid. This is a difference between normal
-	 * processing and recovery, yet is still correct in all cases. The
-	 * reason is that subtransaction commit is not marked in clog until
-	 * commit processing, so all aborted subtransactions have already been
-	 * clearly marked in clog. As a result we are able to refer directly
-	 * to the top-level transaction's state rather than skipping through
-	 * all the intermediate states in the subtransaction tree. This
-	 * should be the first time we have attempted to SubTransSetParent().
+	 * Notice that we update pg_subtrans with the top-level xid, rather than
+	 * the parent xid. This is a difference between normal processing and
+	 * recovery, yet is still correct in all cases. The reason is that
+	 * subtransaction commit is not marked in clog until commit processing, so
+	 * all aborted subtransactions have already been clearly marked in clog.
+	 * As a result we are able to refer directly to the top-level
+	 * transaction's state rather than skipping through all the intermediate
+	 * states in the subtransaction tree. This should be the first time we
+	 * have attempted to SubTransSetParent().
 	 */
 	for (i = 0; i < nsubxids; i++)
 		SubTransSetParent(subxids[i], topxid, false);
@@ -697,12 +700,12 @@ TransactionIdIsInProgress(TransactionId xid)
 	if (xids == NULL)
 	{
 		/*
-		 * In hot standby mode, reserve enough space to hold all xids in
-		 * the known-assigned list. If we later finish recovery, we no longer
-		 * need the bigger array, but we don't bother to shrink it.
+		 * In hot standby mode, reserve enough space to hold all xids in the
+		 * known-assigned list. If we later finish recovery, we no longer need
+		 * the bigger array, but we don't bother to shrink it.
 		 */
-		int	maxxids = RecoveryInProgress() ?
-			arrayP->maxProcs : TOTAL_MAX_CACHED_SUBXIDS;
+		int			maxxids = RecoveryInProgress() ?
+		arrayP->maxProcs : TOTAL_MAX_CACHED_SUBXIDS;
 
 		xids = (TransactionId *) malloc(maxxids * sizeof(TransactionId));
 		if (xids == NULL)
@@ -799,10 +802,10 @@ TransactionIdIsInProgress(TransactionId xid)
 		}
 
 		/*
-		 * If the KnownAssignedXids overflowed, we have to check
-		 * pg_subtrans too. Copy all xids from KnownAssignedXids that are
-		 * lower than xid, since if xid is a subtransaction its parent will
-		 * always have a lower value.
+		 * If the KnownAssignedXids overflowed, we have to check pg_subtrans
+		 * too. Copy all xids from KnownAssignedXids that are lower than xid,
+		 * since if xid is a subtransaction its parent will always have a
+		 * lower value.
 		 */
 		if (TransactionIdPrecedesOrEquals(xid, procArray->lastOverflowedXid))
 			nxids = KnownAssignedXidsGet(xids, xid);
@@ -1052,8 +1055,8 @@ GetSnapshotData(Snapshot snapshot)
 	if (snapshot->xip == NULL)
 	{
 		/*
-		 * First call for this snapshot. Snapshot is same size whether
-		 * or not we are in recovery, see later comments.
+		 * First call for this snapshot. Snapshot is same size whether or not
+		 * we are in recovery, see later comments.
 		 */
 		snapshot->xip = (TransactionId *)
 			malloc(arrayP->maxProcs * sizeof(TransactionId));
@@ -1176,16 +1179,16 @@ GetSnapshotData(Snapshot snapshot)
 		 * In recovery we don't know which xids are top-level and which are
 		 * subxacts, a design choice that greatly simplifies xid processing.
 		 *
-		 * It seems like we would want to try to put xids into xip[] only,
-		 * but that is fairly small. We would either need to make that bigger
-		 * or to increase the rate at which we WAL-log xid assignment;
-		 * neither is an appealing choice.
+		 * It seems like we would want to try to put xids into xip[] only, but
+		 * that is fairly small. We would either need to make that bigger or
+		 * to increase the rate at which we WAL-log xid assignment; neither is
+		 * an appealing choice.
 		 *
 		 * We could try to store xids into xip[] first and then into subxip[]
 		 * if there are too many xids. That only works if the snapshot doesn't
 		 * overflow because we do not search subxip[] in that case. A simpler
-		 * way is to just store all xids in the subxact array because this
-		 * is by far the bigger array. We just leave the xip array empty.
+		 * way is to just store all xids in the subxact array because this is
+		 * by far the bigger array. We just leave the xip array empty.
 		 *
 		 * Either way we need to change the way XidInMVCCSnapshot() works
 		 * depending upon when the snapshot was taken, or change normal
@@ -1269,8 +1272,8 @@ GetRunningTransactionData(void)
 	 * the lock, so we can't look at numProcs.  Likewise, we allocate much
 	 * more subxip storage than is probably needed.
 	 *
-	 * Should only be allocated for bgwriter, since only ever executed
-	 * during checkpoints.
+	 * Should only be allocated for bgwriter, since only ever executed during
+	 * checkpoints.
 	 */
 	if (CurrentRunningXacts->xids == NULL)
 	{
@@ -1300,6 +1303,7 @@ GetRunningTransactionData(void)
 	latestCompletedXid = ShmemVariableCache->latestCompletedXid;
 
 	oldestRunningXid = ShmemVariableCache->nextXid;
+
 	/*
 	 * Spin over procArray collecting all xids and subxids.
 	 */
@@ -1325,8 +1329,8 @@ GetRunningTransactionData(void)
 			oldestRunningXid = xid;
 
 		/*
-		 * Save subtransaction XIDs. Other backends can't add or remove entries
-		 * while we're holding XidGenLock.
+		 * Save subtransaction XIDs. Other backends can't add or remove
+		 * entries while we're holding XidGenLock.
 		 */
 		nxids = proc->subxids.nxids;
 		if (nxids > 0)
@@ -1642,13 +1646,13 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0,
  *
  * By using exclusive lock we prevent new snapshots from being taken while
  * we work out which snapshots to conflict with. This protects those new
- * snapshots from also being included in our conflict list. 
+ * snapshots from also being included in our conflict list.
  *
  * After the lock is released, we allow snapshots again. It is possible
  * that we arrive at a snapshot that is identical to one that we just
  * decided we should conflict with. This a case of false positives, not an
  * actual problem.
- * 
+ *
  * There are two cases: (1) if we were correct in using latestCompletedXid
  * then that means that all xids in the snapshot lower than that are FATAL
  * errors, so not xids that ever commit. We can make no visibility errors
@@ -1657,11 +1661,11 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0,
  * latestCompletedXid then we conflicted with a snapshot needlessly. Taking
  * another identical snapshot is OK, because the earlier conflicted
  * snapshot was a false positive.
- * 
+ *
  * In either case, a snapshot taken after conflict assessment will still be
  * valid and non-conflicting even if an identical snapshot that existed
  * before conflict assessment was assessed as conflicting.
- * 
+ *
  * If we allowed concurrent snapshots while we were deciding who to
  * conflict with we would need to include all concurrent snapshotters in
  * the conflict list as well. We'd have difficulty in working out exactly
@@ -1669,7 +1673,7 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0,
  * lock. Notice that we only hold that lock for as long as it takes to
  * make the conflict list, not for the whole duration of the conflict
  * resolution.
- * 
+ *
  * It also means that users waiting for a snapshot is a good thing, since
  * it is more likely that they will live longer after having waited. So it
  * is a benefit, not an oversight that we use exclusive lock here.
@@ -1695,8 +1699,8 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
 
 	/*
 	 * If not first time through, get workspace to remember main XIDs in. We
-	 * malloc it permanently to avoid repeated palloc/pfree overhead.
-	 * Allow result space, remembering room for a terminator.
+	 * malloc it permanently to avoid repeated palloc/pfree overhead. Allow
+	 * result space, remembering room for a terminator.
 	 */
 	if (vxids == NULL)
 	{
@@ -1711,8 +1715,8 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
 	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
 
 	/*
-	 * If we don't know the TransactionId that created the conflict, set
-	 * it to latestCompletedXid which is the latest possible value.
+	 * If we don't know the TransactionId that created the conflict, set it to
+	 * latestCompletedXid which is the latest possible value.
 	 */
 	if (!TransactionIdIsValid(limitXmin))
 		limitXmin = ShmemVariableCache->latestCompletedXid;
@@ -1732,8 +1736,9 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
 			TransactionId pxmin = proc->xmin;
 
 			/*
-			 * We ignore an invalid pxmin because this means that backend
-			 * has no snapshot and cannot get another one while we hold exclusive lock.
+			 * We ignore an invalid pxmin because this means that backend has
+			 * no snapshot and cannot get another one while we hold exclusive
+			 * lock.
 			 */
 			if (TransactionIdIsValid(pxmin) && !TransactionIdFollows(pxmin, limitXmin))
 			{
@@ -1784,8 +1789,8 @@ CancelVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode)
 			if (pid != 0)
 			{
 				/*
-				 * Kill the pid if it's still here. If not, that's what we wanted
-				 * so ignore any errors.
+				 * Kill the pid if it's still here. If not, that's what we
+				 * wanted so ignore any errors.
 				 */
 				(void) SendProcSignal(pid, sigmode, vxid.backendId);
 			}
@@ -1905,8 +1910,8 @@ CancelDBBackends(Oid databaseid, ProcSignalReason sigmode, bool conflictPending)
 			if (pid != 0)
 			{
 				/*
-				 * Kill the pid if it's still here. If not, that's what we wanted
-				 * so ignore any errors.
+				 * Kill the pid if it's still here. If not, that's what we
+				 * wanted so ignore any errors.
 				 */
 				(void) SendProcSignal(pid, sigmode, procvxid.backendId);
 			}
@@ -2133,11 +2138,10 @@ DisplayXidCache(void)
 			xc_no_overflow,
 			xc_slow_answer);
 }
-
 #endif   /* XIDCACHE_DEBUG */
 
 /* ----------------------------------------------
- * 		KnownAssignedTransactions sub-module
+ *		KnownAssignedTransactions sub-module
  * ----------------------------------------------
  */
 
@@ -2199,48 +2203,49 @@ RecordKnownAssignedTransactionIds(TransactionId xid)
 		return;
 
 	/*
-	 * We can see WAL records before the running-xacts snapshot that
-	 * contain XIDs that are not in the running-xacts snapshot, but that we
-	 * know to have finished before the running-xacts snapshot was taken.
-	 * Don't waste precious shared memory by keeping them in the hash table.
+	 * We can see WAL records before the running-xacts snapshot that contain
+	 * XIDs that are not in the running-xacts snapshot, but that we know to
+	 * have finished before the running-xacts snapshot was taken. Don't waste
+	 * precious shared memory by keeping them in the hash table.
 	 *
 	 * We can also see WAL records before the running-xacts snapshot that
 	 * contain XIDs that are not in the running-xacts snapshot for a different
-	 * reason: the transaction started *after* the running-xacts snapshot
-	 * was taken, but before it was written to WAL. We must be careful to
-	 * not ignore such XIDs. Because such a transaction started after the
-	 * running-xacts snapshot was taken, it must have an XID larger than
-	 * the oldest XID according to the running-xacts snapshot.
+	 * reason: the transaction started *after* the running-xacts snapshot was
+	 * taken, but before it was written to WAL. We must be careful to not
+	 * ignore such XIDs. Because such a transaction started after the
+	 * running-xacts snapshot was taken, it must have an XID larger than the
+	 * oldest XID according to the running-xacts snapshot.
 	 */
 	if (TransactionIdPrecedes(xid, snapshotOldestActiveXid))
 		return;
 
 	ereport(trace_recovery(DEBUG4),
-				(errmsg("record known xact %u latestObservedXid %u",
-							xid, latestObservedXid)));
+			(errmsg("record known xact %u latestObservedXid %u",
+					xid, latestObservedXid)));
 
 	/*
-	 * When a newly observed xid arrives, it is frequently the case
-	 * that it is *not* the next xid in sequence. When this occurs, we
-	 * must treat the intervening xids as running also.
+	 * When a newly observed xid arrives, it is frequently the case that it is
+	 * *not* the next xid in sequence. When this occurs, we must treat the
+	 * intervening xids as running also.
 	 */
 	if (TransactionIdFollows(xid, latestObservedXid))
 	{
-		TransactionId	next_expected_xid = latestObservedXid;
+		TransactionId next_expected_xid = latestObservedXid;
+
 		TransactionIdAdvance(next_expected_xid);
 
 		/*
-		 * Locking requirement is currently higher than for xid assignment
-		 * in normal running. However, we only get called here for new
-		 * high xids - so on a multi-processor where it is common that xids
-		 * arrive out of order the average number of locks per assignment
-		 * will actually reduce. So not too worried about this locking.
+		 * Locking requirement is currently higher than for xid assignment in
+		 * normal running. However, we only get called here for new high xids
+		 * - so on a multi-processor where it is common that xids arrive out
+		 * of order the average number of locks per assignment will actually
+		 * reduce. So not too worried about this locking.
 		 *
-		 * XXX It does seem possible that we could add a whole range
-		 * of numbers atomically to KnownAssignedXids, if we use a sorted
-		 * list for KnownAssignedXids. But that design also increases the
-		 * length of time we hold lock when we process commits/aborts, so
-		 * on balance don't worry about this.
+		 * XXX It does seem possible that we could add a whole range of
+		 * numbers atomically to KnownAssignedXids, if we use a sorted list
+		 * for KnownAssignedXids. But that design also increases the length of
+		 * time we hold lock when we process commits/aborts, so on balance
+		 * don't worry about this.
 		 */
 		LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
 
@@ -2248,8 +2253,8 @@ RecordKnownAssignedTransactionIds(TransactionId xid)
 		{
 			if (TransactionIdPrecedes(next_expected_xid, xid))
 				ereport(trace_recovery(DEBUG4),
-						(errmsg("recording unobserved xid %u (latestObservedXid %u)",
-									next_expected_xid, latestObservedXid)));
+				(errmsg("recording unobserved xid %u (latestObservedXid %u)",
+						next_expected_xid, latestObservedXid)));
 			KnownAssignedXidsAdd(&next_expected_xid, 1);
 
 			/*
@@ -2327,9 +2332,9 @@ ExpireOldKnownAssignedTransactionIds(TransactionId xid)
  *
  * There are 3 main users of the KnownAssignedXids data structure:
  *
- *   * backends taking snapshots
- *   * startup process adding new knownassigned xids
- *   * startup process removing xids as transactions end
+ *	 * backends taking snapshots
+ *	 * startup process adding new knownassigned xids
+ *	 * startup process removing xids as transactions end
  *
  * If we make KnownAssignedXids a simple sorted array then the first two
  * operations are fast, but the last one is at least O(N). If we make
@@ -2354,8 +2359,8 @@ static void
 KnownAssignedXidsAdd(TransactionId *xids, int nxids)
 {
 	TransactionId *result;
-	bool found;
-	int i;
+	bool		found;
+	int			i;
 
 	for (i = 0; i < nxids; i++)
 	{
@@ -2369,19 +2374,19 @@ KnownAssignedXidsAdd(TransactionId *xids, int nxids)
 			KnownAssignedXidsDisplay(LOG);
 			LWLockRelease(ProcArrayLock);
 			ereport(ERROR,
-						(errcode(ERRCODE_OUT_OF_MEMORY),
-						 errmsg("too many KnownAssignedXids")));
+					(errcode(ERRCODE_OUT_OF_MEMORY),
+					 errmsg("too many KnownAssignedXids")));
 		}
 
 		result = (TransactionId *) hash_search(KnownAssignedXidsHash, &xids[i], HASH_ENTER,
-												&found);
+											   &found);
 
 		if (!result)
 		{
 			LWLockRelease(ProcArrayLock);
 			ereport(ERROR,
-						(errcode(ERRCODE_OUT_OF_MEMORY),
-						 errmsg("out of shared memory")));
+					(errcode(ERRCODE_OUT_OF_MEMORY),
+					 errmsg("out of shared memory")));
 		}
 
 		if (found)
@@ -2401,7 +2406,8 @@ KnownAssignedXidsAdd(TransactionId *xids, int nxids)
 static bool
 KnownAssignedXidsExist(TransactionId xid)
 {
-	bool found;
+	bool		found;
+
 	(void) hash_search(KnownAssignedXidsHash, &xid, HASH_FIND, &found);
 	return found;
 }
@@ -2414,7 +2420,7 @@ KnownAssignedXidsExist(TransactionId xid)
 static void
 KnownAssignedXidsRemove(TransactionId xid)
 {
-	bool found;
+	bool		found;
 
 	Assert(TransactionIdIsValid(xid));
 
@@ -2427,14 +2433,14 @@ KnownAssignedXidsRemove(TransactionId xid)
 	Assert(procArray->numKnownAssignedXids >= 0);
 
 	/*
-	 * We can fail to find an xid if the xid came from a subtransaction
-	 * that aborts, though the xid hadn't yet been reported and no WAL records
-	 * have been written using the subxid. In that case the abort record will
+	 * We can fail to find an xid if the xid came from a subtransaction that
+	 * aborts, though the xid hadn't yet been reported and no WAL records have
+	 * been written using the subxid. In that case the abort record will
 	 * contain that subxid and we haven't seen it before.
 	 *
-	 * If we fail to find it for other reasons it might be a problem, but
-	 * it isn't much use to log that it happened, since we can't divine much
-	 * from just an isolated xid value.
+	 * If we fail to find it for other reasons it might be a problem, but it
+	 * isn't much use to log that it happened, since we can't divine much from
+	 * just an isolated xid value.
 	 */
 }
 
@@ -2460,7 +2466,7 @@ KnownAssignedXidsGet(TransactionId *xarray, TransactionId xmax)
  */
 static int
 KnownAssignedXidsGetAndSetXmin(TransactionId *xarray, TransactionId *xmin,
-					 TransactionId xmax)
+							   TransactionId xmax)
 {
 	HASH_SEQ_STATUS status;
 	TransactionId *knownXid;
@@ -2496,7 +2502,7 @@ KnownAssignedXidsGetAndSetXmin(TransactionId *xarray, TransactionId *xmin,
 static void
 KnownAssignedXidsRemoveMany(TransactionId xid, bool keepPreparedXacts)
 {
-	TransactionId	*knownXid;
+	TransactionId *knownXid;
 	HASH_SEQ_STATUS status;
 
 	if (TransactionIdIsValid(xid))
@@ -2508,7 +2514,7 @@ KnownAssignedXidsRemoveMany(TransactionId xid, bool keepPreparedXacts)
 	while ((knownXid = (TransactionId *) hash_seq_search(&status)) != NULL)
 	{
 		TransactionId removeXid = *knownXid;
-		bool found;
+		bool		found;
 
 		if (!TransactionIdIsValid(xid) || TransactionIdPrecedes(removeXid, xid))
 		{
@@ -2537,9 +2543,9 @@ KnownAssignedXidsDisplay(int trace_level)
 	HASH_SEQ_STATUS status;
 	TransactionId *knownXid;
 	StringInfoData buf;
-	TransactionId   *xids;
-	int				nxids;
-	int				i;
+	TransactionId *xids;
+	int			nxids;
+	int			i;
 
 	xids = palloc(sizeof(TransactionId) * TOTAL_MAX_CACHED_SUBXIDS);
 	nxids = 0;
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index 03f61b20eee94c7c1245637ef359dc7146c91c15..a60f466c860045c3261f31be4501a30a55ea6874 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/ipc/procsignal.c,v 1.5 2010/02/13 01:32:19 sriggs Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/ipc/procsignal.c,v 1.6 2010/02/26 02:01:00 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -36,12 +36,12 @@
  * reason is signaled more than once nearly simultaneously, the process may
  * observe it only once.)
  *
- * Each process that wants to receive signals registers its process ID 
+ * Each process that wants to receive signals registers its process ID
  * in the ProcSignalSlots array. The array is indexed by backend ID to make
  * slot allocation simple, and to avoid having to search the array when you
  * know the backend ID of the process you're signalling.  (We do support
  * signalling without backend ID, but it's a bit less efficient.)
- * 
+ *
  * The flags are actually declared as "volatile sig_atomic_t" for maximum
  * portability.  This should ensure that loads and stores of the flag
  * values are atomic, allowing us to dispense with any explicit locking.
@@ -57,7 +57,7 @@ typedef struct
  * possible auxiliary process type.  (This scheme assumes there is not
  * more than one of any auxiliary process type at a time.)
  */
-#define NumProcSignalSlots  (MaxBackends + NUM_AUXPROCTYPES)
+#define NumProcSignalSlots	(MaxBackends + NUM_AUXPROCTYPES)
 
 static ProcSignalSlot *ProcSignalSlots = NULL;
 static volatile ProcSignalSlot *MyProcSignalSlot = NULL;
@@ -146,8 +146,8 @@ CleanupProcSignalState(int status, Datum arg)
 	if (slot->pss_pid != MyProcPid)
 	{
 		/*
-		 * don't ERROR here. We're exiting anyway, and don't want to
-		 * get into infinite loop trying to exit
+		 * don't ERROR here. We're exiting anyway, and don't want to get into
+		 * infinite loop trying to exit
 		 */
 		elog(LOG, "process %d releasing ProcSignal slot %d, but it contains %d",
 			 MyProcPid, pss_idx, (int) slot->pss_pid);
@@ -201,7 +201,7 @@ SendProcSignal(pid_t pid, ProcSignalReason reason, BackendId backendId)
 		 * InvalidBackendId means that the target is most likely an auxiliary
 		 * process, which will have a slot near the end of the array.
 		 */
-		int		i;
+		int			i;
 
 		for (i = NumProcSignalSlots - 1; i >= 0; i--)
 		{
@@ -252,7 +252,7 @@ CheckProcSignal(ProcSignalReason reason)
 void
 procsignal_sigusr1_handler(SIGNAL_ARGS)
 {
-	int		save_errno = errno;
+	int			save_errno = errno;
 
 	if (CheckProcSignal(PROCSIG_CATCHUP_INTERRUPT))
 		HandleCatchupInterrupt();
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index ec3c5599270722889cec784ca270de8a0cabb2be..0667652ed72381f6c3d6421613f9d36518c293f8 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.81 2010/01/02 16:57:51 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.82 2010/02/26 02:01:00 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -145,9 +145,10 @@ typedef struct ProcState
 	bool		signaled;		/* backend has been sent catchup signal */
 
 	/*
-	 * Backend only sends invalidations, never receives them. This only makes sense
-	 * for Startup process during recovery because it doesn't maintain a relcache,
-	 * yet it fires inval messages to allow query backends to see schema changes.
+	 * Backend only sends invalidations, never receives them. This only makes
+	 * sense for Startup process during recovery because it doesn't maintain a
+	 * relcache, yet it fires inval messages to allow query backends to see
+	 * schema changes.
 	 */
 	bool		sendOnly;		/* backend only sends, never receives */
 
@@ -587,7 +588,7 @@ SICleanupQueue(bool callerHasWriteLock, int minFree)
 	/*
 	 * Recompute minMsgNum = minimum of all backends' nextMsgNum, identify the
 	 * furthest-back backend that needs signaling (if any), and reset any
-	 * backends that are too far back.  Note that because we ignore sendOnly
+	 * backends that are too far back.	Note that because we ignore sendOnly
 	 * backends here it is possible for them to keep sending messages without
 	 * a problem even when they are the only active backend.
 	 */
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 4712e3bdd8ad24c10cb880b5dcacf03d130ed4f7..a0357bb547630557e0a60811d3e0bd52ea7ecff6 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -3,15 +3,15 @@
  * standby.c
  *	  Misc functions used in Hot Standby mode.
  *
- *  All functions for handling RM_STANDBY_ID, which relate to
- *  AccessExclusiveLocks and starting snapshots for Hot Standby mode.
- *  Plus conflict recovery processing.
+ *	All functions for handling RM_STANDBY_ID, which relate to
+ *	AccessExclusiveLocks and starting snapshots for Hot Standby mode.
+ *	Plus conflict recovery processing.
  *
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/ipc/standby.c,v 1.13 2010/02/13 16:29:38 sriggs Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/ipc/standby.c,v 1.14 2010/02/26 02:01:00 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -30,7 +30,7 @@
 #include "storage/standby.h"
 #include "utils/ps_status.h"
 
-int		vacuum_defer_cleanup_age;
+int			vacuum_defer_cleanup_age;
 
 static List *RecoveryLockList;
 
@@ -58,10 +58,10 @@ InitRecoveryTransactionEnvironment(void)
 	VirtualTransactionId vxid;
 
 	/*
-	 * Initialise shared invalidation management for Startup process,
-	 * being careful to register ourselves as a sendOnly process so
-	 * we don't need to read messages, nor will we get signalled
-	 * when the queue starts filling up.
+	 * Initialise shared invalidation management for Startup process, being
+	 * careful to register ourselves as a sendOnly process so we don't need to
+	 * read messages, nor will we get signalled when the queue starts filling
+	 * up.
 	 */
 	SharedInvalBackendInit(true);
 
@@ -74,8 +74,8 @@ InitRecoveryTransactionEnvironment(void)
 	 * Lock a virtual transaction id for Startup process.
 	 *
 	 * We need to do GetNextLocalTransactionId() because
-	 * SharedInvalBackendInit() leaves localTransactionid invalid and
-	 * the lock manager doesn't like that at all.
+	 * SharedInvalBackendInit() leaves localTransactionid invalid and the lock
+	 * manager doesn't like that at all.
 	 *
 	 * Note that we don't need to run XactLockTableInsert() because nobody
 	 * needs to wait on xids. That sounds a little strange, but table locks
@@ -109,12 +109,12 @@ ShutdownRecoveryTransactionEnvironment(void)
 
 /*
  * -----------------------------------------------------
- * 		Standby wait timers and backend cancel logic
+ *		Standby wait timers and backend cancel logic
  * -----------------------------------------------------
  */
 
 #define STANDBY_INITIAL_WAIT_US  1000
-static int standbyWait_us = STANDBY_INITIAL_WAIT_US;
+static int	standbyWait_us = STANDBY_INITIAL_WAIT_US;
 
 /*
  * Standby wait logic for ResolveRecoveryConflictWithVirtualXIDs.
@@ -124,8 +124,8 @@ static int standbyWait_us = STANDBY_INITIAL_WAIT_US;
 static bool
 WaitExceedsMaxStandbyDelay(void)
 {
-	long	delay_secs;
-	int		delay_usecs;
+	long		delay_secs;
+	int			delay_usecs;
 
 	if (MaxStandbyDelay == -1)
 		return false;
@@ -168,8 +168,8 @@ ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist,
 
 	while (VirtualTransactionIdIsValid(*waitlist))
 	{
-		long wait_s;
-		int wait_us;			/* wait in microseconds (us) */
+		long		wait_s;
+		int			wait_us;	/* wait in microseconds (us) */
 		TimestampTz waitStart;
 		bool		logged;
 
@@ -178,12 +178,13 @@ ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist,
 		logged = false;
 
 		/* wait until the virtual xid is gone */
-		while(!ConditionalVirtualXactLockTableWait(*waitlist))
+		while (!ConditionalVirtualXactLockTableWait(*waitlist))
 		{
 			/*
 			 * Report if we have been waiting for a while now...
 			 */
 			TimestampTz now = GetCurrentTimestamp();
+
 			TimestampDifference(waitStart, now, &wait_s, &wait_us);
 			if (!logged && (wait_s > 0 || wait_us > 500000))
 			{
@@ -211,7 +212,7 @@ ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist,
 			/* Is it time to kill it? */
 			if (WaitExceedsMaxStandbyDelay())
 			{
-				pid_t pid;
+				pid_t		pid;
 
 				/*
 				 * Now find out who to throw out of the balloon.
@@ -237,7 +238,7 @@ ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist,
 
 		/* The virtual transaction is gone now, wait for the next one */
 		waitlist++;
-    }
+	}
 }
 
 void
@@ -249,7 +250,7 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileNode
 										 node.dbNode);
 
 	ResolveRecoveryConflictWithVirtualXIDs(backends,
-										   PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
+										 PROCSIG_RECOVERY_CONFLICT_SNAPSHOT);
 }
 
 void
@@ -258,43 +259,41 @@ ResolveRecoveryConflictWithTablespace(Oid tsid)
 	VirtualTransactionId *temp_file_users;
 
 	/*
-	 * Standby users may be currently using this tablespace for
-	 * for their temporary files. We only care about current
-	 * users because temp_tablespace parameter will just ignore
-	 * tablespaces that no longer exist.
+	 * Standby users may be currently using this tablespace for their
+	 * temporary files. We only care about current users because
+	 * temp_tablespace parameter will just ignore tablespaces that no longer
+	 * exist.
 	 *
-	 * Ask everybody to cancel their queries immediately so
-	 * we can ensure no temp files remain and we can remove the
-	 * tablespace. Nuke the entire site from orbit, it's the only
-	 * way to be sure.
+	 * Ask everybody to cancel their queries immediately so we can ensure no
+	 * temp files remain and we can remove the tablespace. Nuke the entire
+	 * site from orbit, it's the only way to be sure.
 	 *
-	 * XXX: We could work out the pids of active backends
-	 * using this tablespace by examining the temp filenames in the
-	 * directory. We would then convert the pids into VirtualXIDs
-	 * before attempting to cancel them.
+	 * XXX: We could work out the pids of active backends using this
+	 * tablespace by examining the temp filenames in the directory. We would
+	 * then convert the pids into VirtualXIDs before attempting to cancel
+	 * them.
 	 *
-	 * We don't wait for commit because drop tablespace is
-	 * non-transactional.
+	 * We don't wait for commit because drop tablespace is non-transactional.
 	 */
 	temp_file_users = GetConflictingVirtualXIDs(InvalidTransactionId,
 												InvalidOid);
 	ResolveRecoveryConflictWithVirtualXIDs(temp_file_users,
-										   PROCSIG_RECOVERY_CONFLICT_TABLESPACE);
+									   PROCSIG_RECOVERY_CONFLICT_TABLESPACE);
 }
 
 void
 ResolveRecoveryConflictWithDatabase(Oid dbid)
 {
 	/*
-	 * We don't do ResolveRecoveryConflictWithVirutalXIDs() here since
-	 * that only waits for transactions and completely idle sessions
-	 * would block us. This is rare enough that we do this as simply
-	 * as possible: no wait, just force them off immediately.
+	 * We don't do ResolveRecoveryConflictWithVirtualXIDs() here since that
+	 * only waits for transactions and completely idle sessions would block
+	 * us. This is rare enough that we do this as simply as possible: no wait,
+	 * just force them off immediately.
 	 *
 	 * No locking is required here because we already acquired
-	 * AccessExclusiveLock. Anybody trying to connect while we do this
-	 * will block during InitPostgres() and then disconnect when they
-	 * see the database has been removed.
+	 * AccessExclusiveLock. Anybody trying to connect while we do this will
+	 * block during InitPostgres() and then disconnect when they see the
+	 * database has been removed.
 	 */
 	while (CountDBBackends(dbid) > 0)
 	{
@@ -312,20 +311,20 @@ static void
 ResolveRecoveryConflictWithLock(Oid dbOid, Oid relOid)
 {
 	VirtualTransactionId *backends;
-	bool			report_memory_error = false;
-	bool			lock_acquired = false;
-	int				num_attempts = 0;
-	LOCKTAG			locktag;
+	bool		report_memory_error = false;
+	bool		lock_acquired = false;
+	int			num_attempts = 0;
+	LOCKTAG		locktag;
 
 	SET_LOCKTAG_RELATION(locktag, dbOid, relOid);
 
 	/*
-	 * If blowing away everybody with conflicting locks doesn't work,
-	 * after the first two attempts then we just start blowing everybody
-	 * away until it does work. We do this because its likely that we
-	 * either have too many locks and we just can't get one at all,
-	 * or that there are many people crowding for the same table.
-	 * Recovery must win; the end justifies the means.
+	 * If blowing away everybody with conflicting locks doesn't work, after
+	 * the first two attempts then we just start blowing everybody away until
+	 * it does work. We do this because it's likely that we either have too
+	 * many locks and we just can't get one at all, or that there are many
+	 * people crowding for the same table. Recovery must win; the end
+	 * justifies the means.
 	 */
 	while (!lock_acquired)
 	{
@@ -339,10 +338,10 @@ ResolveRecoveryConflictWithLock(Oid dbOid, Oid relOid)
 		}
 
 		ResolveRecoveryConflictWithVirtualXIDs(backends,
-											   PROCSIG_RECOVERY_CONFLICT_LOCK);
+											 PROCSIG_RECOVERY_CONFLICT_LOCK);
 
 		if (LockAcquireExtended(&locktag, AccessExclusiveLock, true, true, false)
-											!= LOCKACQUIRE_NOT_AVAIL)
+			!= LOCKACQUIRE_NOT_AVAIL)
 			lock_acquired = true;
 	}
 }
@@ -372,14 +371,14 @@ ResolveRecoveryConflictWithLock(Oid dbOid, Oid relOid)
 void
 ResolveRecoveryConflictWithBufferPin(void)
 {
-	bool	sig_alarm_enabled = false;
+	bool		sig_alarm_enabled = false;
 
 	Assert(InHotStandby);
 
 	if (MaxStandbyDelay == 0)
 	{
 		/*
-		 * We don't want to wait, so just tell everybody holding the pin to 
+		 * We don't want to wait, so just tell everybody holding the pin to
 		 * get out of town.
 		 */
 		SendRecoveryConflictWithBufferPin(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN);
@@ -387,17 +386,17 @@ ResolveRecoveryConflictWithBufferPin(void)
 	else if (MaxStandbyDelay == -1)
 	{
 		/*
-		 * Send out a request to check for buffer pin deadlocks before we wait.
-		 * This is fairly cheap, so no need to wait for deadlock timeout before
-		 * trying to send it out.
+		 * Send out a request to check for buffer pin deadlocks before we
+		 * wait. This is fairly cheap, so no need to wait for deadlock timeout
+		 * before trying to send it out.
 		 */
 		SendRecoveryConflictWithBufferPin(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
 	}
 	else
 	{
 		TimestampTz now;
-		long	standby_delay_secs;		/* How far Startup process is lagging */
-		int		standby_delay_usecs;
+		long		standby_delay_secs; /* How far Startup process is lagging */
+		int			standby_delay_usecs;
 
 		now = GetCurrentTimestamp();
 
@@ -414,14 +413,15 @@ ResolveRecoveryConflictWithBufferPin(void)
 		}
 		else
 		{
-			TimestampTz fin_time;			/* Expected wake-up time by timer */
-			long	timer_delay_secs;		/* Amount of time we set timer for */
-			int		timer_delay_usecs = 0;
+			TimestampTz fin_time;		/* Expected wake-up time by timer */
+			long		timer_delay_secs;		/* Amount of time we set timer
+												 * for */
+			int			timer_delay_usecs = 0;
 
 			/*
-			 * Send out a request to check for buffer pin deadlocks before we wait.
-			 * This is fairly cheap, so no need to wait for deadlock timeout before
-			 * trying to send it out.
+			 * Send out a request to check for buffer pin deadlocks before we
+			 * wait. This is fairly cheap, so no need to wait for deadlock
+			 * timeout before trying to send it out.
 			 */
 			SendRecoveryConflictWithBufferPin(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
 
@@ -446,8 +446,8 @@ ResolveRecoveryConflictWithBufferPin(void)
 			 * When is the finish time? We recheck this if we are woken early.
 			 */
 			fin_time = TimestampTzPlusMilliseconds(now,
-													(timer_delay_secs * 1000) +
-													(timer_delay_usecs / 1000));
+												   (timer_delay_secs * 1000) +
+												 (timer_delay_usecs / 1000));
 
 			if (enable_standby_sig_alarm(timer_delay_secs, timer_delay_usecs, fin_time))
 				sig_alarm_enabled = true;
@@ -473,10 +473,10 @@ SendRecoveryConflictWithBufferPin(ProcSignalReason reason)
 		   reason == PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
 
 	/*
-	 * We send signal to all backends to ask them if they are holding
-	 * the buffer pin which is delaying the Startup process. We must
-	 * not set the conflict flag yet, since most backends will be innocent.
-	 * Let the SIGUSR1 handling in each backend decide their own fate.
+	 * We send a signal to all backends to ask them if they are holding the
+	 * buffer pin which is delaying the Startup process. We must not set the
+	 * conflict flag yet, since most backends will be innocent. Let the
+	 * SIGUSR1 handling in each backend decide their own fate.
 	 */
 	CancelDBBackends(InvalidOid, reason, false);
 }
@@ -503,15 +503,15 @@ CheckRecoveryConflictDeadlock(LWLockId partitionLock)
 
 	/*
 	 * Error message should match ProcessInterrupts() but we avoid calling
-	 * that because we aren't handling an interrupt at this point. Note
-	 * that we only cancel the current transaction here, so if we are in a
+	 * that because we aren't handling an interrupt at this point. Note that
+	 * we only cancel the current transaction here, so if we are in a
 	 * subtransaction and the pin is held by a parent, then the Startup
 	 * process will continue to wait even though we have avoided deadlock.
 	 */
 	ereport(ERROR,
 			(errcode(ERRCODE_QUERY_CANCELED),
 			 errmsg("canceling statement due to conflict with recovery"),
-			 errdetail("User transaction caused buffer deadlock with recovery.")));
+	   errdetail("User transaction caused buffer deadlock with recovery.")));
 }
 
 /*
@@ -543,8 +543,8 @@ CheckRecoveryConflictDeadlock(LWLockId partitionLock)
 void
 StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
 {
-	xl_standby_lock	*newlock;
-	LOCKTAG			locktag;
+	xl_standby_lock *newlock;
+	LOCKTAG		locktag;
 
 	/* Already processed? */
 	if (TransactionIdDidCommit(xid) || TransactionIdDidAbort(xid))
@@ -568,7 +568,7 @@ StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
 	SET_LOCKTAG_RELATION(locktag, newlock->dbOid, newlock->relOid);
 
 	if (LockAcquireExtended(&locktag, AccessExclusiveLock, true, true, false)
-											== LOCKACQUIRE_NOT_AVAIL)
+		== LOCKACQUIRE_NOT_AVAIL)
 		ResolveRecoveryConflictWithLock(newlock->dbOid, newlock->relOid);
 }
 
@@ -586,6 +586,7 @@ StandbyReleaseLocks(TransactionId xid)
 	for (cell = list_head(RecoveryLockList); cell; cell = next)
 	{
 		xl_standby_lock *lock = (xl_standby_lock *) lfirst(cell);
+
 		next = lnext(cell);
 
 		if (!TransactionIdIsValid(xid) || lock->xid == xid)
@@ -619,7 +620,7 @@ StandbyReleaseLocks(TransactionId xid)
 void
 StandbyReleaseLockTree(TransactionId xid, int nsubxids, TransactionId *subxids)
 {
-	int i;
+	int			i;
 
 	StandbyReleaseLocks(xid);
 
@@ -647,6 +648,7 @@ StandbyReleaseLocksMany(TransactionId removeXid, bool keepPreparedXacts)
 	for (cell = list_head(RecoveryLockList); cell; cell = next)
 	{
 		xl_standby_lock *lock = (xl_standby_lock *) lfirst(cell);
+
 		next = lnext(cell);
 
 		if (!TransactionIdIsValid(removeXid) || TransactionIdPrecedes(lock->xid, removeXid))
@@ -692,7 +694,7 @@ StandbyReleaseOldLocks(TransactionId removeXid)
 
 /*
  * --------------------------------------------------------------------
- * 		Recovery handling for Rmgr RM_STANDBY_ID
+ *		Recovery handling for Rmgr RM_STANDBY_ID
  *
  * These record types will only be created if XLogStandbyInfoActive()
  * --------------------------------------------------------------------
@@ -710,7 +712,7 @@ standby_redo(XLogRecPtr lsn, XLogRecord *record)
 	if (info == XLOG_STANDBY_LOCK)
 	{
 		xl_standby_locks *xlrec = (xl_standby_locks *) XLogRecGetData(record);
-		int i;
+		int			i;
 
 		for (i = 0; i < xlrec->nlocks; i++)
 			StandbyAcquireAccessExclusiveLock(xlrec->locks[i].xid,
@@ -761,7 +763,7 @@ standby_desc(StringInfo buf, uint8 xl_info, char *rec)
 	if (info == XLOG_STANDBY_LOCK)
 	{
 		xl_standby_locks *xlrec = (xl_standby_locks *) rec;
-		int i;
+		int			i;
 
 		appendStringInfo(buf, "AccessExclusive locks:");
 
@@ -790,7 +792,7 @@ LogStandbySnapshot(TransactionId *oldestActiveXid, TransactionId *nextXid)
 {
 	RunningTransactions running;
 	xl_standby_lock *locks;
-	int nlocks;
+	int			nlocks;
 
 	Assert(XLogStandbyInfoActive());
 
@@ -823,9 +825,9 @@ LogStandbySnapshot(TransactionId *oldestActiveXid, TransactionId *nextXid)
 static void
 LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
 {
-	xl_running_xacts	xlrec;
-	XLogRecData 			rdata[2];
-	int						lastrdata = 0;
+	xl_running_xacts xlrec;
+	XLogRecData rdata[2];
+	int			lastrdata = 0;
 	XLogRecPtr	recptr;
 
 	xlrec.xcnt = CurrRunningXacts->xcnt;
@@ -876,8 +878,8 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
 static void
 LogAccessExclusiveLocks(int nlocks, xl_standby_lock *locks)
 {
-	XLogRecData		rdata[2];
-	xl_standby_locks	xlrec;
+	XLogRecData rdata[2];
+	xl_standby_locks xlrec;
 
 	xlrec.nlocks = nlocks;
 
@@ -900,22 +902,22 @@ LogAccessExclusiveLocks(int nlocks, xl_standby_lock *locks)
 void
 LogAccessExclusiveLock(Oid dbOid, Oid relOid)
 {
-	xl_standby_lock		xlrec;
+	xl_standby_lock xlrec;
 
 	/*
-	 * Ensure that a TransactionId has been assigned to this transaction.
-	 * We don't actually need the xid yet but if we don't do this then
+	 * Ensure that a TransactionId has been assigned to this transaction. We
+	 * don't actually need the xid yet but if we don't do this then
 	 * RecordTransactionCommit() and RecordTransactionAbort() will optimise
 	 * away the transaction completion record which recovery relies upon to
-	 * release locks. It's a hack, but for a corner case not worth adding
-	 * code for into the main commit path.
+	 * release locks. It's a hack, but for a corner case not worth adding code
+	 * for into the main commit path.
 	 */
 	xlrec.xid = GetTopTransactionId();
 
 	/*
-	 * Decode the locktag back to the original values, to avoid
-	 * sending lots of empty bytes with every message.  See
-	 * lock.h to check how a locktag is defined for LOCKTAG_RELATION
+	 * Decode the locktag back to the original values, to avoid sending lots
+	 * of empty bytes with every message.  See lock.h to check how a locktag
+	 * is defined for LOCKTAG_RELATION
 	 */
 	xlrec.dbOid = dbOid;
 	xlrec.relOid = relOid;
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index e80d6de77706d09a2acee28f59717744330fe6b8..36da56da7460aaffded1894df6e17960ee9c5c0f 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -24,7 +24,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.140 2010/01/02 16:57:51 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.141 2010/02/26 02:01:00 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -145,8 +145,8 @@ static bool
 myLargeObjectExists(Oid loid, Snapshot snapshot)
 {
 	Relation	pg_lo_meta;
-	ScanKeyData	skey[1];
-	SysScanDesc	sd;
+	ScanKeyData skey[1];
+	SysScanDesc sd;
 	HeapTuple	tuple;
 	bool		retval = false;
 
@@ -210,14 +210,14 @@ inv_create(Oid lobjId)
 	 * dependency on the owner of largeobject
 	 *
 	 * The reason why we use LargeObjectRelationId instead of
-	 * LargeObjectMetadataRelationId here is to provide backward
-	 * compatibility to the applications which utilize a knowledge
-	 * about internal layout of system catalogs.
-	 * OID of pg_largeobject_metadata and loid of pg_largeobject
-	 * are same value, so there are no actual differences here.
+	 * LargeObjectMetadataRelationId here is to provide backward compatibility
+	 * to applications that rely on knowledge of the internal layout of the
+	 * system catalogs. The OID of pg_largeobject_metadata and the loid of
+	 * pg_largeobject are the same value, so there are no actual differences here.
 	 */
 	recordDependencyOnOwner(LargeObjectRelationId,
 							lobjId_new, GetUserId());
+
 	/*
 	 * Advance command counter to make new tuple visible to later operations.
 	 */
@@ -298,7 +298,7 @@ inv_close(LargeObjectDesc *obj_desc)
 int
 inv_drop(Oid lobjId)
 {
-	ObjectAddress	object;
+	ObjectAddress object;
 
 	/*
 	 * Delete any comments and dependencies on the large object
@@ -554,7 +554,7 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes)
 	if (!LargeObjectExists(obj_desc->id))
 		ereport(ERROR,
 				(errcode(ERRCODE_UNDEFINED_OBJECT),
-				 errmsg("large object %u was already dropped", obj_desc->id)));
+			   errmsg("large object %u was already dropped", obj_desc->id)));
 
 	if (nbytes <= 0)
 		return 0;
@@ -751,7 +751,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int len)
 	if (!LargeObjectExists(obj_desc->id))
 		ereport(ERROR,
 				(errcode(ERRCODE_UNDEFINED_OBJECT),
-				 errmsg("large object %u was already dropped", obj_desc->id)));
+			   errmsg("large object %u was already dropped", obj_desc->id)));
 
 	open_lo_relation();
 
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index c59355cfc2d560f8619049c3132de9cf1ed506de..75fd29f520053e8e1dae76dede9956f26522d7bf 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.194 2010/01/31 19:01:11 sriggs Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.195 2010/02/26 02:01:00 momjian Exp $
  *
  * NOTES
  *	  A lock table is a shared memory hash table.  When
@@ -484,10 +484,10 @@ LockAcquire(const LOCKTAG *locktag,
  */
 LockAcquireResult
 LockAcquireExtended(const LOCKTAG *locktag,
-			LOCKMODE lockmode,
-			bool sessionLock,
-			bool dontWait,
-			bool reportMemoryError)
+					LOCKMODE lockmode,
+					bool sessionLock,
+					bool dontWait,
+					bool reportMemoryError)
 {
 	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
 	LockMethod	lockMethodTable;
@@ -512,12 +512,12 @@ LockAcquireExtended(const LOCKTAG *locktag,
 
 	if (RecoveryInProgress() && !InRecovery &&
 		(locktag->locktag_type == LOCKTAG_OBJECT ||
-		 locktag->locktag_type == LOCKTAG_RELATION ) &&
+		 locktag->locktag_type == LOCKTAG_RELATION) &&
 		lockmode > RowExclusiveLock)
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 				 errmsg("cannot acquire lockmode %s on database objects while recovery is in progress",
-									lockMethodTable->lockModeNames[lockmode]),
+						lockMethodTable->lockModeNames[lockmode]),
 				 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
 
 #ifdef LOCK_DEBUG
@@ -612,7 +612,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
 			ereport(ERROR,
 					(errcode(ERRCODE_OUT_OF_MEMORY),
 					 errmsg("out of shared memory"),
-				  errhint("You might need to increase max_locks_per_transaction.")));
+					 errhint("You might need to increase max_locks_per_transaction.")));
 		else
 			return LOCKACQUIRE_NOT_AVAIL;
 	}
@@ -681,7 +681,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
 			ereport(ERROR,
 					(errcode(ERRCODE_OUT_OF_MEMORY),
 					 errmsg("out of shared memory"),
-				  errhint("You might need to increase max_locks_per_transaction.")));
+					 errhint("You might need to increase max_locks_per_transaction.")));
 		else
 			return LOCKACQUIRE_NOT_AVAIL;
 	}
@@ -871,9 +871,9 @@ LockAcquireExtended(const LOCKTAG *locktag,
 	LWLockRelease(partitionLock);
 
 	/*
-	 * Emit a WAL record if acquisition of this lock need to be replayed in
-	 * a standby server. Only AccessExclusiveLocks can conflict with lock
-	 * types that read-only transactions can acquire in a standby server.
+	 * Emit a WAL record if acquisition of this lock needs to be replayed in a
+	 * standby server. Only AccessExclusiveLocks can conflict with lock types
+	 * that read-only transactions can acquire in a standby server.
 	 *
 	 * Make sure this definition matches the one GetRunningTransactionLocks().
 	 */
@@ -883,9 +883,9 @@ LockAcquireExtended(const LOCKTAG *locktag,
 		XLogStandbyInfoActive())
 	{
 		/*
-		 * Decode the locktag back to the original values, to avoid
-		 * sending lots of empty bytes with every message.  See
-		 * lock.h to check how a locktag is defined for LOCKTAG_RELATION
+		 * Decode the locktag back to the original values, to avoid sending
+		 * lots of empty bytes with every message.	See lock.h to check how a
+		 * locktag is defined for LOCKTAG_RELATION
 		 */
 		LogAccessExclusiveLock(locktag->locktag_field1,
 							   locktag->locktag_field2);
@@ -1824,7 +1824,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
 		if (vxids == NULL)
 			vxids = (VirtualTransactionId *)
 				MemoryContextAlloc(TopMemoryContext,
-					sizeof(VirtualTransactionId) * (MaxBackends + 1));
+						   sizeof(VirtualTransactionId) * (MaxBackends + 1));
 	}
 	else
 		vxids = (VirtualTransactionId *)
@@ -2275,7 +2275,7 @@ GetRunningTransactionLocks(int *nlocks)
 	PROCLOCK   *proclock;
 	HASH_SEQ_STATUS seqstat;
 	int			i;
-	int 		index;
+	int			index;
 	int			els;
 	xl_standby_lock *accessExclusiveLocks;
 
@@ -2300,11 +2300,11 @@ GetRunningTransactionLocks(int *nlocks)
 	accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
 
 	/*
-	 * If lock is a currently granted AccessExclusiveLock then
-	 * it will have just one proclock holder, so locks are never
-	 * accessed twice in this particular case. Don't copy this code
-	 * for use elsewhere because in the general case this will
-	 * give you duplicate locks when looking at non-exclusive lock types.
+	 * If lock is a currently granted AccessExclusiveLock then it will have
+	 * just one proclock holder, so locks are never accessed twice in this
+	 * particular case. Don't copy this code for use elsewhere because in the
+	 * general case this will give you duplicate locks when looking at
+	 * non-exclusive lock types.
 	 */
 	index = 0;
 	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
@@ -2313,11 +2313,11 @@ GetRunningTransactionLocks(int *nlocks)
 		if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
 			proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
 		{
-			PGPROC	*proc = proclock->tag.myProc;
-			LOCK	*lock = proclock->tag.myLock;
+			PGPROC	   *proc = proclock->tag.myProc;
+			LOCK	   *lock = proclock->tag.myLock;
 
-			accessExclusiveLocks[index].xid 	= proc->xid;
-			accessExclusiveLocks[index].dbOid  = lock->tag.locktag_field1;
+			accessExclusiveLocks[index].xid = proc->xid;
+			accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
 			accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
 
 			index++;
@@ -2605,8 +2605,8 @@ lock_twophase_recover(TransactionId xid, uint16 info,
 			 lock->tag.locktag_field3);
 
 	/*
-	 * We ignore any possible conflicts and just grant ourselves the lock.
-	 * Not only because we don't bother, but also to avoid deadlocks when
+	 * We ignore any possible conflicts and just grant ourselves the lock. Not
+	 * only because we don't bother, but also to avoid deadlocks when
 	 * switching from standby to normal mode. See function comment.
 	 */
 	GrantLock(lock, proclock, lockmode);
@@ -2639,8 +2639,8 @@ lock_twophase_standby_recover(TransactionId xid, uint16 info,
 		locktag->locktag_type == LOCKTAG_RELATION)
 	{
 		StandbyAcquireAccessExclusiveLock(xid,
-										  locktag->locktag_field1 /* dboid */,
-										  locktag->locktag_field2 /* reloid */);
+										locktag->locktag_field1 /* dboid */ ,
+									  locktag->locktag_field2 /* reloid */ );
 	}
 }
 
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 1e103743b2e392b80b25d323efd44f042130fa57..80775a4061f9ca9eaaffc80720f4019229ee5426 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.216 2010/02/13 01:32:19 sriggs Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.217 2010/02/26 02:01:01 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -519,7 +519,7 @@ SetStartupBufferPinWaitBufId(int bufid)
 int
 GetStartupBufferPinWaitBufId(void)
 {
-	int bufid;
+	int			bufid;
 
 	/* use volatile pointer to prevent code rearrangement */
 	volatile PROC_HDR *procglobal = ProcGlobal;
@@ -702,8 +702,8 @@ ProcKill(int code, Datum arg)
 
 	/*
 	 * This process is no longer present in shared memory in any meaningful
-	 * way, so tell the postmaster we've cleaned up acceptably well.
-	 * (XXX autovac launcher should be included here someday)
+	 * way, so tell the postmaster we've cleaned up acceptably well. (XXX
+	 * autovac launcher should be included here someday)
 	 */
 	if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
 		MarkPostmasterChildInactive();
@@ -1376,11 +1376,11 @@ ProcSendSignal(int pid)
 
 		/*
 		 * Check to see whether it is the Startup process we wish to signal.
-		 * This call is made by the buffer manager when it wishes to wake
-		 * up a process that has been waiting for a pin in so it can obtain a
+		 * This call is made by the buffer manager when it wishes to wake up a
+		 * process that has been waiting for a pin in so it can obtain a
 		 * cleanup lock using LockBufferForCleanup(). Startup is not a normal
-		 * backend, so BackendPidGetProc() will not return any pid at all.
-		 * So we remember the information for this special case.
+		 * backend, so BackendPidGetProc() will not return any pid at all. So
+		 * we remember the information for this special case.
 		 */
 		if (pid == procglobal->startupProcPid)
 			proc = procglobal->startupProc;
@@ -1713,7 +1713,7 @@ CheckStandbyTimeout(void)
 void
 handle_standby_sig_alarm(SIGNAL_ARGS)
 {
-	int save_errno = errno;
+	int			save_errno = errno;
 
 	if (standby_timeout_active)
 		(void) CheckStandbyTimeout();
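
GetStartupBufferPinWaitBufId() above reads a shared-memory field through a volatile-qualified pointer so the compiler performs the load exactly where it is written rather than reordering or caching it. A reduced sketch of that idiom, with a hypothetical struct standing in for the real shared header:

/* sketch: volatile pointer read of a shared field, assumed names */
#include <stdio.h>

typedef struct DemoProcHeader
{
	int			startupBufferPinWaitBufId;
} DemoProcHeader;

static DemoProcHeader demo_shared;	/* stands in for shared memory */

static int
demo_get_wait_bufid(void)
{
	/* use volatile pointer to prevent code rearrangement */
	volatile DemoProcHeader *hdr = &demo_shared;
	int			bufid;

	bufid = hdr->startupBufferPinWaitBufId;
	return bufid;
}

int
main(void)
{
	demo_shared.startupBufferPinWaitBufId = 42;
	printf("%d\n", demo_get_wait_bufid());
	return 0;
}
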
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 2fdbe31b3bc7c5a537231abbbbfa3d7ea4f6158d..eb5c73d6f8d34f878357a4e3613d6c9e4a0788e9 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.150 2010/01/02 16:57:52 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.151 2010/02/26 02:01:01 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -161,7 +161,7 @@ static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
 static void register_unlink(RelFileNode rnode);
 static MdfdVec *_fdvec_alloc(void);
 static char *_mdfd_segpath(SMgrRelation reln, ForkNumber forknum,
-						   BlockNumber segno);
+			  BlockNumber segno);
 static MdfdVec *_mdfd_openseg(SMgrRelation reln, ForkNumber forkno,
 			  BlockNumber segno, int oflags);
 static MdfdVec *_mdfd_getseg(SMgrRelation reln, ForkNumber forkno,
@@ -392,7 +392,7 @@ mdunlink(RelFileNode rnode, ForkNumber forkNum, bool isRedo)
 				if (errno != ENOENT)
 					ereport(WARNING,
 							(errcode_for_file_access(),
-					 errmsg("could not remove file \"%s\": %m", segpath)));
+					   errmsg("could not remove file \"%s\": %m", segpath)));
 				break;
 			}
 		}
@@ -1080,12 +1080,12 @@ mdsync(void)
 					failures > 0)
 					ereport(ERROR,
 							(errcode_for_file_access(),
-							 errmsg("could not fsync file \"%s\": %m", path)));
+						   errmsg("could not fsync file \"%s\": %m", path)));
 				else
 					ereport(DEBUG1,
 							(errcode_for_file_access(),
-							 errmsg("could not fsync file \"%s\" but retrying: %m",
-									path)));
+					   errmsg("could not fsync file \"%s\" but retrying: %m",
+							  path)));
 				pfree(path);
 
 				/*
@@ -1465,8 +1465,8 @@ _fdvec_alloc(void)
 static char *
 _mdfd_segpath(SMgrRelation reln, ForkNumber forknum, BlockNumber segno)
 {
-	char   *path,
-		   *fullpath;
+	char	   *path,
+			   *fullpath;
 
 	path = relpath(reln->smgr_rnode, forknum);
 
@@ -1583,9 +1583,9 @@ _mdfd_getseg(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno,
 					return NULL;
 				ereport(ERROR,
 						(errcode_for_file_access(),
-						 errmsg("could not open file \"%s\" (target block %u): %m",
-								_mdfd_segpath(reln, forknum, nextsegno),
-								blkno)));
+				   errmsg("could not open file \"%s\" (target block %u): %m",
+						  _mdfd_segpath(reln, forknum, nextsegno),
+						  blkno)));
 			}
 		}
 		v = v->mdfd_chain;
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 87ae9dbe4b1e60d2d371a56d8109411d4ddd4610..7a35b0a833350b74bd33e5603f2070e7563c7ec0 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -11,7 +11,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.120 2010/02/09 21:43:30 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.121 2010/02/26 02:01:01 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -356,11 +356,11 @@ smgr_internal_unlink(RelFileNode rnode, ForkNumber forknum,
 
 	/*
 	 * Send a shared-inval message to force other backends to close any
-	 * dangling smgr references they may have for this rel.  We should do
-	 * this before starting the actual unlinking, in case we fail partway
-	 * through that step.  Note that the sinval message will eventually come
-	 * back to this backend, too, and thereby provide a backstop that we
-	 * closed our own smgr rel.
+	 * dangling smgr references they may have for this rel.  We should do this
+	 * before starting the actual unlinking, in case we fail partway through
+	 * that step.  Note that the sinval message will eventually come back to
+	 * this backend, too, and thereby provide a backstop that we closed our
+	 * own smgr rel.
 	 */
 	CacheInvalidateSmgr(rnode);
 
@@ -468,11 +468,11 @@ smgrtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks,
 	 * Send a shared-inval message to force other backends to close any smgr
 	 * references they may have for this rel.  This is useful because they
 	 * might have open file pointers to segments that got removed, and/or
-	 * smgr_targblock variables pointing past the new rel end.  (The inval
+	 * smgr_targblock variables pointing past the new rel end.	(The inval
 	 * message will come back to our backend, too, causing a
 	 * probably-unnecessary local smgr flush.  But we don't expect that this
-	 * is a performance-critical path.)  As in the unlink code, we want to
-	 * be sure the message is sent before we start changing things on-disk.
+	 * is a performance-critical path.)  As in the unlink code, we want to be
+	 * sure the message is sent before we start changing things on-disk.
 	 */
 	CacheInvalidateSmgr(reln->smgr_rnode);
 
diff --git a/src/backend/tcop/dest.c b/src/backend/tcop/dest.c
index 4488f128e6a8ca3482e25693b6b3bd34c27ada71..937e1cfed3bc9d685a216e1626fb38e38edb290e 100644
--- a/src/backend/tcop/dest.c
+++ b/src/backend/tcop/dest.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/tcop/dest.c,v 1.77 2010/01/30 20:09:53 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/tcop/dest.c,v 1.78 2010/02/26 02:01:01 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -142,9 +142,10 @@ EndCommand(const char *commandTag, CommandDest dest)
 	{
 		case DestRemote:
 		case DestRemoteExecute:
+
 			/*
-			 * We assume the commandTag is plain ASCII and therefore
-			 * requires no encoding conversion.
+			 * We assume the commandTag is plain ASCII and therefore requires
+			 * no encoding conversion.
 			 */
 			pq_putmessage('C', commandTag, strlen(commandTag) + 1);
 			break;
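
EndCommand() above emits the command tag as a CommandComplete ('C') message. As a point of reference, the v3 frontend/backend framing is one type byte, a 4-byte big-endian length that counts itself plus the body, then the NUL-terminated tag. The sketch below illustrates that layout only; it is not pq_putmessage()'s implementation.

/* sketch: hand-rolled CommandComplete frame, illustrative only */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t
demo_build_command_complete(char *buf, size_t buflen, const char *tag)
{
	size_t		bodylen = strlen(tag) + 1;	/* include the trailing NUL */
	uint32_t	msglen = (uint32_t) (4 + bodylen);

	if (buflen < 1 + msglen)
		return 0;				/* caller's buffer too small */

	buf[0] = 'C';
	buf[1] = (char) (msglen >> 24);
	buf[2] = (char) (msglen >> 16);
	buf[3] = (char) (msglen >> 8);
	buf[4] = (char) msglen;
	memcpy(buf + 5, tag, bodylen);
	return 1 + msglen;
}

int
main(void)
{
	char		buf[64];
	size_t		n = demo_build_command_complete(buf, sizeof(buf), "SELECT 1");

	printf("frame is %zu bytes\n", n);	/* 1 + 4 + 9 = 14 */
	return 0;
}
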
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 2ae15d5ce029714188dd4a9411aadb74f65f6b65..46756e7169606a05c305bb2ad9f7ba15d51d7b8f 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.590 2010/02/16 22:34:50 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.591 2010/02/26 02:01:01 momjian Exp $
  *
  * NOTES
  *	  this is the "main" module of the postgres backend and
@@ -158,9 +158,9 @@ static MemoryContext unnamed_stmt_context = NULL;
 
 
 /* assorted command-line switches */
-static const char *userDoption = NULL;		/* -D switch */
+static const char *userDoption = NULL;	/* -D switch */
 
-static bool EchoQuery = false;				/* -E switch */
+static bool EchoQuery = false;	/* -E switch */
 
 /*
  * people who want to use EOF should #define DONTUSENEWLINE in
@@ -174,7 +174,7 @@ static int	UseNewLine = 0;		/* Use EOF as query delimiters */
 
 /* whether or not, and why, we were cancelled by conflict with recovery */
 static bool RecoveryConflictPending = false;
-static ProcSignalReason	RecoveryConflictReason;
+static ProcSignalReason RecoveryConflictReason;
 
 /* ----------------------------------------------------------------
  *		decls for routines only used in this file
@@ -188,8 +188,8 @@ static List *pg_rewrite_query(Query *query);
 static bool check_log_statement(List *stmt_list);
 static int	errdetail_execute(List *raw_parsetree_list);
 static int	errdetail_params(ParamListInfo params);
-static int  errdetail_abort(void);
-static int  errdetail_recovery_conflict(void);
+static int	errdetail_abort(void);
+static int	errdetail_recovery_conflict(void);
 static void start_xact_command(void);
 static void finish_xact_command(void);
 static bool IsTransactionExitStmt(Node *parsetree);
@@ -646,7 +646,7 @@ pg_analyze_and_rewrite_params(Node *parsetree,
 	Query	   *query;
 	List	   *querytree_list;
 
-	Assert(query_string != NULL); /* required as of 8.4 */
+	Assert(query_string != NULL);		/* required as of 8.4 */
 
 	TRACE_POSTGRESQL_QUERY_REWRITE_START(query_string);
 
@@ -948,7 +948,7 @@ exec_simple_query(const char *query_string)
 			ereport(ERROR,
 					(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
 					 errmsg("current transaction is aborted, "
-							"commands ignored until end of transaction block"),
+						  "commands ignored until end of transaction block"),
 					 errdetail_abort()));
 
 		/* Make sure we are in a transaction command */
@@ -1258,7 +1258,7 @@ exec_parse_message(const char *query_string,	/* string to execute */
 			ereport(ERROR,
 					(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
 					 errmsg("current transaction is aborted, "
-							"commands ignored until end of transaction block"),
+						  "commands ignored until end of transaction block"),
 					 errdetail_abort()));
 
 		/*
@@ -2267,26 +2267,26 @@ errdetail_recovery_conflict(void)
 	switch (RecoveryConflictReason)
 	{
 		case PROCSIG_RECOVERY_CONFLICT_BUFFERPIN:
-				errdetail("User was holding shared buffer pin for too long.");
-				break;
+			errdetail("User was holding shared buffer pin for too long.");
+			break;
 		case PROCSIG_RECOVERY_CONFLICT_LOCK:
-				errdetail("User was holding a relation lock for too long.");
-				break;
+			errdetail("User was holding a relation lock for too long.");
+			break;
 		case PROCSIG_RECOVERY_CONFLICT_TABLESPACE:
-				errdetail("User was or may have been using tablespace that must be dropped.");
-				break;
+			errdetail("User was or may have been using tablespace that must be dropped.");
+			break;
 		case PROCSIG_RECOVERY_CONFLICT_SNAPSHOT:
-				errdetail("User query might have needed to see row versions that must be removed.");
-				break;
+			errdetail("User query might have needed to see row versions that must be removed.");
+			break;
 		case PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK:
-				errdetail("User transaction caused buffer deadlock with recovery.");
-				break;
+			errdetail("User transaction caused buffer deadlock with recovery.");
+			break;
 		case PROCSIG_RECOVERY_CONFLICT_DATABASE:
-				errdetail("User was connected to a database that must be dropped.");
-				break;
+			errdetail("User was connected to a database that must be dropped.");
+			break;
 		default:
-				break;
-				/* no errdetail */
+			break;
+			/* no errdetail */
 	}
 
 	return 0;
@@ -2598,14 +2598,14 @@ drop_unnamed_stmt(void)
 void
 quickdie(SIGNAL_ARGS)
 {
-	sigaddset(&BlockSig, SIGQUIT); /* prevent nested calls */
+	sigaddset(&BlockSig, SIGQUIT);		/* prevent nested calls */
 	PG_SETMASK(&BlockSig);
 
 	/*
 	 * If we're aborting out of client auth, don't risk trying to send
-	 * anything to the client; we will likely violate the protocol,
-	 * not to mention that we may have interrupted the guts of OpenSSL
-	 * or some authentication library.
+	 * anything to the client; we will likely violate the protocol, not to
+	 * mention that we may have interrupted the guts of OpenSSL or some
+	 * authentication library.
 	 */
 	if (ClientAuthInProgress && whereToSendOutput == DestRemote)
 		whereToSendOutput = DestNone;
@@ -2747,88 +2747,91 @@ SigHupHandler(SIGNAL_ARGS)
 void
 RecoveryConflictInterrupt(ProcSignalReason reason)
 {
-	int                     save_errno = errno;
+	int			save_errno = errno;
 
 	/*
-	* Don't joggle the elbow of proc_exit
-	*/
+	 * Don't joggle the elbow of proc_exit
+	 */
 	if (!proc_exit_inprogress)
 	{
 		RecoveryConflictReason = reason;
 		switch (reason)
 		{
 			case PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK:
-					/*
-					 * If we aren't waiting for a lock we can never deadlock.
-					 */
-					if (!IsWaitingForLock())
-						return;
 
-					/* Intentional drop through to check wait for pin */
+				/*
+				 * If we aren't waiting for a lock we can never deadlock.
+				 */
+				if (!IsWaitingForLock())
+					return;
+
+				/* Intentional drop through to check wait for pin */
 
 			case PROCSIG_RECOVERY_CONFLICT_BUFFERPIN:
-					/*
-					 * If we aren't blocking the Startup process there is
-					 * nothing more to do.
-					 */
-					if (!HoldingBufferPinThatDelaysRecovery())
-						return;
 
-					MyProc->recoveryConflictPending = true;
+				/*
+				 * If we aren't blocking the Startup process there is nothing
+				 * more to do.
+				 */
+				if (!HoldingBufferPinThatDelaysRecovery())
+					return;
+
+				MyProc->recoveryConflictPending = true;
 
-					/* Intentional drop through to error handling */
+				/* Intentional drop through to error handling */
 
 			case PROCSIG_RECOVERY_CONFLICT_LOCK:
 			case PROCSIG_RECOVERY_CONFLICT_TABLESPACE:
 			case PROCSIG_RECOVERY_CONFLICT_SNAPSHOT:
-					/*
-					 * If we aren't in a transaction any longer then ignore.
-					 */
-					if (!IsTransactionOrTransactionBlock())
-						return;
 
+				/*
+				 * If we aren't in a transaction any longer then ignore.
+				 */
+				if (!IsTransactionOrTransactionBlock())
+					return;
+
+				/*
+				 * If we can abort just the current subtransaction then we are
+				 * OK to throw an ERROR to resolve the conflict. Otherwise
+				 * drop through to the FATAL case.
+				 *
+				 * XXX other times that we can throw just an ERROR *may* be
+				 * PROCSIG_RECOVERY_CONFLICT_LOCK if no locks are held in
+				 * parent transactions
+				 *
+				 * PROCSIG_RECOVERY_CONFLICT_SNAPSHOT if no snapshots are held
+				 * by parent transactions and the transaction is not
+				 * serializable
+				 *
+				 * PROCSIG_RECOVERY_CONFLICT_TABLESPACE if no temp files or
+				 * cursors open in parent transactions
+				 */
+				if (!IsSubTransaction())
+				{
 					/*
-					 * If we can abort just the current subtransaction then we
-					 * are OK to throw an ERROR to resolve the conflict. Otherwise
-					 * drop through to the FATAL case.
-					 *
-					 * XXX other times that we can throw just an ERROR *may* be
-					 *   PROCSIG_RECOVERY_CONFLICT_LOCK
-					 *		if no locks are held in parent transactions
-					 *
-					 *   PROCSIG_RECOVERY_CONFLICT_SNAPSHOT
-					 *		if no snapshots are held by parent transactions
-					 *		and the transaction is not serializable
-					 *
-					 *   PROCSIG_RECOVERY_CONFLICT_TABLESPACE
-					 *		if no temp files or cursors open in parent transactions
+					 * If we already aborted then we no longer need to cancel.
+					 * We do this here since we do not wish to ignore aborted
+					 * subtransactions, which must cause FATAL, currently.
 					 */
-					if (!IsSubTransaction())
-					{
-						/*
-						 * If we already aborted then we no longer need to cancel.
-						 * We do this here since we do not wish to ignore aborted
-						 * subtransactions, which must cause FATAL, currently.
-						 */
-						if (IsAbortedTransactionBlockState())
-							return;
-
-						RecoveryConflictPending = true;
-						QueryCancelPending = true;
-						InterruptPending = true;
-						break;
-					}
-
-					/* Intentional drop through to session cancel */
+					if (IsAbortedTransactionBlockState())
+						return;
 
-			case PROCSIG_RECOVERY_CONFLICT_DATABASE:
 					RecoveryConflictPending = true;
-					ProcDiePending = true;
+					QueryCancelPending = true;
 					InterruptPending = true;
 					break;
+				}
+
+				/* Intentional drop through to session cancel */
+
+			case PROCSIG_RECOVERY_CONFLICT_DATABASE:
+				RecoveryConflictPending = true;
+				ProcDiePending = true;
+				InterruptPending = true;
+				break;
 
 			default:
-					elog(FATAL, "Unknown conflict mode");
+				elog(FATAL, "Unknown conflict mode");
 		}
 
 		Assert(RecoveryConflictPending && (QueryCancelPending || ProcDiePending));
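
The reindented switch above relies on the "intentional drop through" pattern: cases are ordered from most specific to most severe, each one returning early when its extra condition does not hold and otherwise falling through to the stronger response. A minimal sketch of that control flow, with invented names and reasons:

/* sketch: deliberate switch fall-through for escalating responses */
#include <stdbool.h>
#include <stdio.h>

typedef enum
{
	DEMO_CONFLICT_DEADLOCK,
	DEMO_CONFLICT_BUFFERPIN,
	DEMO_CONFLICT_LOCK,
	DEMO_CONFLICT_DATABASE
} DemoConflict;

static bool waiting_for_lock = true;
static bool holding_pin = true;

static const char *
demo_classify(DemoConflict reason)
{
	switch (reason)
	{
		case DEMO_CONFLICT_DEADLOCK:
			if (!waiting_for_lock)
				return "ignore";
			/* intentional drop through to the buffer-pin check */

		case DEMO_CONFLICT_BUFFERPIN:
			if (!holding_pin)
				return "ignore";
			/* intentional drop through to query cancel */

		case DEMO_CONFLICT_LOCK:
			return "cancel query";

		case DEMO_CONFLICT_DATABASE:
			return "terminate session";

		default:
			return "unknown";
	}
}

int
main(void)
{
	printf("%s\n", demo_classify(DEMO_CONFLICT_DEADLOCK));
	return 0;
}
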
@@ -2885,7 +2888,7 @@ ProcessInterrupts(void)
 		else if (RecoveryConflictPending)
 			ereport(FATAL,
 					(errcode(ERRCODE_ADMIN_SHUTDOWN),
-					 errmsg("terminating connection due to conflict with recovery"),
+			  errmsg("terminating connection due to conflict with recovery"),
 					 errdetail_recovery_conflict()));
 		else
 			ereport(FATAL,
@@ -2897,7 +2900,7 @@ ProcessInterrupts(void)
 		QueryCancelPending = false;
 		if (ClientAuthInProgress)
 		{
-			ImmediateInterruptOK = false;	/* not idle anymore */
+			ImmediateInterruptOK = false;		/* not idle anymore */
 			DisableNotifyInterrupt();
 			DisableCatchupInterrupt();
 			/* As in quickdie, don't risk sending to client during auth */
@@ -2909,7 +2912,7 @@ ProcessInterrupts(void)
 		}
 		if (cancel_from_timeout)
 		{
-			ImmediateInterruptOK = false;	/* not idle anymore */
+			ImmediateInterruptOK = false;		/* not idle anymore */
 			DisableNotifyInterrupt();
 			DisableCatchupInterrupt();
 			ereport(ERROR,
@@ -2918,7 +2921,7 @@ ProcessInterrupts(void)
 		}
 		if (IsAutoVacuumWorkerProcess())
 		{
-			ImmediateInterruptOK = false;	/* not idle anymore */
+			ImmediateInterruptOK = false;		/* not idle anymore */
 			DisableNotifyInterrupt();
 			DisableCatchupInterrupt();
 			ereport(ERROR,
@@ -2927,7 +2930,7 @@ ProcessInterrupts(void)
 		}
 		if (RecoveryConflictPending)
 		{
-			ImmediateInterruptOK = false;	/* not idle anymore */
+			ImmediateInterruptOK = false;		/* not idle anymore */
 			RecoveryConflictPending = false;
 			DisableNotifyInterrupt();
 			DisableCatchupInterrupt();
@@ -2936,23 +2939,23 @@ ProcessInterrupts(void)
 						(errcode(ERRCODE_ADMIN_SHUTDOWN),
 						 errmsg("terminating connection due to conflict with recovery"),
 						 errdetail_recovery_conflict(),
-						 errhint("In a moment you should be able to reconnect to the"
-								 " database and repeat your command.")));
+				 errhint("In a moment you should be able to reconnect to the"
+						 " database and repeat your command.")));
 			else
 				ereport(ERROR,
 						(errcode(ERRCODE_QUERY_CANCELED),
-						 errmsg("canceling statement due to conflict with recovery"),
+				 errmsg("canceling statement due to conflict with recovery"),
 						 errdetail_recovery_conflict()));
 		}
 
 		/*
-		 * If we are reading a command from the client, just ignore the
-		 * cancel request --- sending an extra error message won't
-		 * accomplish anything.  Otherwise, go ahead and throw the error.
+		 * If we are reading a command from the client, just ignore the cancel
+		 * request --- sending an extra error message won't accomplish
+		 * anything.  Otherwise, go ahead and throw the error.
 		 */
 		if (!DoingCommandRead)
 		{
-			ImmediateInterruptOK = false;	/* not idle anymore */
+			ImmediateInterruptOK = false;		/* not idle anymore */
 			DisableNotifyInterrupt();
 			DisableCatchupInterrupt();
 			ereport(ERROR,
@@ -3154,7 +3157,7 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx)
 
 	if (secure)
 	{
-		gucsource = PGC_S_ARGV;			/* switches came from command line */
+		gucsource = PGC_S_ARGV; /* switches came from command line */
 
 		/* Ignore the initial --single argument, if present */
 		if (argc > 1 && strcmp(argv[1], "--single") == 0)
@@ -3285,12 +3288,13 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx)
 				}
 
 			case 'v':
+
 				/*
 				 * -v is no longer used in normal operation, since
-				 * FrontendProtocol is already set before we get here.
-				 * We keep the switch only for possible use in standalone
-				 * operation, in case we ever support using normal FE/BE
-				 * protocol with a standalone backend.
+				 * FrontendProtocol is already set before we get here. We keep
+				 * the switch only for possible use in standalone operation,
+				 * in case we ever support using normal FE/BE protocol with a
+				 * standalone backend.
 				 */
 				if (secure)
 					FrontendProtocol = (ProtocolVersion) atoi(optarg);
@@ -3344,13 +3348,13 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx)
 			ereport(FATAL,
 					(errcode(ERRCODE_SYNTAX_ERROR),
 				 errmsg("invalid command-line arguments for server process"),
-			   errhint("Try \"%s --help\" for more information.", progname)));
+			  errhint("Try \"%s --help\" for more information.", progname)));
 		else
 			ereport(FATAL,
 					(errcode(ERRCODE_SYNTAX_ERROR),
 					 errmsg("%s: invalid command-line arguments",
 							progname),
-			   errhint("Try \"%s --help\" for more information.", progname)));
+			  errhint("Try \"%s --help\" for more information.", progname)));
 	}
 
 	if (argc - optind == 1)
@@ -3443,9 +3447,9 @@ PostgresMain(int argc, char *argv[], const char *username)
 		dbname = username;
 		if (dbname == NULL)
 			ereport(FATAL,
-				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				 errmsg("%s: no database nor user name specified",
-						progname)));
+					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+					 errmsg("%s: no database nor user name specified",
+							progname)));
 	}
 
 	/* Acquire configuration parameters, unless inherited from postmaster */
@@ -3482,26 +3486,27 @@ PostgresMain(int argc, char *argv[], const char *username)
 		WalSndSignals();
 	else
 	{
-		pqsignal(SIGHUP, SigHupHandler);	/* set flag to read config file */
-		pqsignal(SIGINT, StatementCancelHandler);	/* cancel current query */
-		pqsignal(SIGTERM, die);		/* cancel current query and exit */
+		pqsignal(SIGHUP, SigHupHandler);		/* set flag to read config
+												 * file */
+		pqsignal(SIGINT, StatementCancelHandler);		/* cancel current query */
+		pqsignal(SIGTERM, die); /* cancel current query and exit */
 
 		/*
 		 * In a standalone backend, SIGQUIT can be generated from the keyboard
-		 * easily, while SIGTERM cannot, so we make both signals do die() rather
-		 * than quickdie().
+		 * easily, while SIGTERM cannot, so we make both signals do die()
+		 * rather than quickdie().
 		 */
 		if (IsUnderPostmaster)
-			pqsignal(SIGQUIT, quickdie);	/* hard crash time */
+			pqsignal(SIGQUIT, quickdie);		/* hard crash time */
 		else
-			pqsignal(SIGQUIT, die); /* cancel current query and exit */
-		pqsignal(SIGALRM, handle_sig_alarm);		/* timeout conditions */
+			pqsignal(SIGQUIT, die);		/* cancel current query and exit */
+		pqsignal(SIGALRM, handle_sig_alarm);	/* timeout conditions */
 
 		/*
 		 * Ignore failure to write to frontend. Note: if frontend closes
 		 * connection, we will notice it and exit cleanly when control next
-		 * returns to outer loop.  This seems safer than forcing exit in the midst
-		 * of output during who-knows-what operation...
+		 * returns to outer loop.  This seems safer than forcing exit in the
+		 * midst of output during who-knows-what operation...
 		 */
 		pqsignal(SIGPIPE, SIG_IGN);
 		pqsignal(SIGUSR1, procsignal_sigusr1_handler);
@@ -3509,9 +3514,11 @@ PostgresMain(int argc, char *argv[], const char *username)
 		pqsignal(SIGFPE, FloatExceptionHandler);
 
 		/*
-		 * Reset some signals that are accepted by postmaster but not by backend
+		 * Reset some signals that are accepted by postmaster but not by
+		 * backend
 		 */
-		pqsignal(SIGCHLD, SIG_DFL); /* system() requires this on some platforms */
+		pqsignal(SIGCHLD, SIG_DFL);		/* system() requires this on some
+										 * platforms */
 	}
 
 	pqinitmask();
@@ -3779,7 +3786,7 @@ PostgresMain(int argc, char *argv[], const char *username)
 		 * collector, and to update the PS stats display.  We avoid doing
 		 * those every time through the message loop because it'd slow down
 		 * processing of batched messages, and because we don't want to report
-		 * uncommitted updates (that confuses autovacuum).  The notification
+		 * uncommitted updates (that confuses autovacuum).	The notification
 		 * processor wants a call too, if we are not in a transaction block.
 		 */
 		if (send_ready_for_query)
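
Handlers such as SigHupHandler ("set flag to read config file") follow the usual async-signal-safe idiom: save and restore errno, set a volatile sig_atomic_t flag, and let the main loop do the real work later. A generic sketch of that idiom, not the backend's actual handler:

/* sketch: flag-setting signal handler with errno preservation */
#include <errno.h>
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_sighup = 0;

static void
demo_sighup_handler(int signo)
{
	int			save_errno = errno;

	(void) signo;
	got_sighup = 1;				/* main loop rereads config later */
	errno = save_errno;
}

int
main(void)
{
	signal(SIGHUP, demo_sighup_handler);

	raise(SIGHUP);				/* simulate a reload request */
	if (got_sighup)
	{
		got_sighup = 0;
		printf("would reread configuration files here\n");
	}
	return 0;
}
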
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 8beb82385a6bf9798f7f8a009aeafdac08eff3d4..d60dc49c3c386e065f83d6c9e21e166115221714 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.136 2010/02/16 20:58:14 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.137 2010/02/26 02:01:02 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -80,7 +80,8 @@ CreateQueryDesc(PlannedStmt *plannedstmt,
 	qd->crosscheck_snapshot = RegisterSnapshot(crosscheck_snapshot);
 	qd->dest = dest;			/* output dest */
 	qd->params = params;		/* parameter values passed into query */
-	qd->instrument_options = instrument_options;	/* instrumentation wanted? */
+	qd->instrument_options = instrument_options;		/* instrumentation
+														 * wanted? */
 
 	/* null these fields until set by ExecutorStart */
 	qd->tupDesc = NULL;
@@ -111,7 +112,7 @@ CreateUtilityQueryDesc(Node *utilitystmt,
 	qd->crosscheck_snapshot = InvalidSnapshot;	/* RI check snapshot */
 	qd->dest = dest;			/* output dest */
 	qd->params = params;		/* parameter values passed into query */
-	qd->instrument_options = false;	/* uninteresting for utilities */
+	qd->instrument_options = false;		/* uninteresting for utilities */
 
 	/* null these fields until set by ExecutorStart */
 	qd->tupDesc = NULL;
@@ -803,7 +804,7 @@ PortalRun(Portal portal, long count, bool isTopLevel,
 				{
 					if (strcmp(portal->commandTag, "SELECT") == 0)
 						snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
-										"SELECT %u", nprocessed);
+								 "SELECT %u", nprocessed);
 					else
 						strcpy(completionTag, portal->commandTag);
 				}
@@ -1316,14 +1317,13 @@ PortalRunMulti(Portal portal, bool isTopLevel,
 	 * If a command completion tag was supplied, use it.  Otherwise use the
 	 * portal's commandTag as the default completion tag.
 	 *
-	 * Exception: Clients expect INSERT/UPDATE/DELETE tags to have
-	 * counts, so fake them with zeros.  This can happen with DO INSTEAD
-	 * rules if there is no replacement query of the same type as the
-	 * original.  We print "0 0" here because technically there is no
-	 * query of the matching tag type, and printing a non-zero count for
-	 * a different query type seems wrong, e.g.  an INSERT that does
-	 * an UPDATE instead should not print "0 1" if one row
-	 * was updated.  See QueryRewrite(), step 3, for details.
+	 * Exception: Clients expect INSERT/UPDATE/DELETE tags to have counts, so
+	 * fake them with zeros.  This can happen with DO INSTEAD rules if there
+	 * is no replacement query of the same type as the original.  We print "0
+	 * 0" here because technically there is no query of the matching tag type,
+	 * and printing a non-zero count for a different query type seems wrong,
+	 * e.g.  an INSERT that does an UPDATE instead should not print "0 1" if
+	 * one row was updated.  See QueryRewrite(), step 3, for details.
 	 */
 	if (completionTag && completionTag[0] == '\0')
 	{
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 6dc5a51cd2a0beed83bb660ac060a6260dd89eb0..33b1aca72dc14e2fa5a4776117b053d7984d3bcd 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -10,7 +10,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.334 2010/02/20 21:24:02 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.335 2010/02/26 02:01:04 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -238,7 +238,7 @@ PreventCommandIfReadOnly(const char *cmdname)
 	if (XactReadOnly)
 		ereport(ERROR,
 				(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
-				 /* translator: %s is name of a SQL command, eg CREATE */
+		/* translator: %s is name of a SQL command, eg CREATE */
 				 errmsg("cannot execute %s in a read-only transaction",
 						cmdname)));
 }
@@ -247,7 +247,7 @@ PreventCommandIfReadOnly(const char *cmdname)
  * PreventCommandDuringRecovery: throw error if RecoveryInProgress
  *
  * The majority of operations that are unsafe in a Hot Standby slave
- * will be rejected by XactReadOnly tests.  However there are a few
+ * will be rejected by XactReadOnly tests.	However there are a few
  * commands that are allowed in "read-only" xacts but cannot be allowed
  * in Hot Standby mode.  Those commands should call this function.
  */
@@ -257,7 +257,7 @@ PreventCommandDuringRecovery(const char *cmdname)
 	if (RecoveryInProgress())
 		ereport(ERROR,
 				(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
-				 /* translator: %s is name of a SQL command, eg CREATE */
+		/* translator: %s is name of a SQL command, eg CREATE */
 				 errmsg("cannot execute %s during recovery",
 						cmdname)));
 }
@@ -275,9 +275,9 @@ CheckRestrictedOperation(const char *cmdname)
 	if (InSecurityRestrictedOperation())
 		ereport(ERROR,
 				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-				 /* translator: %s is name of a SQL command, eg PREPARE */
-				 errmsg("cannot execute %s within security-restricted operation",
-						cmdname)));
+		/* translator: %s is name of a SQL command, eg PREPARE */
+			 errmsg("cannot execute %s within security-restricted operation",
+					cmdname)));
 }
 
 
@@ -312,9 +312,9 @@ ProcessUtility(Node *parsetree,
 	Assert(queryString != NULL);	/* required as of 8.4 */
 
 	/*
-	 * We provide a function hook variable that lets loadable plugins
-	 * get control when ProcessUtility is called.  Such a plugin would
-	 * normally call standard_ProcessUtility().
+	 * We provide a function hook variable that lets loadable plugins get
+	 * control when ProcessUtility is called.  Such a plugin would normally
+	 * call standard_ProcessUtility().
 	 */
 	if (ProcessUtility_hook)
 		(*ProcessUtility_hook) (parsetree, queryString, params,
@@ -1126,12 +1126,13 @@ standard_ProcessUtility(Node *parsetree,
 				ereport(ERROR,
 						(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
 						 errmsg("must be superuser to do CHECKPOINT")));
+
 			/*
 			 * You might think we should have a PreventCommandDuringRecovery()
-			 * here, but we interpret a CHECKPOINT command during recovery
-			 * as a request for a restartpoint instead. We allow this since
-			 * it can be a useful way of reducing switchover time when
-			 * using various forms of replication.
+			 * here, but we interpret a CHECKPOINT command during recovery as
+			 * a request for a restartpoint instead. We allow this since it
+			 * can be a useful way of reducing switchover time when using
+			 * various forms of replication.
 			 */
 			RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_WAIT |
 							  (RecoveryInProgress() ? 0 : CHECKPOINT_FORCE));
@@ -2462,13 +2463,13 @@ GetCommandLogLevel(Node *parsetree)
 		case T_ExplainStmt:
 			{
 				ExplainStmt *stmt = (ExplainStmt *) parsetree;
-				bool		 analyze = false;
-				ListCell	*lc;
+				bool		analyze = false;
+				ListCell   *lc;
 
 				/* Look through an EXPLAIN ANALYZE to the contained stmt */
 				foreach(lc, stmt->options)
 				{
-					DefElem *opt = (DefElem *) lfirst(lc);
+					DefElem    *opt = (DefElem *) lfirst(lc);
 
 					if (strcmp(opt->defname, "analyze") == 0)
 						analyze = defGetBoolean(opt);
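
The reflowed comment above describes the hook-variable convention: a loadable module installs its own function in a global pointer, and the core dispatcher calls the hook if set, otherwise the standard routine. The sketch below mirrors only the shape of ProcessUtility_hook / standard_ProcessUtility; the types and names are invented.

/* sketch: function-pointer hook with a standard fallback */
#include <stdio.h>

typedef void (*demo_utility_hook_type) (const char *queryString);

static demo_utility_hook_type demo_utility_hook = NULL;

static void
demo_standard_utility(const char *queryString)
{
	printf("executing: %s\n", queryString);
}

static void
demo_process_utility(const char *queryString)
{
	if (demo_utility_hook)
		(*demo_utility_hook) (queryString);
	else
		demo_standard_utility(queryString);
}

/* what a plugin would install; it must eventually call the standard path */
static void
demo_logging_hook(const char *queryString)
{
	printf("plugin saw: %s\n", queryString);
	demo_standard_utility(queryString);
}

int
main(void)
{
	demo_process_utility("CHECKPOINT");		/* no hook installed */
	demo_utility_hook = demo_logging_hook;
	demo_process_utility("CHECKPOINT");		/* goes through the plugin */
	return 0;
}
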
diff --git a/src/backend/tsearch/dict_synonym.c b/src/backend/tsearch/dict_synonym.c
index e26977bbe1732b3fdd38f68218849dc7f98d7c66..b85fe93bd81f6f63f684b44d282e19142e293bba 100644
--- a/src/backend/tsearch/dict_synonym.c
+++ b/src/backend/tsearch/dict_synonym.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/tsearch/dict_synonym.c,v 1.12 2010/01/02 16:57:53 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/tsearch/dict_synonym.c,v 1.13 2010/02/26 02:01:05 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -67,7 +67,7 @@ findwrd(char *in, char **end, uint16 *flags)
 		in += pg_mblen(in);
 	}
 
-	if ( in - lastchar == 1 && t_iseq(lastchar, '*') && flags )
+	if (in - lastchar == 1 && t_iseq(lastchar, '*') && flags)
 	{
 		*flags = TSL_PREFIX;
 		*end = lastchar;
@@ -75,7 +75,7 @@ findwrd(char *in, char **end, uint16 *flags)
 	else
 	{
 		if (flags)
-				*flags = 0;
+			*flags = 0;
 		*end = in;
 	}
 
@@ -189,7 +189,7 @@ dsynonym_init(PG_FUNCTION_ARGS)
 		}
 
 		d->syn[cur].outlen = strlen(starto);
-		d->syn[cur].flags = flags; 
+		d->syn[cur].flags = flags;
 
 		cur++;
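
findwrd() above treats a trailing '*' on a synonym-file word as a prefix-match marker (TSL_PREFIX). The real code walks the string with pg_mblen() to stay multibyte-safe; the sketch below assumes single-byte characters for brevity, and the names are illustrative.

/* sketch: strip a trailing '*' and flag the entry as a prefix match */
#include <stdio.h>
#include <string.h>

#define DEMO_TSL_PREFIX  0x01

static void
demo_parse_word(char *word, unsigned short *flags)
{
	size_t		len = strlen(word);

	if (len > 1 && word[len - 1] == '*')
	{
		*flags = DEMO_TSL_PREFIX;
		word[len - 1] = '\0';	/* drop the marker itself */
	}
	else
		*flags = 0;
}

int
main(void)
{
	char		word[] = "postgres*";
	unsigned short flags;

	demo_parse_word(word, &flags);
	printf("word=%s prefix=%d\n", word, flags == DEMO_TSL_PREFIX);
	return 0;
}
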
 
diff --git a/src/backend/tsearch/ts_parse.c b/src/backend/tsearch/ts_parse.c
index d65d239ea1148be1edc2e6a5e2d239337e2d2631..55d740a9f51daa0dec4cacabe3f978f1d9b8e42e 100644
--- a/src/backend/tsearch/ts_parse.c
+++ b/src/backend/tsearch/ts_parse.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/tsearch/ts_parse.c,v 1.16 2010/01/02 16:57:53 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/tsearch/ts_parse.c,v 1.17 2010/02/26 02:01:05 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -188,7 +188,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 		{
 			ParsedLex  *curVal = ld->towork.head;
 			char	   *curValLemm = curVal->lemm;
-			int	   		curValLenLemm = curVal->lenlemm;
+			int			curValLenLemm = curVal->lenlemm;
 
 			map = ld->cfg->map + curVal->type;
 
@@ -208,8 +208,8 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 				res = (TSLexeme *) DatumGetPointer(FunctionCall4(
 															 &(dict->lexize),
 											 PointerGetDatum(dict->dictData),
-											     PointerGetDatum(curValLemm),
-											    Int32GetDatum(curValLenLemm),
+												 PointerGetDatum(curValLemm),
+												Int32GetDatum(curValLenLemm),
 											  PointerGetDatum(&ld->dictState)
 																 ));
 
@@ -231,7 +231,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 				if (!res)		/* dictionary doesn't know this lexeme */
 					continue;
 
-				if ( res->flags & TSL_FILTER )
+				if (res->flags & TSL_FILTER)
 				{
 					curValLemm = res->lexeme;
 					curValLenLemm = strlen(res->lexeme);
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 8457fc5e480967dc96741faf2277fb2a89365e22..79ca6c13724117c8c24b3411688581a85a11b68f 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/acl.c,v 1.156 2010/02/14 18:42:16 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/acl.c,v 1.157 2010/02/26 02:01:05 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -78,7 +78,7 @@ static Acl *allocacl(int n);
 static void check_acl(const Acl *acl);
 static const char *aclparse(const char *s, AclItem *aip);
 static bool aclitem_match(const AclItem *a1, const AclItem *a2);
-static int aclitemComparator(const void *arg1, const void *arg2);
+static int	aclitemComparator(const void *arg1, const void *arg2);
 static void check_circularity(const Acl *old_acl, const AclItem *mod_aip,
 				  Oid ownerId);
 static Acl *recursive_revoke(Acl *acl, Oid grantee, AclMode revoke_privs,
@@ -470,7 +470,7 @@ aclmerge(const Acl *left_acl, const Acl *right_acl, Oid ownerId)
 
 	for (i = 0; i < num; i++, aip++)
 	{
-		Acl *tmp_acl;
+		Acl		   *tmp_acl;
 
 		tmp_acl = aclupdate(result_acl, aip, ACL_MODECHG_ADD,
 							ownerId, DROP_RESTRICT);
@@ -1669,17 +1669,17 @@ convert_aclright_to_string(int aclright)
  * returns the table
  *
  * {{ OID(joe), 0::OID,   'SELECT', false },
- *  { OID(joe), OID(foo), 'INSERT', true },
- *  { OID(joe), OID(foo), 'UPDATE', false }}
+ *	{ OID(joe), OID(foo), 'INSERT', true },
+ *	{ OID(joe), OID(foo), 'UPDATE', false }}
  *----------
  */
 Datum
 aclexplode(PG_FUNCTION_ARGS)
 {
 	Acl		   *acl = PG_GETARG_ACL_P(0);
-	FuncCallContext	*funcctx;
+	FuncCallContext *funcctx;
 	int		   *idx;
-	AclItem	   *aidat;
+	AclItem    *aidat;
 
 	if (SRF_IS_FIRSTCALL())
 	{
@@ -1692,8 +1692,8 @@ aclexplode(PG_FUNCTION_ARGS)
 		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
 
 		/*
-		 * build tupdesc for result tuples (matches out parameters in
-		 * pg_proc entry)
+		 * build tupdesc for result tuples (matches out parameters in pg_proc
+		 * entry)
 		 */
 		tupdesc = CreateTemplateTupleDesc(4, false);
 		TupleDescInitEntry(tupdesc, (AttrNumber) 1, "grantor",
@@ -1731,7 +1731,7 @@ aclexplode(PG_FUNCTION_ARGS)
 		{
 			idx[1] = 0;
 			idx[0]++;
-			if (idx[0] >= ACL_NUM(acl))				/* done */
+			if (idx[0] >= ACL_NUM(acl)) /* done */
 				break;
 		}
 		aidata = &aidat[idx[0]];
@@ -2003,8 +2003,8 @@ has_sequence_privilege_name_name(PG_FUNCTION_ARGS)
 	if (get_rel_relkind(sequenceoid) != RELKIND_SEQUENCE)
 		ereport(ERROR,
 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				errmsg("\"%s\" is not a sequence",
-				text_to_cstring(sequencename))));
+				 errmsg("\"%s\" is not a sequence",
+						text_to_cstring(sequencename))));
 
 	aclresult = pg_class_aclcheck(sequenceoid, roleid, mode);
 
@@ -2033,8 +2033,8 @@ has_sequence_privilege_name(PG_FUNCTION_ARGS)
 	if (get_rel_relkind(sequenceoid) != RELKIND_SEQUENCE)
 		ereport(ERROR,
 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				errmsg("\"%s\" is not a sequence",
-				text_to_cstring(sequencename))));
+				 errmsg("\"%s\" is not a sequence",
+						text_to_cstring(sequencename))));
 
 	aclresult = pg_class_aclcheck(sequenceoid, roleid, mode);
 
@@ -2065,8 +2065,8 @@ has_sequence_privilege_name_id(PG_FUNCTION_ARGS)
 	else if (relkind != RELKIND_SEQUENCE)
 		ereport(ERROR,
 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				errmsg("\"%s\" is not a sequence",
-				get_rel_name(sequenceoid))));
+				 errmsg("\"%s\" is not a sequence",
+						get_rel_name(sequenceoid))));
 
 	aclresult = pg_class_aclcheck(sequenceoid, roleid, mode);
 
@@ -2097,8 +2097,8 @@ has_sequence_privilege_id(PG_FUNCTION_ARGS)
 	else if (relkind != RELKIND_SEQUENCE)
 		ereport(ERROR,
 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				errmsg("\"%s\" is not a sequence",
-				get_rel_name(sequenceoid))));
+				 errmsg("\"%s\" is not a sequence",
+						get_rel_name(sequenceoid))));
 
 	aclresult = pg_class_aclcheck(sequenceoid, roleid, mode);
 
@@ -2125,8 +2125,8 @@ has_sequence_privilege_id_name(PG_FUNCTION_ARGS)
 	if (get_rel_relkind(sequenceoid) != RELKIND_SEQUENCE)
 		ereport(ERROR,
 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				errmsg("\"%s\" is not a sequence",
-				text_to_cstring(sequencename))));
+				 errmsg("\"%s\" is not a sequence",
+						text_to_cstring(sequencename))));
 
 	aclresult = pg_class_aclcheck(sequenceoid, roleid, mode);
 
@@ -2155,8 +2155,8 @@ has_sequence_privilege_id_id(PG_FUNCTION_ARGS)
 	else if (relkind != RELKIND_SEQUENCE)
 		ereport(ERROR,
 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				errmsg("\"%s\" is not a sequence",
-				get_rel_name(sequenceoid))));
+				 errmsg("\"%s\" is not a sequence",
+						get_rel_name(sequenceoid))));
 
 	aclresult = pg_class_aclcheck(sequenceoid, roleid, mode);
 
@@ -2171,10 +2171,10 @@ static AclMode
 convert_sequence_priv_string(text *priv_type_text)
 {
 	static const priv_map sequence_priv_map[] = {
-		{ "USAGE", ACL_USAGE },
-		{ "SELECT", ACL_SELECT },
-		{ "UPDATE", ACL_UPDATE },
-		{ NULL, 0 }
+		{"USAGE", ACL_USAGE},
+		{"SELECT", ACL_SELECT},
+		{"UPDATE", ACL_UPDATE},
+		{NULL, 0}
 	};
 
 	return convert_any_priv_string(priv_type_text, sequence_priv_map);
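
The reformatted sequence_priv_map above is a sentinel-terminated name/flag table: lookup walks the pairs until the NULL entry. A reduced sketch of that pattern, with invented flag values and the error handling collapsed into a return code:

/* sketch: NULL-terminated privilege-name lookup table */
#include <stdio.h>
#include <string.h>
#include <strings.h>

typedef struct
{
	const char *name;
	unsigned int value;
} demo_priv_map;

#define DEMO_ACL_USAGE	(1 << 0)
#define DEMO_ACL_SELECT (1 << 1)
#define DEMO_ACL_UPDATE (1 << 2)

static const demo_priv_map demo_sequence_priv_map[] = {
	{"USAGE", DEMO_ACL_USAGE},
	{"SELECT", DEMO_ACL_SELECT},
	{"UPDATE", DEMO_ACL_UPDATE},
	{NULL, 0}
};

static int
demo_convert_priv_string(const char *priv, unsigned int *mode)
{
	const demo_priv_map *entry;

	for (entry = demo_sequence_priv_map; entry->name != NULL; entry++)
	{
		if (strcasecmp(priv, entry->name) == 0)
		{
			*mode = entry->value;
			return 1;
		}
	}
	return 0;					/* unrecognized privilege type */
}

int
main(void)
{
	unsigned int mode;

	if (demo_convert_priv_string("select", &mode))
		printf("mode bit = %u\n", mode);
	return 0;
}
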
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index 7d4ea11bf8b73cea2b393229ec08c414b704c20c..bca0b894422a8d731daa1df34f835a35a1edccd7 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -6,7 +6,7 @@
  * Copyright (c) 2003-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.34 2010/02/08 20:39:51 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.35 2010/02/26 02:01:06 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -531,9 +531,9 @@ array_agg_finalfn(PG_FUNCTION_ARGS)
 
 	/*
 	 * Make the result.  We cannot release the ArrayBuildState because
-	 * sometimes aggregate final functions are re-executed.  Rather, it
-	 * is nodeAgg.c's responsibility to reset the aggcontext when it's
-	 * safe to do so.
+	 * sometimes aggregate final functions are re-executed.  Rather, it is
+	 * nodeAgg.c's responsibility to reset the aggcontext when it's safe to do
+	 * so.
 	 */
 	result = makeMdArrayResult(state, 1, dims, lbs,
 							   CurrentMemoryContext,
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index aa110ce58fb32a4f1d84c795f1df424b2f5e27db..533b77c1cd080363b2a0e05b51c0a244368d6fc4 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.163 2010/01/02 16:57:53 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.164 2010/02/26 02:01:07 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -328,10 +328,11 @@ array_in(PG_FUNCTION_ARGS)
 	SET_VARSIZE(retval, nbytes);
 	retval->ndim = ndim;
 	retval->dataoffset = dataoffset;
+
 	/*
-	 *	This comes from the array's pg_type.typelem (which points to the
-	 *	base data type's pg_type.oid) and stores system oids in user tables.
-	 *	This oid must be preserved by binary upgrades.
+	 * This comes from the array's pg_type.typelem (which points to the base
+	 * data type's pg_type.oid) and stores system oids in user tables. This
+	 * oid must be preserved by binary upgrades.
 	 */
 	retval->elemtype = element_type;
 	memcpy(ARR_DIMS(retval), dim, ndim * sizeof(int));
@@ -1212,7 +1213,7 @@ array_recv(PG_FUNCTION_ARGS)
 
 	for (i = 0; i < ndim; i++)
 	{
-		int ub;
+		int			ub;
 
 		dim[i] = pq_getmsgint(buf, 4);
 		lBound[i] = pq_getmsgint(buf, 4);
@@ -4194,12 +4195,12 @@ accumArrayResult(ArrayBuildState *astate,
 	}
 
 	/*
-	 * Ensure pass-by-ref stuff is copied into mcontext; and detoast it too
-	 * if it's varlena.  (You might think that detoasting is not needed here
+	 * Ensure pass-by-ref stuff is copied into mcontext; and detoast it too if
+	 * it's varlena.  (You might think that detoasting is not needed here
 	 * because construct_md_array can detoast the array elements later.
 	 * However, we must not let construct_md_array modify the ArrayBuildState
-	 * because that would mean array_agg_finalfn damages its input, which
-	 * is verboten.  Also, this way frequently saves one copying step.)
+	 * because that would mean array_agg_finalfn damages its input, which is
+	 * verboten.  Also, this way frequently saves one copying step.)
 	 */
 	if (!disnull && !astate->typbyval)
 	{
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 7985a644f3de0e990c7256e080c6b374d27492e6..b5dfe08c9bb7ab76c9173c29a55f388d7d2fe085 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.151 2010/02/18 04:31:16 itagaki Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.152 2010/02/26 02:01:07 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -203,7 +203,7 @@ Datum
 date_recv(PG_FUNCTION_ARGS)
 {
 	StringInfo	buf = (StringInfo) PG_GETARG_POINTER(0);
-	DateADT result;
+	DateADT		result;
 
 	result = (DateADT) pq_getmsgint(buf, sizeof(DateADT));
 
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 2bb2427c0600f03f39f9ee93da960b5120000bff..8b5def4d15a9f00d90019e1362d4640137ad8b6c 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -5,7 +5,7 @@
  * Copyright (c) 2002-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/dbsize.c,v 1.30 2010/02/14 18:42:16 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/dbsize.c,v 1.31 2010/02/26 02:01:07 momjian Exp $
  *
  */
 
@@ -185,7 +185,7 @@ calculate_tablespace_size(Oid tblspcOid)
 		snprintf(tblspcPath, MAXPGPATH, "global");
 	else
 		snprintf(tblspcPath, MAXPGPATH, "pg_tblspc/%u/%s", tblspcOid,
-										 TABLESPACE_VERSION_DIRECTORY);
+				 TABLESPACE_VERSION_DIRECTORY);
 
 	dirdesc = AllocateDir(tblspcPath);
 
@@ -318,10 +318,10 @@ pg_relation_size(PG_FUNCTION_ARGS)
 static int64
 calculate_toast_table_size(Oid toastrelid)
 {
-	int64      size = 0;
-	Relation   toastRel;
-	Relation   toastIdxRel;
-	ForkNumber forkNum;
+	int64		size = 0;
+	Relation	toastRel;
+	Relation	toastIdxRel;
+	ForkNumber	forkNum;
 
 	toastRel = relation_open(toastrelid, AccessShareLock);
 
@@ -351,9 +351,9 @@ calculate_toast_table_size(Oid toastrelid)
 static int64
 calculate_table_size(Oid relOid)
 {
-	int64      size = 0;
-	Relation   rel;
-	ForkNumber forkNum;
+	int64		size = 0;
+	Relation	rel;
+	ForkNumber	forkNum;
 
 	rel = relation_open(relOid, AccessShareLock);
 
@@ -382,8 +382,8 @@ calculate_table_size(Oid relOid)
 static int64
 calculate_indexes_size(Oid relOid)
 {
-	int64    size = 0;
-	Relation rel;
+	int64		size = 0;
+	Relation	rel;
 
 	rel = relation_open(relOid, AccessShareLock);
 
@@ -392,14 +392,14 @@ calculate_indexes_size(Oid relOid)
 	 */
 	if (rel->rd_rel->relhasindex)
 	{
-		List	 *index_oids = RelationGetIndexList(rel);
-		ListCell *cell;
+		List	   *index_oids = RelationGetIndexList(rel);
+		ListCell   *cell;
 
 		foreach(cell, index_oids)
 		{
 			Oid			idxOid = lfirst_oid(cell);
 			Relation	idxRel;
-			ForkNumber  forkNum;
+			ForkNumber	forkNum;
 
 			idxRel = relation_open(idxOid, AccessShareLock);
 
@@ -443,9 +443,8 @@ calculate_total_relation_size(Oid Relid)
 	int64		size;
 
 	/*
-	 * Aggregate the table size, this includes size of
-	 * the heap, toast and toast index with free space
-	 * and visibility map
+	 * Aggregate the table size, this includes size of the heap, toast and
+	 * toast index with free space and visibility map
 	 */
 	size = calculate_table_size(Relid);
 
@@ -515,7 +514,7 @@ pg_size_pretty(PG_FUNCTION_ARGS)
  * This is expected to be used in queries like
  *		SELECT pg_relation_filenode(oid) FROM pg_class;
  * That leads to a couple of choices.  We work from the pg_class row alone
- * rather than actually opening each relation, for efficiency.  We don't
+ * rather than actually opening each relation, for efficiency.	We don't
  * fail if we can't find the relation --- some rows might be visible in
  * the query's MVCC snapshot but already dead according to SnapshotNow.
  * (Note: we could avoid using the catcache, but there's little point
@@ -545,7 +544,7 @@ pg_relation_filenode(PG_FUNCTION_ARGS)
 			/* okay, these have storage */
 			if (relform->relfilenode)
 				result = relform->relfilenode;
-			else				/* Consult the relation mapper */
+			else	/* Consult the relation mapper */
 				result = RelationMapOidToFilenode(relid,
 												  relform->relisshared);
 			break;
@@ -602,9 +601,9 @@ pg_relation_filepath(PG_FUNCTION_ARGS)
 				rnode.dbNode = MyDatabaseId;
 			if (relform->relfilenode)
 				rnode.relNode = relform->relfilenode;
-			else				/* Consult the relation mapper */
+			else	/* Consult the relation mapper */
 				rnode.relNode = RelationMapOidToFilenode(relid,
-														 relform->relisshared);
+													   relform->relisshared);
 			break;
 
 		default:
diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c
index 8bbe092ee9934411f840d8f62eb5f5afcdf51f80..97b047686f98f28af1ef1a3b2fd42a8fbfc8a4c1 100644
--- a/src/backend/utils/adt/domains.c
+++ b/src/backend/utils/adt/domains.c
@@ -25,7 +25,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/domains.c,v 1.10 2010/01/02 16:57:53 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/domains.c,v 1.11 2010/02/26 02:01:07 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -305,7 +305,7 @@ domain_recv(PG_FUNCTION_ARGS)
 
 /*
  * domain_check - check that a datum satisfies the constraints of a
- * domain.  extra and mcxt can be passed if they are available from,
+ * domain.	extra and mcxt can be passed if they are available from,
  * say, a FmgrInfo structure, or they can be NULL, in which case the
  * setup is repeated for each call.
  */
diff --git a/src/backend/utils/adt/enum.c b/src/backend/utils/adt/enum.c
index ebee928f17ca4acd0b520fdceec397cc8927e17b..9000d1ca160b7a079e5d809941be1aac46df527c 100644
--- a/src/backend/utils/adt/enum.c
+++ b/src/backend/utils/adt/enum.c
@@ -7,7 +7,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/enum.c,v 1.10 2010/02/14 18:42:16 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/enum.c,v 1.11 2010/02/26 02:01:08 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -56,8 +56,8 @@ enum_in(PG_FUNCTION_ARGS)
 						name)));
 
 	/*
-	 *	This comes from pg_enum.oid and stores system oids in user tables.
-	 *	This oid must be preserved by binary upgrades.
+	 * This comes from pg_enum.oid and stores system oids in user tables. This
+	 * oid must be preserved by binary upgrades.
 	 */
 	enumoid = HeapTupleGetOid(tup);
 
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 2ca7f6a1fa8162b2c8d15a44a89da214b5a017c7..e7e82a1b14850d55048c9451ac2c849f54ec08dd 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1,7 +1,7 @@
 /* -----------------------------------------------------------------------
  * formatting.c
  *
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.167 2010/02/25 18:36:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.168 2010/02/26 02:01:08 momjian Exp $
  *
  *
  *	 Portions Copyright (c) 1999-2010, PostgreSQL Global Development Group
@@ -1053,165 +1053,165 @@ NUMDesc_prepare(NUMDesc *num, FormatNode *n)
 	 */
 	PG_TRY();
 	{
-	if (IS_EEEE(num) && n->key->id != NUM_E)
-		ereport(ERROR,
-				(errcode(ERRCODE_SYNTAX_ERROR),
-				 errmsg("\"EEEE\" must be the last pattern used")));
+		if (IS_EEEE(num) && n->key->id != NUM_E)
+			ereport(ERROR,
+					(errcode(ERRCODE_SYNTAX_ERROR),
+					 errmsg("\"EEEE\" must be the last pattern used")));
 
-	switch (n->key->id)
-	{
-		case NUM_9:
-			if (IS_BRACKET(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("\"9\" must be ahead of \"PR\"")));
-			if (IS_MULTI(num))
-			{
-				++num->multi;
+		switch (n->key->id)
+		{
+			case NUM_9:
+				if (IS_BRACKET(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+							 errmsg("\"9\" must be ahead of \"PR\"")));
+				if (IS_MULTI(num))
+				{
+					++num->multi;
+					break;
+				}
+				if (IS_DECIMAL(num))
+					++num->post;
+				else
+					++num->pre;
 				break;
-			}
-			if (IS_DECIMAL(num))
-				++num->post;
-			else
-				++num->pre;
-			break;
 
-		case NUM_0:
-			if (IS_BRACKET(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("\"0\" must be ahead of \"PR\"")));
-			if (!IS_ZERO(num) && !IS_DECIMAL(num))
-			{
-				num->flag |= NUM_F_ZERO;
-				num->zero_start = num->pre + 1;
-			}
-			if (!IS_DECIMAL(num))
-				++num->pre;
-			else
-				++num->post;
+			case NUM_0:
+				if (IS_BRACKET(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+							 errmsg("\"0\" must be ahead of \"PR\"")));
+				if (!IS_ZERO(num) && !IS_DECIMAL(num))
+				{
+					num->flag |= NUM_F_ZERO;
+					num->zero_start = num->pre + 1;
+				}
+				if (!IS_DECIMAL(num))
+					++num->pre;
+				else
+					++num->post;
 
-			num->zero_end = num->pre + num->post;
-			break;
+				num->zero_end = num->pre + num->post;
+				break;
 
-		case NUM_B:
-			if (num->pre == 0 && num->post == 0 && (!IS_ZERO(num)))
-				num->flag |= NUM_F_BLANK;
-			break;
+			case NUM_B:
+				if (num->pre == 0 && num->post == 0 && (!IS_ZERO(num)))
+					num->flag |= NUM_F_BLANK;
+				break;
 
-		case NUM_D:
-			num->flag |= NUM_F_LDECIMAL;
-			num->need_locale = TRUE;
-		case NUM_DEC:
-			if (IS_DECIMAL(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("multiple decimal points")));
-			if (IS_MULTI(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
+			case NUM_D:
+				num->flag |= NUM_F_LDECIMAL;
+				num->need_locale = TRUE;
+			case NUM_DEC:
+				if (IS_DECIMAL(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+							 errmsg("multiple decimal points")));
+				if (IS_MULTI(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
 					 errmsg("cannot use \"V\" and decimal point together")));
-			num->flag |= NUM_F_DECIMAL;
-			break;
+				num->flag |= NUM_F_DECIMAL;
+				break;
 
-		case NUM_FM:
-			num->flag |= NUM_F_FILLMODE;
-			break;
+			case NUM_FM:
+				num->flag |= NUM_F_FILLMODE;
+				break;
 
-		case NUM_S:
-			if (IS_LSIGN(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("cannot use \"S\" twice")));
-			if (IS_PLUS(num) || IS_MINUS(num) || IS_BRACKET(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("cannot use \"S\" and \"PL\"/\"MI\"/\"SG\"/\"PR\" together")));
-			if (!IS_DECIMAL(num))
-			{
-				num->lsign = NUM_LSIGN_PRE;
-				num->pre_lsign_num = num->pre;
-				num->need_locale = TRUE;
-				num->flag |= NUM_F_LSIGN;
-			}
-			else if (num->lsign == NUM_LSIGN_NONE)
-			{
-				num->lsign = NUM_LSIGN_POST;
+			case NUM_S:
+				if (IS_LSIGN(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+							 errmsg("cannot use \"S\" twice")));
+				if (IS_PLUS(num) || IS_MINUS(num) || IS_BRACKET(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+							 errmsg("cannot use \"S\" and \"PL\"/\"MI\"/\"SG\"/\"PR\" together")));
+				if (!IS_DECIMAL(num))
+				{
+					num->lsign = NUM_LSIGN_PRE;
+					num->pre_lsign_num = num->pre;
+					num->need_locale = TRUE;
+					num->flag |= NUM_F_LSIGN;
+				}
+				else if (num->lsign == NUM_LSIGN_NONE)
+				{
+					num->lsign = NUM_LSIGN_POST;
+					num->need_locale = TRUE;
+					num->flag |= NUM_F_LSIGN;
+				}
+				break;
+
+			case NUM_MI:
+				if (IS_LSIGN(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+							 errmsg("cannot use \"S\" and \"MI\" together")));
+				num->flag |= NUM_F_MINUS;
+				if (IS_DECIMAL(num))
+					num->flag |= NUM_F_MINUS_POST;
+				break;
+
+			case NUM_PL:
+				if (IS_LSIGN(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+							 errmsg("cannot use \"S\" and \"PL\" together")));
+				num->flag |= NUM_F_PLUS;
+				if (IS_DECIMAL(num))
+					num->flag |= NUM_F_PLUS_POST;
+				break;
+
+			case NUM_SG:
+				if (IS_LSIGN(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+							 errmsg("cannot use \"S\" and \"SG\" together")));
+				num->flag |= NUM_F_MINUS;
+				num->flag |= NUM_F_PLUS;
+				break;
+
+			case NUM_PR:
+				if (IS_LSIGN(num) || IS_PLUS(num) || IS_MINUS(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+							 errmsg("cannot use \"PR\" and \"S\"/\"PL\"/\"MI\"/\"SG\" together")));
+				num->flag |= NUM_F_BRACKET;
+				break;
+
+			case NUM_rn:
+			case NUM_RN:
+				num->flag |= NUM_F_ROMAN;
+				break;
+
+			case NUM_L:
+			case NUM_G:
 				num->need_locale = TRUE;
-				num->flag |= NUM_F_LSIGN;
-			}
-			break;
+				break;
 
-		case NUM_MI:
-			if (IS_LSIGN(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("cannot use \"S\" and \"MI\" together")));
-			num->flag |= NUM_F_MINUS;
-			if (IS_DECIMAL(num))
-				num->flag |= NUM_F_MINUS_POST;
-			break;
-
-		case NUM_PL:
-			if (IS_LSIGN(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("cannot use \"S\" and \"PL\" together")));
-			num->flag |= NUM_F_PLUS;
-			if (IS_DECIMAL(num))
-				num->flag |= NUM_F_PLUS_POST;
-			break;
-
-		case NUM_SG:
-			if (IS_LSIGN(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("cannot use \"S\" and \"SG\" together")));
-			num->flag |= NUM_F_MINUS;
-			num->flag |= NUM_F_PLUS;
-			break;
-
-		case NUM_PR:
-			if (IS_LSIGN(num) || IS_PLUS(num) || IS_MINUS(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("cannot use \"PR\" and \"S\"/\"PL\"/\"MI\"/\"SG\" together")));
-			num->flag |= NUM_F_BRACKET;
-			break;
-
-		case NUM_rn:
-		case NUM_RN:
-			num->flag |= NUM_F_ROMAN;
-			break;
-
-		case NUM_L:
-		case NUM_G:
-			num->need_locale = TRUE;
-			break;
-
-		case NUM_V:
-			if (IS_DECIMAL(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
+			case NUM_V:
+				if (IS_DECIMAL(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
 					 errmsg("cannot use \"V\" and decimal point together")));
-			num->flag |= NUM_F_MULTI;
-			break;
+				num->flag |= NUM_F_MULTI;
+				break;
 
-		case NUM_E:
-			if (IS_EEEE(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("cannot use \"EEEE\" twice")));
-			if (IS_BLANK(num) || IS_FILLMODE(num) || IS_LSIGN(num) ||
-				IS_BRACKET(num) || IS_MINUS(num) || IS_PLUS(num) ||
-				IS_ROMAN(num) || IS_MULTI(num))
-				ereport(ERROR,
-						(errcode(ERRCODE_SYNTAX_ERROR),
-						 errmsg("\"EEEE\" is incompatible with other formats"),
-						 errdetail("\"EEEE\" may only be used together with digit and decimal point patterns.")));
-			num->flag |= NUM_F_EEEE;
-			break;
-	}
+			case NUM_E:
+				if (IS_EEEE(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+							 errmsg("cannot use \"EEEE\" twice")));
+				if (IS_BLANK(num) || IS_FILLMODE(num) || IS_LSIGN(num) ||
+					IS_BRACKET(num) || IS_MINUS(num) || IS_PLUS(num) ||
+					IS_ROMAN(num) || IS_MULTI(num))
+					ereport(ERROR,
+							(errcode(ERRCODE_SYNTAX_ERROR),
+					   errmsg("\"EEEE\" is incompatible with other formats"),
+							 errdetail("\"EEEE\" may only be used together with digit and decimal point patterns.")));
+				num->flag |= NUM_F_EEEE;
+				break;
+		}
 	}
 	PG_CATCH();
 	{
@@ -2088,7 +2088,11 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out)
 				break;
 			case DCH_HH:
 			case DCH_HH12:
-				/* display time as shown on a 12-hour clock, even for intervals */
+
+				/*
+				 * display time as shown on a 12-hour clock, even for
+				 * intervals
+				 */
 				sprintf(s, "%0*d", S_FM(n->suffix) ? 0 : 2,
 						tm->tm_hour % (HOURS_PER_DAY / 2) == 0 ? 12 :
 						tm->tm_hour % (HOURS_PER_DAY / 2));
@@ -4652,8 +4656,8 @@ numeric_to_char(PG_FUNCTION_ARGS)
 		if (strcmp(orgnum, "NaN") == 0)
 		{
 			/*
-			 * Allow 6 characters for the leading sign, the decimal point, "e",
-			 * the exponent's sign and two exponent digits.
+			 * Allow 6 characters for the leading sign, the decimal point,
+			 * "e", the exponent's sign and two exponent digits.
 			 */
 			numstr = (char *) palloc(Num.pre + Num.post + 7);
 			fill_str(numstr, '#', Num.pre + Num.post + 6);
@@ -4757,7 +4761,7 @@ int4_to_char(PG_FUNCTION_ARGS)
 	else if (IS_EEEE(&Num))
 	{
 		/* we can do it easily because float8 won't lose any precision */
-		float8	val = (float8) value;
+		float8		val = (float8) value;
 
 		orgnum = (char *) palloc(MAXDOUBLEWIDTH + 1);
 		snprintf(orgnum, MAXDOUBLEWIDTH + 1, "%+.*e", Num.post, val);
@@ -4852,7 +4856,7 @@ int8_to_char(PG_FUNCTION_ARGS)
 	else if (IS_EEEE(&Num))
 	{
 		/* to avoid loss of precision, must go via numeric not float8 */
-		Numeric	val;
+		Numeric		val;
 
 		val = DatumGetNumeric(DirectFunctionCall1(int8_numeric,
 												  Int64GetDatum(value)));
@@ -4956,8 +4960,8 @@ float4_to_char(PG_FUNCTION_ARGS)
 		if (isnan(value) || is_infinite(value))
 		{
 			/*
-			 * Allow 6 characters for the leading sign, the decimal point, "e",
-			 * the exponent's sign and two exponent digits.
+			 * Allow 6 characters for the leading sign, the decimal point,
+			 * "e", the exponent's sign and two exponent digits.
 			 */
 			numstr = (char *) palloc(Num.pre + Num.post + 7);
 			fill_str(numstr, '#', Num.pre + Num.post + 6);
@@ -5060,8 +5064,8 @@ float8_to_char(PG_FUNCTION_ARGS)
 		if (isnan(value) || is_infinite(value))
 		{
 			/*
-			 * Allow 6 characters for the leading sign, the decimal point, "e",
-			 * the exponent's sign and two exponent digits.
+			 * Allow 6 characters for the leading sign, the decimal point,
+			 * "e", the exponent's sign and two exponent digits.
 			 */
 			numstr = (char *) palloc(Num.pre + Num.post + 7);
 			fill_str(numstr, '#', Num.pre + Num.post + 6);
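The DCH_HH/DCH_HH12 hunk earlier in this file only re-wraps its comment, but the modulo expression that comment documents is easy to verify on its own. A standalone sketch, not part of the patch, assuming only that HOURS_PER_DAY is 24 as in PostgreSQL's datetime headers:

#include <stdio.h>

#define HOURS_PER_DAY 24

/* Map a 24-hour value (0..23) onto a 12-hour clock face: 0 and 12 show as 12. */
static int
hour12(int hour24)
{
	int			h = hour24 % (HOURS_PER_DAY / 2);

	return (h == 0) ? 12 : h;
}

int
main(void)
{
	int			h;

	for (h = 0; h < HOURS_PER_DAY; h++)
		printf("%02d -> %2d\n", h, hour12(h));
	return 0;
}

This mirrors the sprintf argument in DCH_to_char(): hours 0 and 12 display as 12, everything else as hour mod 12.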
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index 35f1a13ab675ba6b21a354bd04e0a7969e8a4297..cd1d6c2cc6bdda51397342b3cc21b4229aa96727 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.107 2010/01/14 16:31:09 teodor Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.108 2010/02/26 02:01:08 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -67,7 +67,7 @@ static double dist_pl_internal(Point *pt, LINE *line);
 static double dist_ps_internal(Point *pt, LSEG *lseg);
 static Point *line_interpt_internal(LINE *l1, LINE *l2);
 static bool lseg_inside_poly(Point *a, Point *b, POLYGON *poly, int start);
-static Point* lseg_interpt_internal(LSEG *l1, LSEG *l2);
+static Point *lseg_interpt_internal(LSEG *l1, LSEG *l2);
 
 
 /*
@@ -2354,7 +2354,7 @@ lseg_center(PG_FUNCTION_ARGS)
 	PG_RETURN_POINT_P(result);
 }
 
-static Point*
+static Point *
 lseg_interpt_internal(LSEG *l1, LSEG *l2)
 {
 	Point	   *result;
@@ -2411,7 +2411,7 @@ lseg_interpt(PG_FUNCTION_ARGS)
 	LSEG	   *l1 = PG_GETARG_LSEG_P(0);
 	LSEG	   *l2 = PG_GETARG_LSEG_P(1);
 	Point	   *result;
-	
+
 	result = lseg_interpt_internal(l1, l2);
 	if (!PointerIsValid(result))
 		PG_RETURN_NULL();
@@ -2466,8 +2466,8 @@ dist_ps_internal(Point *pt, LSEG *lseg)
 	Point	   *ip;
 
 	/*
-	 * Construct a line perpendicular to the input segment
-	 * and through the input point
+	 * Construct a line perpendicular to the input segment and through the
+	 * input point
 	 */
 	if (lseg->p[1].x == lseg->p[0].x)
 		m = 0;
@@ -3203,7 +3203,7 @@ on_pb(PG_FUNCTION_ARGS)
 }
 
 Datum
-box_contain_pt(PG_FUNCTION_ARGS) 
+box_contain_pt(PG_FUNCTION_ARGS)
 {
 	BOX		   *box = PG_GETARG_BOX_P(0);
 	Point	   *pt = PG_GETARG_POINT_P(1);
@@ -3768,7 +3768,7 @@ poly_same(PG_FUNCTION_ARGS)
 }
 
 /*-----------------------------------------------------------------
- * Determine if polygon A overlaps polygon B 
+ * Determine if polygon A overlaps polygon B
  *-----------------------------------------------------------------*/
 Datum
 poly_overlap(PG_FUNCTION_ARGS)
@@ -3778,51 +3778,51 @@ poly_overlap(PG_FUNCTION_ARGS)
 	bool		result;
 
 	/* Quick check by bounding box */
-	result = (polya->npts > 0 && polyb->npts > 0 && 
-			box_ov(&polya->boundbox, &polyb->boundbox)) ? true : false;
+	result = (polya->npts > 0 && polyb->npts > 0 &&
+			  box_ov(&polya->boundbox, &polyb->boundbox)) ? true : false;
 
 	/*
-	 * Brute-force algorithm - try to find intersected edges,
-	 * if so then polygons are overlapped else check is one 
-	 * polygon inside other or not by testing single point 
-	 * of them.
+	 * Brute-force algorithm: try to find intersecting edges; if there are
+	 * any, the polygons overlap, otherwise check whether one polygon is
+	 * inside the other by testing a single point of each.
 	 */
 	if (result)
 	{
-		int		ia, ib;
-		LSEG	sa, sb;
+		int			ia,
+					ib;
+		LSEG		sa,
+					sb;
 
 		/* Init first of polya's edge with last point */
 		sa.p[0] = polya->p[polya->npts - 1];
 		result = false;
 
-		for(ia=0; ia<polya->npts && result == false; ia++)
+		for (ia = 0; ia < polya->npts && result == false; ia++)
 		{
-			/* Second point of polya's edge is a current one */ 
+			/* Second point of polya's edge is a current one */
 			sa.p[1] = polya->p[ia];
 
 			/* Init first of polyb's edge with last point */
 			sb.p[0] = polyb->p[polyb->npts - 1];
 
-			for(ib=0; ib<polyb->npts && result == false; ib++)
+			for (ib = 0; ib < polyb->npts && result == false; ib++)
 			{
 				sb.p[1] = polyb->p[ib];
 				result = lseg_intersect_internal(&sa, &sb);
 				sb.p[0] = sb.p[1];
 			}
 
-			/* 
-			 * move current endpoint to the first point
-			 * of next edge
+			/*
+			 * move current endpoint to the first point of next edge
 			 */
 			sa.p[0] = sa.p[1];
 		}
 
-		if (result==false)
+		if (result == false)
 		{
-			result = ( 	point_inside(polya->p, polyb->npts, polyb->p)
-						||
-						point_inside(polyb->p, polya->npts, polya->p) );
+			result = (point_inside(polya->p, polyb->npts, polyb->p)
+					  ||
+					  point_inside(polyb->p, polya->npts, polya->p));
 		}
 	}
 
@@ -3838,93 +3838,93 @@ poly_overlap(PG_FUNCTION_ARGS)
 /*
  * Tests special kind of segment for in/out of polygon.
  * Special kind means:
- *  - point a should be on segment s
- *  - segment (a,b) should not be contained by s
+ *	- point a should be on segment s
+ *	- segment (a,b) should not be contained by s
  * Returns true if:
- *  - segment (a,b) is collinear to s and (a,b) is in polygon
- *  - segment (a,b) s not collinear to s. Note: that doesn't
- *    mean that segment is in polygon! 
- */ 
+ *	- segment (a,b) is collinear to s and (a,b) is in polygon
+ *	- segment (a,b) is not collinear to s. Note: that doesn't
+ *	  mean that segment is in polygon!
+ */
 
 static bool
 touched_lseg_inside_poly(Point *a, Point *b, LSEG *s, POLYGON *poly, int start)
 {
 	/* point a is on s, b is not */
-	LSEG t;
+	LSEG		t;
 
 	t.p[0] = *a;
 	t.p[1] = *b;
-	
-#define POINTEQ(pt1, pt2)   (FPeq((pt1)->x, (pt2)->x) && FPeq((pt1)->y, (pt2)->y))
-	if ( POINTEQ(a, s->p) )
+
+#define POINTEQ(pt1, pt2)	(FPeq((pt1)->x, (pt2)->x) && FPeq((pt1)->y, (pt2)->y))
+	if (POINTEQ(a, s->p))
 	{
-		if ( on_ps_internal(s->p+1, &t) )
-			return lseg_inside_poly(b, s->p+1, poly, start);
+		if (on_ps_internal(s->p + 1, &t))
+			return lseg_inside_poly(b, s->p + 1, poly, start);
 	}
-	else if (POINTEQ(a, s->p+1))
+	else if (POINTEQ(a, s->p + 1))
 	{
-		if ( on_ps_internal(s->p, &t) )
+		if (on_ps_internal(s->p, &t))
 			return lseg_inside_poly(b, s->p, poly, start);
 	}
-	else if ( on_ps_internal(s->p, &t) )
+	else if (on_ps_internal(s->p, &t))
 	{
 		return lseg_inside_poly(b, s->p, poly, start);
 	}
-	else if ( on_ps_internal(s->p+1, &t) )
+	else if (on_ps_internal(s->p + 1, &t))
 	{
-		return lseg_inside_poly(b, s->p+1, poly, start);
+		return lseg_inside_poly(b, s->p + 1, poly, start);
 	}
 
-	return true; /* may be not true, but that will check later */
+	return true;				/* may not be true; that will be checked later */
 }
 
 /*
  * Returns true if segment (a,b) is in polygon, option
- * start is used for optimization - function checks 
+ * start is used for optimization - function checks
  * polygon's edges started from start
  */
 static bool
 lseg_inside_poly(Point *a, Point *b, POLYGON *poly, int start)
 {
-	LSEG	s,
-			t;
-	int 	i;
-	bool 	res = true,
-			intersection = false;
+	LSEG		s,
+				t;
+	int			i;
+	bool		res = true,
+				intersection = false;
 
 	t.p[0] = *a;
 	t.p[1] = *b;
-	s.p[0] = poly->p[( start == 0) ? (poly->npts - 1) : (start - 1)];
+	s.p[0] = poly->p[(start == 0) ? (poly->npts - 1) : (start - 1)];
 
-	for(i=start; i<poly->npts && res == true; i++)
+	for (i = start; i < poly->npts && res == true; i++)
 	{
-		Point	*interpt;
+		Point	   *interpt;
 
 		s.p[1] = poly->p[i];
 
-		if ( on_ps_internal(t.p, &s) )
+		if (on_ps_internal(t.p, &s))
 		{
-			if ( on_ps_internal(t.p+1, &s) )
-				return true; /* t is contained by s */
+			if (on_ps_internal(t.p + 1, &s))
+				return true;	/* t is contained by s */
 
 			/* Y-cross */
-			res = touched_lseg_inside_poly(t.p, t.p+1, &s, poly, i+1);
-		} 
-		else if ( on_ps_internal(t.p+1, &s) )
+			res = touched_lseg_inside_poly(t.p, t.p + 1, &s, poly, i + 1);
+		}
+		else if (on_ps_internal(t.p + 1, &s))
 		{
 			/* Y-cross */
-			res = touched_lseg_inside_poly(t.p+1, t.p, &s, poly, i+1);
+			res = touched_lseg_inside_poly(t.p + 1, t.p, &s, poly, i + 1);
 		}
-		else if ( (interpt = lseg_interpt_internal(&t, &s)) != NULL )
+		else if ((interpt = lseg_interpt_internal(&t, &s)) != NULL)
 		{
 			/*
 			 * segments are X-crossing, go to check each subsegment
 			 */
 
 			intersection = true;
-			res = lseg_inside_poly(t.p, interpt, poly, i+1);
+			res = lseg_inside_poly(t.p, interpt, poly, i + 1);
 			if (res)
-				res = lseg_inside_poly(t.p+1, interpt, poly, i+1);
+				res = lseg_inside_poly(t.p + 1, interpt, poly, i + 1);
 			pfree(interpt);
 		}
 
@@ -3933,17 +3933,16 @@ lseg_inside_poly(Point *a, Point *b, POLYGON *poly, int start)
 
 	if (res && !intersection)
 	{
-		Point p;
+		Point		p;
 
 		/*
-		 * if X-intersection wasn't found  then check central point
-		 * of tested segment. In opposite case we already check all 
-		 * subsegments
+		 * if no X-intersection was found, check the central point of the
+		 * tested segment; otherwise we have already checked all subsegments
 		 */
-		p.x = (t.p[0].x + t.p[1].x) / 2.0; 
+		p.x = (t.p[0].x + t.p[1].x) / 2.0;
 		p.y = (t.p[0].y + t.p[1].y) / 2.0;
 
-		res = point_inside(&p, poly->npts, poly->p); 
+		res = point_inside(&p, poly->npts, poly->p);
 	}
 
 	return res;
@@ -3963,20 +3962,20 @@ poly_contain(PG_FUNCTION_ARGS)
 	 * Quick check to see if bounding box is contained.
 	 */
 	if (polya->npts > 0 && polyb->npts > 0 &&
-			DatumGetBool(DirectFunctionCall2(box_contain,
-											BoxPGetDatum(&polya->boundbox),
-											BoxPGetDatum(&polyb->boundbox))))
+		DatumGetBool(DirectFunctionCall2(box_contain,
+										 BoxPGetDatum(&polya->boundbox),
+										 BoxPGetDatum(&polyb->boundbox))))
 	{
-		int		i;
-		LSEG	s;
+		int			i;
+		LSEG		s;
 
 		s.p[0] = polyb->p[polyb->npts - 1];
 		result = true;
 
-		for(i=0; i<polyb->npts && result == true; i++)
+		for (i = 0; i < polyb->npts && result == true; i++)
 		{
 			s.p[1] = polyb->p[i];
-			result = lseg_inside_poly(s.p, s.p+1, polya, 0);
+			result = lseg_inside_poly(s.p, s.p + 1, polya, 0);
 			s.p[0] = s.p[1];
 		}
 	}
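The reindented lseg_inside_poly()/poly_overlap() code above ultimately relies on point_inside(), which is not shown in these hunks, to decide containment once no crossing edge is found. As a rough illustration of that underlying idea only, here is the standard even-odd crossing test for a point in a polygon; it is a hedged standalone sketch, not the FPeq-based routine geo_ops.c actually uses:

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	double		x,
				y;
} Pt;

/*
 * Even-odd (crossing-number) test: cast a horizontal ray from p to the
 * right and count how many polygon edges it crosses; an odd count means
 * the point is inside.
 */
static bool
point_in_polygon(Pt p, const Pt *v, int n)
{
	bool		inside = false;
	int			i,
				j;

	for (i = 0, j = n - 1; i < n; j = i++)
	{
		if (((v[i].y > p.y) != (v[j].y > p.y)) &&
			(p.x < (v[j].x - v[i].x) * (p.y - v[i].y) /
			 (v[j].y - v[i].y) + v[i].x))
			inside = !inside;
	}
	return inside;
}

int
main(void)
{
	Pt			square[4] = {{0, 0}, {4, 0}, {4, 4}, {0, 4}};
	Pt			in = {2, 2};
	Pt			out = {5, 1};

	printf("%d %d\n", point_in_polygon(in, square, 4),
		   point_in_polygon(out, square, 4));	/* prints 1 0 */
	return 0;
}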
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index edb22e8cc6daf486f6dd5d1357827b6dbae7ed9a..ab8476b68ae042afb2b566c6996b3a59be5a81bf 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.88 2010/01/02 16:57:54 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.89 2010/02/26 02:01:08 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -149,7 +149,7 @@ int2vectorin(PG_FUNCTION_ARGS)
 		while (*intString && isspace((unsigned char) *intString))
 			intString++;
 		if (*intString == '\0')
-			break;		
+			break;
 		result->values[n] = pg_atoi(intString, sizeof(int16), ' ');
 		while (*intString && !isspace((unsigned char) *intString))
 			intString++;
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index 1482017561d859e8704d79a9c8fa95e198cbf015..78bd5fb2b8795d3449a80510591e0fae38085de2 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.78 2010/02/08 20:39:51 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.79 2010/02/26 02:01:08 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -654,9 +654,9 @@ int8inc(PG_FUNCTION_ARGS)
 	/*
 	 * When int8 is pass-by-reference, we provide this special case to avoid
 	 * palloc overhead for COUNT(): when called as an aggregate, we know that
-	 * the argument is modifiable local storage, so just update it
-	 * in-place. (If int8 is pass-by-value, then of course this is useless as
-	 * well as incorrect, so just ifdef it out.)
+	 * the argument is modifiable local storage, so just update it in-place.
+	 * (If int8 is pass-by-value, then of course this is useless as well as
+	 * incorrect, so just ifdef it out.)
 	 */
 #ifndef USE_FLOAT8_BYVAL		/* controls int8 too */
 	if (AggCheckCallContext(fcinfo, NULL))
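The re-wrapped int8inc() comment describes why COUNT() may update its transition value in place: when called as an aggregate, the argument is known to be private, modifiable storage, so no per-row palloc is needed. A toy model of that pattern in plain C (no Datum machinery; the names are illustrative only):

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for a COUNT() transition function: the caller owns the state
 * and we bump it in place instead of allocating a fresh result per row.
 */
static int64_t *
count_transfn(int64_t *state)
{
	(*state)++;
	return state;
}

int
main(void)
{
	int64_t		count = 0;
	int			row;

	for (row = 0; row < 5; row++)
		count_transfn(&count);
	printf("%lld\n", (long long) count);	/* prints 5 */
	return 0;
}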
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index ba7fb1ad6741ecb49f250ac7b70a4d2d9255edff..66c8598d17b9cd7a124d431c60e853571e0ba571 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.74 2010/01/12 02:42:52 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.75 2010/02/26 02:01:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -187,7 +187,7 @@ pg_tablespace_databases(PG_FUNCTION_ARGS)
 		 * size = tablespace dirname length + dir sep char + oid + terminator
 		 */
 		fctx->location = (char *) palloc(9 + 1 + OIDCHARS + 1 +
-						 strlen(TABLESPACE_VERSION_DIRECTORY) + 1);
+								   strlen(TABLESPACE_VERSION_DIRECTORY) + 1);
 		if (tablespaceOid == GLOBALTABLESPACE_OID)
 		{
 			fctx->dirdesc = NULL;
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index e06f72d6932972898fa087f3f338d6d084e07843..3c744ad8bbd01712f57b34d647fffec3e0195887 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -10,7 +10,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/nabstime.c,v 1.163 2010/01/02 16:57:54 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/nabstime.c,v 1.164 2010/02/26 02:01:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -786,7 +786,7 @@ tintervalrecv(PG_FUNCTION_ARGS)
 {
 	StringInfo	buf = (StringInfo) PG_GETARG_POINTER(0);
 	TimeInterval tinterval;
-	int32 status;
+	int32		status;
 
 	tinterval = (TimeInterval) palloc(sizeof(TimeIntervalData));
 
@@ -796,7 +796,7 @@ tintervalrecv(PG_FUNCTION_ARGS)
 
 	if (tinterval->data[0] == INVALID_ABSTIME ||
 		tinterval->data[1] == INVALID_ABSTIME)
-		status = T_INTERVAL_INVAL;	/* undefined  */
+		status = T_INTERVAL_INVAL;		/* undefined  */
 	else
 		status = T_INTERVAL_VALID;
 
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 1b9f7944959449386e1ce89d0ae8768859c360be..4b8271e45f5fc9e11f158e67f7d889bd84cd225e 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -14,7 +14,7 @@
  * Copyright (c) 1998-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.122 2010/02/08 20:39:51 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.123 2010/02/26 02:01:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -3403,7 +3403,7 @@ static char *
 get_str_from_var_sci(NumericVar *var, int rscale)
 {
 	int32		exponent;
-	NumericVar  denominator;
+	NumericVar	denominator;
 	NumericVar	significand;
 	int			denom_scale;
 	size_t		len;
@@ -3466,9 +3466,9 @@ get_str_from_var_sci(NumericVar *var, int rscale)
 	/*
 	 * Allocate space for the result.
 	 *
-	 * In addition to the significand, we need room for the exponent decoration
-	 * ("e"), the sign of the exponent, up to 10 digits for the exponent
-	 * itself, and of course the null terminator.
+	 * In addition to the significand, we need room for the exponent
+	 * decoration ("e"), the sign of the exponent, up to 10 digits for the
+	 * exponent itself, and of course the null terminator.
 	 */
 	len = strlen(sig_out) + 13;
 	str = palloc(len);
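The comment re-wrapped in get_str_from_var_sci() justifies the "+ 13" in the allocation: one byte for the exponent marker, one for its sign, up to ten digits for an int32 exponent, and one terminator. A standalone check of that arithmetic (the significand string and output format below are stand-ins, not the function's actual ones):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	const char *sig_out = "1.23456789";	/* pretend significand */
	int32_t		exponent = INT32_MIN;	/* worst case: sign plus 10 digits */
	size_t		len = strlen(sig_out) + 13;
	char	   *str = malloc(len);

	if (str == NULL)
		return 1;
	snprintf(str, len, "%se%+d", sig_out, (int) exponent);
	printf("%s (%zu of %zu bytes used)\n", str, strlen(str) + 1, len);
	free(str);
	return 0;
}

With the worst-case exponent the output uses exactly the 13 extra bytes, so the bound is tight.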
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index 93fad320c1600eef9e85ab00c39f6581b4c2b3cc..8b13c8adf040d00093b9da6d1277a11ad15a4062 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.59 2010/01/28 14:25:41 mha Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.60 2010/02/26 02:01:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1116,7 +1116,7 @@ pg_stat_reset(PG_FUNCTION_ARGS)
 Datum
 pg_stat_reset_shared(PG_FUNCTION_ARGS)
 {
-	char	*target = text_to_cstring(PG_GETARG_TEXT_PP(0));
+	char	   *target = text_to_cstring(PG_GETARG_TEXT_PP(0));
 
 	pgstat_reset_shared_counters(target);
 
@@ -1127,7 +1127,7 @@ pg_stat_reset_shared(PG_FUNCTION_ARGS)
 Datum
 pg_stat_reset_single_table_counters(PG_FUNCTION_ARGS)
 {
-	Oid		taboid = PG_GETARG_OID(0);
+	Oid			taboid = PG_GETARG_OID(0);
 
 	pgstat_reset_single_counter(taboid, RESET_TABLE);
 
@@ -1137,7 +1137,7 @@ pg_stat_reset_single_table_counters(PG_FUNCTION_ARGS)
 Datum
 pg_stat_reset_single_function_counters(PG_FUNCTION_ARGS)
 {
-	Oid		funcoid = PG_GETARG_OID(0);
+	Oid			funcoid = PG_GETARG_OID(0);
 
 	pgstat_reset_single_counter(funcoid, RESET_FUNCTION);
 
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index 750b0e3150d30d0bffa8e3d72c89683477a8c236..50a541912806c05e62622b5f991ec4bbccf13c51 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/rowtypes.c,v 1.27 2010/01/02 16:57:55 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/rowtypes.c,v 1.28 2010/02/26 02:01:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -97,10 +97,11 @@ record_in(PG_FUNCTION_ARGS)
 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 		   errmsg("input of anonymous composite types is not implemented")));
 	tupTypmod = -1;				/* for all non-anonymous types */
+
 	/*
-	 *	This comes from the composite type's pg_type.oid and
-	 *	stores system oids in user tables, specifically DatumTupleFields.
-	 *	This oid must be preserved by binary upgrades.
+	 * This comes from the composite type's pg_type.oid and stores system oids
+	 * in user tables, specifically DatumTupleFields. This oid must be
+	 * preserved by binary upgrades.
 	 */
 	tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
 	ncolumns = tupdesc->natts;
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 6bf03dacd3683346e081c9adfee6fb3cdfe02afd..316562537bedafdc3b7234fbd8b02a8b6ced9a73 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.324 2010/02/18 22:43:31 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.325 2010/02/26 02:01:09 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -221,7 +221,7 @@ static Node *processIndirection(Node *node, deparse_context *context,
 static void printSubscripts(ArrayRef *aref, deparse_context *context);
 static char *generate_relation_name(Oid relid, List *namespaces);
 static char *generate_function_name(Oid funcid, int nargs, List *argnames,
-									Oid *argtypes, bool *is_variadic);
+					   Oid *argtypes, bool *is_variadic);
 static char *generate_operator_name(Oid operid, Oid arg1, Oid arg2);
 static text *string_to_text(char *str);
 static char *flatten_reloptions(Oid relid);
@@ -549,12 +549,12 @@ pg_get_triggerdef_worker(Oid trigid, bool pretty)
 		/* tgattr is first var-width field, so OK to access directly */
 		if (trigrec->tgattr.dim1 > 0)
 		{
-			int		i;
+			int			i;
 
 			appendStringInfoString(&buf, " OF ");
 			for (i = 0; i < trigrec->tgattr.dim1; i++)
 			{
-				char   *attname;
+				char	   *attname;
 
 				if (i > 0)
 					appendStringInfoString(&buf, ", ");
@@ -579,7 +579,7 @@ pg_get_triggerdef_worker(Oid trigid, bool pretty)
 	{
 		if (OidIsValid(trigrec->tgconstrrelid))
 			appendStringInfo(&buf, "FROM %s ",
-							 generate_relation_name(trigrec->tgconstrrelid, NIL));
+						generate_relation_name(trigrec->tgconstrrelid, NIL));
 		if (!trigrec->tgdeferrable)
 			appendStringInfo(&buf, "NOT ");
 		appendStringInfo(&buf, "DEFERRABLE INITIALLY ");
@@ -599,11 +599,11 @@ pg_get_triggerdef_worker(Oid trigid, bool pretty)
 						tgrel->rd_att, &isnull);
 	if (!isnull)
 	{
-		Node			   *qual;
-		deparse_context		context;
-		deparse_namespace	dpns;
-		RangeTblEntry	   *oldrte;
-		RangeTblEntry	   *newrte;
+		Node	   *qual;
+		deparse_context context;
+		deparse_namespace dpns;
+		RangeTblEntry *oldrte;
+		RangeTblEntry *newrte;
 
 		appendStringInfoString(&buf, "WHEN (");
 
@@ -848,7 +848,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
 							 quote_identifier(NameStr(idxrelrec->relname)),
 							 generate_relation_name(indrelid, NIL),
 							 quote_identifier(NameStr(amrec->amname)));
-		else					/* currently, must be EXCLUDE constraint */
+		else	/* currently, must be EXCLUDE constraint */
 			appendStringInfo(&buf, "EXCLUDE USING %s (",
 							 quote_identifier(NameStr(amrec->amname)));
 	}
@@ -1262,23 +1262,24 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
 				break;
 			}
 		case CONSTRAINT_TRIGGER:
+
 			/*
 			 * There isn't an ALTER TABLE syntax for creating a user-defined
-			 * constraint trigger, but it seems better to print something
-			 * than throw an error; if we throw error then this function
-			 * couldn't safely be applied to all rows of pg_constraint.
+			 * constraint trigger, but it seems better to print something than
+			 * throw an error; if we throw error then this function couldn't
+			 * safely be applied to all rows of pg_constraint.
 			 */
 			appendStringInfo(&buf, "TRIGGER");
 			break;
 		case CONSTRAINT_EXCLUSION:
 			{
-				Oid		 indexOid = conForm->conindid;
-				Datum	 val;
-				bool	 isnull;
-				Datum	*elems;
-				int		 nElems;
-				int		 i;
-				Oid		*operators;
+				Oid			indexOid = conForm->conindid;
+				Datum		val;
+				bool		isnull;
+				Datum	   *elems;
+				int			nElems;
+				int			i;
+				Oid		   *operators;
 
 				/* Extract operator OIDs from the pg_constraint tuple */
 				val = SysCacheGetAttr(CONSTROID, tup,
@@ -3497,10 +3498,10 @@ push_plan(deparse_namespace *dpns, Plan *subplan)
 	/*
 	 * We special-case Append to pretend that the first child plan is the
 	 * OUTER referent; we have to interpret OUTER Vars in the Append's tlist
-	 * according to one of the children, and the first one is the most
-	 * natural choice.  Likewise special-case ModifyTable to pretend that the
-	 * first child plan is the OUTER referent; this is to support RETURNING
-	 * lists containing references to non-target relations.
+	 * according to one of the children, and the first one is the most natural
+	 * choice.	Likewise special-case ModifyTable to pretend that the first
+	 * child plan is the OUTER referent; this is to support RETURNING lists
+	 * containing references to non-target relations.
 	 */
 	if (IsA(subplan, Append))
 		dpns->outer_plan = (Plan *) linitial(((Append *) subplan)->appendplans);
@@ -4470,10 +4471,10 @@ get_rule_expr(Node *node, deparse_context *context,
 
 				/*
 				 * If the argument is a CaseTestExpr, we must be inside a
-				 * FieldStore, ie, we are assigning to an element of an
-				 * array within a composite column.  Since we already punted
-				 * on displaying the FieldStore's target information, just
-				 * punt here too, and display only the assignment source
+				 * FieldStore, ie, we are assigning to an element of an array
+				 * within a composite column.  Since we already punted on
+				 * displaying the FieldStore's target information, just punt
+				 * here too, and display only the assignment source
 				 * expression.
 				 */
 				if (IsA(aref->refexpr, CaseTestExpr))
@@ -4498,23 +4499,23 @@ get_rule_expr(Node *node, deparse_context *context,
 					appendStringInfoChar(buf, ')');
 
 				/*
-				 * If there's a refassgnexpr, we want to print the node in
-				 * the format "array[subscripts] := refassgnexpr".  This is
-				 * not legal SQL, so decompilation of INSERT or UPDATE
-				 * statements should always use processIndirection as part
-				 * of the statement-level syntax.  We should only see this
-				 * when EXPLAIN tries to print the targetlist of a plan
-				 * resulting from such a statement.
+				 * If there's a refassgnexpr, we want to print the node in the
+				 * format "array[subscripts] := refassgnexpr".	This is not
+				 * legal SQL, so decompilation of INSERT or UPDATE statements
+				 * should always use processIndirection as part of the
+				 * statement-level syntax.	We should only see this when
+				 * EXPLAIN tries to print the targetlist of a plan resulting
+				 * from such a statement.
 				 */
 				if (aref->refassgnexpr)
 				{
-					Node   *refassgnexpr;
+					Node	   *refassgnexpr;
 
 					/*
-					 * Use processIndirection to print this node's
-					 * subscripts as well as any additional field selections
-					 * or subscripting in immediate descendants.  It returns
-					 * the RHS expr that is actually being "assigned".
+					 * Use processIndirection to print this node's subscripts
+					 * as well as any additional field selections or
+					 * subscripting in immediate descendants.  It returns the
+					 * RHS expr that is actually being "assigned".
 					 */
 					refassgnexpr = processIndirection(node, context, true);
 					appendStringInfoString(buf, " := ");
@@ -4724,14 +4725,14 @@ get_rule_expr(Node *node, deparse_context *context,
 				 * There is no good way to represent a FieldStore as real SQL,
 				 * so decompilation of INSERT or UPDATE statements should
 				 * always use processIndirection as part of the
-				 * statement-level syntax.  We should only get here when
+				 * statement-level syntax.	We should only get here when
 				 * EXPLAIN tries to print the targetlist of a plan resulting
 				 * from such a statement.  The plan case is even harder than
 				 * ordinary rules would be, because the planner tries to
 				 * collapse multiple assignments to the same field or subfield
 				 * into one FieldStore; so we can see a list of target fields
 				 * not just one, and the arguments could be FieldStores
-				 * themselves.  We don't bother to try to print the target
+				 * themselves.	We don't bother to try to print the target
 				 * field names; we just print the source arguments, with a
 				 * ROW() around them if there's more than one.  This isn't
 				 * terribly complete, but it's probably good enough for
@@ -5474,7 +5475,7 @@ get_func_expr(FuncExpr *expr, deparse_context *context,
 	argnames = NIL;
 	foreach(l, expr->args)
 	{
-		Node   *arg = (Node *) lfirst(l);
+		Node	   *arg = (Node *) lfirst(l);
 
 		if (IsA(arg, NamedArgExpr))
 			argnames = lappend(argnames, ((NamedArgExpr *) arg)->name);
@@ -5506,7 +5507,7 @@ get_agg_expr(Aggref *aggref, deparse_context *context)
 {
 	StringInfo	buf = context->buf;
 	Oid			argtypes[FUNC_MAX_ARGS];
-	List       *arglist;
+	List	   *arglist;
 	int			nargs;
 	ListCell   *l;
 
@@ -5516,12 +5517,12 @@ get_agg_expr(Aggref *aggref, deparse_context *context)
 	foreach(l, aggref->args)
 	{
 		TargetEntry *tle = (TargetEntry *) lfirst(l);
-		Node   *arg = (Node *) tle->expr;
+		Node	   *arg = (Node *) tle->expr;
 
 		Assert(!IsA(arg, NamedArgExpr));
 		if (tle->resjunk)
 			continue;
-		if (nargs >= FUNC_MAX_ARGS)				/* paranoia */
+		if (nargs >= FUNC_MAX_ARGS)		/* paranoia */
 			ereport(ERROR,
 					(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
 					 errmsg("too many arguments")));
@@ -5565,7 +5566,7 @@ get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context)
 	nargs = 0;
 	foreach(l, wfunc->args)
 	{
-		Node   *arg = (Node *) lfirst(l);
+		Node	   *arg = (Node *) lfirst(l);
 
 		Assert(!IsA(arg, NamedArgExpr));
 		argtypes[nargs] = exprType(arg);
@@ -6368,8 +6369,8 @@ processIndirection(Node *node, deparse_context *context, bool printit)
 					 format_type_be(fstore->resulttype));
 
 			/*
-			 * Print the field name.  There should only be one target field
-			 * in stored rules.  There could be more than that in executable
+			 * Print the field name.  There should only be one target field in
+			 * stored rules.  There could be more than that in executable
 			 * target lists, but this function cannot be used for that case.
 			 */
 			Assert(list_length(fstore->fieldnums) == 1);
@@ -6598,7 +6599,7 @@ generate_relation_name(Oid relid, List *namespaces)
  * generate_function_name
  *		Compute the name to display for a function specified by OID,
  *		given that it is being called with the specified actual arg names and
- *		types.  (Those matter because of ambiguous-function resolution rules.)
+ *		types.	(Those matter because of ambiguous-function resolution rules.)
  *
  * The result includes all necessary quoting and schema-prefixing.	We can
  * also pass back an indication of whether the function is variadic.
@@ -6628,7 +6629,7 @@ generate_function_name(Oid funcid, int nargs, List *argnames,
 	/*
 	 * The idea here is to schema-qualify only if the parser would fail to
 	 * resolve the correct function given the unqualified func name with the
-	 * specified argtypes.  If the function is variadic, we should presume
+	 * specified argtypes.	If the function is variadic, we should presume
 	 * that VARIADIC will be included in the call.
 	 */
 	p_result = func_get_detail(list_make1(makeString(proname)),
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index b83cd45d7c7545dca84fd3a0b6716f474166a2bc..5925a91373989c16e7c0bade2028f7bb003ac7d0 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -15,7 +15,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.269 2010/02/14 18:42:16 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.270 2010/02/26 02:01:10 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -167,9 +167,9 @@ static double convert_timevalue_to_scalar(Datum value, Oid typid);
 static bool get_variable_range(PlannerInfo *root, VariableStatData *vardata,
 				   Oid sortop, Datum *min, Datum *max);
 static bool get_actual_variable_range(PlannerInfo *root,
-									  VariableStatData *vardata,
-									  Oid sortop,
-									  Datum *min, Datum *max);
+						  VariableStatData *vardata,
+						  Oid sortop,
+						  Datum *min, Datum *max);
 static Selectivity prefix_selectivity(PlannerInfo *root,
 				   VariableStatData *vardata,
 				   Oid vartype, Oid opfamily, Const *prefixcon);
@@ -749,13 +749,13 @@ ineq_histogram_selectivity(PlannerInfo *root,
 			 * results ... but probably not any more garbage-y than you would
 			 * from the old linear search.)
 			 *
-			 * If the binary search accesses the first or last histogram entry,
-			 * we try to replace that endpoint with the true column min or max
-			 * as found by get_actual_variable_range().  This ameliorates
-			 * misestimates when the min or max is moving as a result of
-			 * changes since the last ANALYZE.  Note that this could result
-			 * in effectively including MCVs into the histogram that weren't
-			 * there before, but we don't try to correct for that.
+			 * If the binary search accesses the first or last histogram
+			 * entry, we try to replace that endpoint with the true column min
+			 * or max as found by get_actual_variable_range().	This
+			 * ameliorates misestimates when the min or max is moving as a
+			 * result of changes since the last ANALYZE.  Note that this could
+			 * result in effectively including MCVs into the histogram that
+			 * weren't there before, but we don't try to correct for that.
 			 */
 			double		histfrac;
 			int			lobound = 0;	/* first possible slot to search */
@@ -3727,8 +3727,7 @@ convert_string_datum(Datum value, Oid typid)
 		/*
 		 *
 		 * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?
-		 * FeedbackID=99694
-		 */
+		 * FeedbackID=99694 */
 		{
 			char		x[1];
 
@@ -4118,8 +4117,8 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
 		else if (rte->rtekind == RTE_RELATION)
 		{
 			vardata->statsTuple = SearchSysCache3(STATRELATTINH,
-												  ObjectIdGetDatum(rte->relid),
-												  Int16GetDatum(var->varattno),
+												ObjectIdGetDatum(rte->relid),
+												Int16GetDatum(var->varattno),
 												  BoolGetDatum(rte->inh));
 			vardata->freefunc = ReleaseSysCache;
 		}
@@ -4259,8 +4258,8 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
 							vardata->statsTuple =
 								SearchSysCache3(STATRELATTINH,
 										   ObjectIdGetDatum(index->indexoid),
-											    Int16GetDatum(pos + 1),
-											    BoolGetDatum(false));
+												Int16GetDatum(pos + 1),
+												BoolGetDatum(false));
 							vardata->freefunc = ReleaseSysCache;
 						}
 						if (vardata->statsTuple)
@@ -4407,11 +4406,11 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
 	int			i;
 
 	/*
-	 * XXX It's very tempting to try to use the actual column min and max,
-	 * if we can get them relatively-cheaply with an index probe.  However,
-	 * since this function is called many times during join planning,
-	 * that could have unpleasant effects on planning speed.  Need more
-	 * investigation before enabling this.
+	 * XXX It's very tempting to try to use the actual column min and max, if
+	 * we can get them relatively-cheaply with an index probe.	However, since
+	 * this function is called many times during join planning, that could
+	 * have unpleasant effects on planning speed.  Need more investigation
+	 * before enabling this.
 	 */
 #ifdef NOT_USED
 	if (get_actual_variable_range(root, vardata, sortop, min, max))
@@ -4550,8 +4549,8 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
 			continue;
 
 		/*
-		 * Ignore partial indexes --- we only want stats that cover the
-		 * entire relation.
+		 * Ignore partial indexes --- we only want stats that cover the entire
+		 * relation.
 		 */
 		if (index->indpred != NIL)
 			continue;
@@ -4577,8 +4576,8 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
 			continue;
 
 		/*
-		 * Found a suitable index to extract data from.  We'll need an
-		 * EState and a bunch of other infrastructure.
+		 * Found a suitable index to extract data from.  We'll need an EState
+		 * and a bunch of other infrastructure.
 		 */
 		{
 			EState	   *estate;
@@ -4622,7 +4621,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
 			/* set up an IS NOT NULL scan key so that we ignore nulls */
 			ScanKeyEntryInitialize(&scankeys[0],
 								   SK_ISNULL | SK_SEARCHNOTNULL,
-								   1,			/* index col to scan */
+								   1,	/* index col to scan */
 								   InvalidStrategy,		/* no strategy */
 								   InvalidOid,	/* no strategy subtype */
 								   InvalidOid,	/* no reg proc for this */
@@ -4641,7 +4640,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
 										 indexscandir)) != NULL)
 				{
 					/* Extract the index column values from the heap tuple */
-					ExecStoreTuple(tup,	slot, InvalidBuffer, false);
+					ExecStoreTuple(tup, slot, InvalidBuffer, false);
 					FormIndexDatum(indexInfo, slot, estate,
 								   values, isnull);
 
@@ -4672,7 +4671,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
 										 -indexscandir)) != NULL)
 				{
 					/* Extract the index column values from the heap tuple */
-					ExecStoreTuple(tup,	slot, InvalidBuffer, false);
+					ExecStoreTuple(tup, slot, InvalidBuffer, false);
 					FormIndexDatum(indexInfo, slot, estate,
 								   values, isnull);
 
@@ -4872,8 +4871,8 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
 
 	/*
 	 * Check for ARE director prefix.  It's worth our trouble to recognize
-	 * this because similar_escape() used to use it, and some other code
-	 * might still use it, to force ARE mode.
+	 * this because similar_escape() used to use it, and some other code might
+	 * still use it, to force ARE mode.
 	 */
 	pos = 0;
 	if (strncmp(patt, "***:", 4) == 0)
@@ -5808,7 +5807,7 @@ genericcostestimate(PlannerInfo *root,
 		 * since that's internal to the indexscan.)
 		 */
 		*indexTotalCost = (pages_fetched * spc_random_page_cost)
-							/ num_outer_scans;
+			/ num_outer_scans;
 	}
 	else
 	{
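The long re-wrapped comment in ineq_histogram_selectivity() talks about binary-searching the histogram bounds and interpolating within the bucket that contains the probe value. The real code deals with operator callbacks, endpoint replacement, and MCV interactions; as a hedged sketch of just the core estimate over equal-population buckets:

#include <stdio.h>

/*
 * Estimate P(col < probe) from sorted histogram bucket boundaries that
 * divide the column into equal-population buckets: binary-search for the
 * containing bucket, then interpolate linearly inside it.
 */
static double
hist_fraction(const double *bounds, int nbounds, double probe)
{
	int			lo = 0;
	int			hi = nbounds - 1;

	if (probe <= bounds[0])
		return 0.0;
	if (probe >= bounds[nbounds - 1])
		return 1.0;

	/* find lo such that bounds[lo] <= probe < bounds[lo + 1] */
	while (lo + 1 < hi)
	{
		int			mid = (lo + hi) / 2;

		if (bounds[mid] <= probe)
			lo = mid;
		else
			hi = mid;
	}

	/* fraction of buckets fully below probe, plus the partial bucket */
	return (lo + (probe - bounds[lo]) / (bounds[lo + 1] - bounds[lo]))
		/ (nbounds - 1);
}

int
main(void)
{
	double		bounds[] = {0, 10, 20, 40, 100};	/* 4 equal-population buckets */

	printf("%.3f\n", hist_fraction(bounds, 5, 15));		/* 0.375 */
	printf("%.3f\n", hist_fraction(bounds, 5, 40));		/* 0.750 */
	return 0;
}

For example, a probe of 15 lands halfway through the second of four buckets, giving 0.375.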
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 5289a2acfa1530dddb3ed7d813f69293acab0742..a0c5a6ab66a617cd756e22837527f8454077d11d 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.205 2010/01/02 16:57:55 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.206 2010/02/26 02:01:10 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -2780,11 +2780,11 @@ interval_mi(PG_FUNCTION_ARGS)
 }
 
 /*
- *  There is no interval_abs():  it is unclear what value to return:
- *    http://archives.postgresql.org/pgsql-general/2009-10/msg01031.php
- *    http://archives.postgresql.org/pgsql-general/2009-11/msg00041.php
+ *	There is no interval_abs():  it is unclear what value to return:
+ *	  http://archives.postgresql.org/pgsql-general/2009-10/msg01031.php
+ *	  http://archives.postgresql.org/pgsql-general/2009-11/msg00041.php
  */
- 
+
 Datum
 interval_mul(PG_FUNCTION_ARGS)
 {
diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c
index 31c78182017824973ab54d23514338083e9f12c9..db7ecd14b26ce965fea629016f924d8603e03dca 100644
--- a/src/backend/utils/adt/txid.c
+++ b/src/backend/utils/adt/txid.c
@@ -14,7 +14,7 @@
  *	Author: Jan Wieck, Afilias USA INC.
  *	64-bit txids: Marko Kreen, Skype Technologies
  *
- *	$PostgreSQL: pgsql/src/backend/utils/adt/txid.c,v 1.12 2010/02/20 21:24:02 tgl Exp $
+ *	$PostgreSQL: pgsql/src/backend/utils/adt/txid.c,v 1.13 2010/02/26 02:01:10 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -330,11 +330,10 @@ txid_current(PG_FUNCTION_ARGS)
 	TxidEpoch	state;
 
 	/*
-	 * Must prevent during recovery because if an xid is
-	 * not assigned we try to assign one, which would fail.
-	 * Programs already rely on this function to always
-	 * return a valid current xid, so we should not change
-	 * this to return NULL or similar invalid xid.
+	 * Must prevent during recovery because if an xid is not assigned we try
+	 * to assign one, which would fail. Programs already rely on this function
+	 * to always return a valid current xid, so we should not change this to
+	 * return NULL or similar invalid xid.
 	 */
 	PreventCommandDuringRecovery("txid_current()");
 
diff --git a/src/backend/utils/adt/varbit.c b/src/backend/utils/adt/varbit.c
index 4a550cdae2980a3ede53b08eb322b743ee6f1d93..d0cd0eba9064ea21d6690d519d5d72d1eb94bae7 100644
--- a/src/backend/utils/adt/varbit.c
+++ b/src/backend/utils/adt/varbit.c
@@ -9,7 +9,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.64 2010/01/25 20:55:32 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.65 2010/02/26 02:01:10 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -25,7 +25,7 @@
 
 static VarBit *bit_catenate(VarBit *arg1, VarBit *arg2);
 static VarBit *bitsubstring(VarBit *arg, int32 s, int32 l,
-							bool length_not_specified);
+			 bool length_not_specified);
 static VarBit *bit_overlay(VarBit *t1, VarBit *t2, int sp, int sl);
 
 
@@ -980,9 +980,10 @@ bitsubstring(VarBit *arg, int32 s, int32 l, bool length_not_specified)
 	else
 	{
 		e = s + l;
+
 		/*
-		 * A negative value for L is the only way for the end position
-		 * to be before the start. SQL99 says to throw an error.
+		 * A negative value for L is the only way for the end position to be
+		 * before the start. SQL99 says to throw an error.
 		 */
 		if (e < s)
 			ereport(ERROR,
@@ -1055,8 +1056,8 @@ bitoverlay(PG_FUNCTION_ARGS)
 {
 	VarBit	   *t1 = PG_GETARG_VARBIT_P(0);
 	VarBit	   *t2 = PG_GETARG_VARBIT_P(1);
-	int			sp = PG_GETARG_INT32(2); /* substring start position */
-	int			sl = PG_GETARG_INT32(3); /* substring length */
+	int			sp = PG_GETARG_INT32(2);		/* substring start position */
+	int			sl = PG_GETARG_INT32(3);		/* substring length */
 
 	PG_RETURN_VARBIT_P(bit_overlay(t1, t2, sp, sl));
 }
@@ -1066,10 +1067,10 @@ bitoverlay_no_len(PG_FUNCTION_ARGS)
 {
 	VarBit	   *t1 = PG_GETARG_VARBIT_P(0);
 	VarBit	   *t2 = PG_GETARG_VARBIT_P(1);
-	int			sp = PG_GETARG_INT32(2); /* substring start position */
+	int			sp = PG_GETARG_INT32(2);		/* substring start position */
 	int			sl;
 
-	sl = VARBITLEN(t2);				/* defaults to length(t2) */
+	sl = VARBITLEN(t2);			/* defaults to length(t2) */
 	PG_RETURN_VARBIT_P(bit_overlay(t1, t2, sp, sl));
 }
 
@@ -1082,9 +1083,9 @@ bit_overlay(VarBit *t1, VarBit *t2, int sp, int sl)
 	int			sp_pl_sl;
 
 	/*
-	 * Check for possible integer-overflow cases.  For negative sp,
-	 * throw a "substring length" error because that's what should be
-	 * expected according to the spec's definition of OVERLAY().
+	 * Check for possible integer-overflow cases.  For negative sp, throw a
+	 * "substring length" error because that's what should be expected
+	 * according to the spec's definition of OVERLAY().
 	 */
 	if (sp <= 0)
 		ereport(ERROR,
@@ -1096,7 +1097,7 @@ bit_overlay(VarBit *t1, VarBit *t2, int sp, int sl)
 				(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
 				 errmsg("integer out of range")));
 
-	s1 = bitsubstring(t1, 1, sp-1, false);
+	s1 = bitsubstring(t1, 1, sp - 1, false);
 	s2 = bitsubstring(t1, sp_pl_sl, -1, true);
 	result = bit_catenate(s1, t2);
 	result = bit_catenate(result, s2);
@@ -1446,7 +1447,7 @@ bitfromint4(PG_FUNCTION_ARGS)
 	/* store first fractional byte */
 	if (destbitsleft > srcbitsleft)
 	{
-		int		val = (int) (a >> (destbitsleft - 8));
+		int			val = (int) (a >> (destbitsleft - 8));
 
 		/* Force sign-fill in case the compiler implements >> as zero-fill */
 		if (a < 0)
@@ -1526,7 +1527,7 @@ bitfromint8(PG_FUNCTION_ARGS)
 	/* store first fractional byte */
 	if (destbitsleft > srcbitsleft)
 	{
-		int		val = (int) (a >> (destbitsleft - 8));
+		int			val = (int) (a >> (destbitsleft - 8));
 
 		/* Force sign-fill in case the compiler implements >> as zero-fill */
 		if (a < 0)
@@ -1708,6 +1709,7 @@ bitsetbit(PG_FUNCTION_ARGS)
 				(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
 				 errmsg("bit index %d out of valid range (0..%d)",
 						n, bitlen - 1)));
+
 	/*
 	 * sanity check!
 	 */
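bit_overlay() above (and the matching text/bytea versions later in this patch) builds OVERLAY(t1 PLACING t2 FROM sp FOR sl) as substring-before, then t2, then the remainder of t1 starting at sp + sl, after guarding the start position. A standalone sketch of the same construction on plain C strings, with simplified guards in the spirit of the sp_pl_sl check rather than the exact varlena code:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* result = t1[0 .. sp-2] || t2 || t1[sp+sl-1 ..]; returns NULL on bad input. */
static char *
overlay(const char *t1, const char *t2, int sp, int sl)
{
	size_t		len1 = strlen(t1);
	size_t		prefix,
				skip;
	char	   *result;

	/* start must be positive and sp + sl must not overflow */
	if (sp <= 0 || sl < 0 || sp > INT_MAX - sl)
		return NULL;

	prefix = ((size_t) sp - 1 > len1) ? len1 : (size_t) sp - 1;
	skip = ((size_t) sp - 1 + sl > len1) ? len1 : (size_t) sp - 1 + sl;

	result = malloc(prefix + strlen(t2) + (len1 - skip) + 1);
	if (result == NULL)
		return NULL;
	memcpy(result, t1, prefix);
	strcpy(result + prefix, t2);
	strcat(result, t1 + skip);
	return result;
}

int
main(void)
{
	char	   *s = overlay("Txxxxas", "hom", 2, 4);

	if (s != NULL)
	{
		puts(s);				/* prints "Thomas" */
		free(s);
	}
	return 0;
}

Running it prints "Thomas", the usual OVERLAY('Txxxxas' PLACING 'hom' FROM 2 FOR 4) documentation example.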
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 652e1e6add40985c173ec882e1710da05d6b885f..be41c977ffb865cc55a8295f7ce269c28a036ed6 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.176 2010/02/08 20:39:51 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.177 2010/02/26 02:01:10 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -30,7 +30,7 @@
 
 
 /* GUC variable */
-int		bytea_output = BYTEA_OUTPUT_HEX;
+int			bytea_output = BYTEA_OUTPUT_HEX;
 
 typedef struct varlena unknown;
 
@@ -205,12 +205,12 @@ byteain(PG_FUNCTION_ARGS)
 	/* Recognize hex input */
 	if (inputText[0] == '\\' && inputText[1] == 'x')
 	{
-		size_t len = strlen(inputText);
+		size_t		len = strlen(inputText);
 
-		bc = (len - 2)/2 + VARHDRSZ;		/* maximum possible length */
+		bc = (len - 2) / 2 + VARHDRSZ;	/* maximum possible length */
 		result = palloc(bc);
 		bc = hex_decode(inputText + 2, len - 2, VARDATA(result));
-		SET_VARSIZE(result, bc + VARHDRSZ);	/* actual length */
+		SET_VARSIZE(result, bc + VARHDRSZ);		/* actual length */
 
 		PG_RETURN_BYTEA_P(result);
 	}
@@ -306,47 +306,47 @@ byteaout(PG_FUNCTION_ARGS)
 	}
 	else if (bytea_output == BYTEA_OUTPUT_ESCAPE)
 	{
-	/* Print traditional escaped format */
-	char	   *vp;
-	int			len;
-	int			i;
+		/* Print traditional escaped format */
+		char	   *vp;
+		int			len;
+		int			i;
 
-	len = 1;					/* empty string has 1 char */
-	vp = VARDATA_ANY(vlena);
-	for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++)
-	{
-		if (*vp == '\\')
-			len += 2;
-		else if ((unsigned char) *vp < 0x20 || (unsigned char) *vp > 0x7e)
-			len += 4;
-		else
-			len++;
-	}
-	rp = result = (char *) palloc(len);
-	vp = VARDATA_ANY(vlena);
-	for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++)
-	{
-		if (*vp == '\\')
+		len = 1;				/* empty string has 1 char */
+		vp = VARDATA_ANY(vlena);
+		for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++)
 		{
-			*rp++ = '\\';
-			*rp++ = '\\';
+			if (*vp == '\\')
+				len += 2;
+			else if ((unsigned char) *vp < 0x20 || (unsigned char) *vp > 0x7e)
+				len += 4;
+			else
+				len++;
 		}
-		else if ((unsigned char) *vp < 0x20 || (unsigned char) *vp > 0x7e)
+		rp = result = (char *) palloc(len);
+		vp = VARDATA_ANY(vlena);
+		for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++)
 		{
-			int			val;			/* holds unprintable chars */
-
-			val = *vp;
-			rp[0] = '\\';
-			rp[3] = DIG(val & 07);
-			val >>= 3;
-			rp[2] = DIG(val & 07);
-			val >>= 3;
-			rp[1] = DIG(val & 03);
-			rp += 4;
+			if (*vp == '\\')
+			{
+				*rp++ = '\\';
+				*rp++ = '\\';
+			}
+			else if ((unsigned char) *vp < 0x20 || (unsigned char) *vp > 0x7e)
+			{
+				int			val;	/* holds unprintable chars */
+
+				val = *vp;
+				rp[0] = '\\';
+				rp[3] = DIG(val & 07);
+				val >>= 3;
+				rp[2] = DIG(val & 07);
+				val >>= 3;
+				rp[1] = DIG(val & 03);
+				rp += 4;
+			}
+			else
+				*rp++ = *vp;
 		}
-		else
-			*rp++ = *vp;
-	}
 	}
 	else
 	{
@@ -900,8 +900,8 @@ textoverlay(PG_FUNCTION_ARGS)
 {
 	text	   *t1 = PG_GETARG_TEXT_PP(0);
 	text	   *t2 = PG_GETARG_TEXT_PP(1);
-	int			sp = PG_GETARG_INT32(2); /* substring start position */
-	int			sl = PG_GETARG_INT32(3); /* substring length */
+	int			sp = PG_GETARG_INT32(2);		/* substring start position */
+	int			sl = PG_GETARG_INT32(3);		/* substring length */
 
 	PG_RETURN_TEXT_P(text_overlay(t1, t2, sp, sl));
 }
@@ -911,10 +911,10 @@ textoverlay_no_len(PG_FUNCTION_ARGS)
 {
 	text	   *t1 = PG_GETARG_TEXT_PP(0);
 	text	   *t2 = PG_GETARG_TEXT_PP(1);
-	int			sp = PG_GETARG_INT32(2); /* substring start position */
+	int			sp = PG_GETARG_INT32(2);		/* substring start position */
 	int			sl;
 
-	sl = text_length(PointerGetDatum(t2)); /* defaults to length(t2) */
+	sl = text_length(PointerGetDatum(t2));		/* defaults to length(t2) */
 	PG_RETURN_TEXT_P(text_overlay(t1, t2, sp, sl));
 }
 
@@ -927,9 +927,9 @@ text_overlay(text *t1, text *t2, int sp, int sl)
 	int			sp_pl_sl;
 
 	/*
-	 * Check for possible integer-overflow cases.  For negative sp,
-	 * throw a "substring length" error because that's what should be
-	 * expected according to the spec's definition of OVERLAY().
+	 * Check for possible integer-overflow cases.  For negative sp, throw a
+	 * "substring length" error because that's what should be expected
+	 * according to the spec's definition of OVERLAY().
 	 */
 	if (sp <= 0)
 		ereport(ERROR,
@@ -941,7 +941,7 @@ text_overlay(text *t1, text *t2, int sp, int sl)
 				(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
 				 errmsg("integer out of range")));
 
-	s1 = text_substring(PointerGetDatum(t1), 1, sp-1, false);
+	s1 = text_substring(PointerGetDatum(t1), 1, sp - 1, false);
 	s2 = text_substring(PointerGetDatum(t1), sp_pl_sl, -1, true);
 	result = text_catenate(s1, t2);
 	result = text_catenate(result, s2);
@@ -1823,8 +1823,8 @@ bytea_substring(Datum str,
 	if (length_not_specified)
 	{
 		/*
-		 * Not passed a length - DatumGetByteaPSlice() grabs everything to
-		 * the end of the string if we pass it a negative value for length.
+		 * Not passed a length - DatumGetByteaPSlice() grabs everything to the
+		 * end of the string if we pass it a negative value for length.
 		 */
 		L1 = -1;
 	}
@@ -1855,8 +1855,8 @@ bytea_substring(Datum str,
 
 	/*
 	 * If the start position is past the end of the string, SQL99 says to
-	 * return a zero-length string -- DatumGetByteaPSlice() will do that
-	 * for us. Convert to zero-based starting position
+	 * return a zero-length string -- DatumGetByteaPSlice() will do that for
+	 * us. Convert to zero-based starting position
 	 */
 	return DatumGetByteaPSlice(str, S1 - 1, L1);
 }
@@ -1873,8 +1873,8 @@ byteaoverlay(PG_FUNCTION_ARGS)
 {
 	bytea	   *t1 = PG_GETARG_BYTEA_PP(0);
 	bytea	   *t2 = PG_GETARG_BYTEA_PP(1);
-	int			sp = PG_GETARG_INT32(2); /* substring start position */
-	int			sl = PG_GETARG_INT32(3); /* substring length */
+	int			sp = PG_GETARG_INT32(2);		/* substring start position */
+	int			sl = PG_GETARG_INT32(3);		/* substring length */
 
 	PG_RETURN_BYTEA_P(bytea_overlay(t1, t2, sp, sl));
 }
@@ -1884,10 +1884,10 @@ byteaoverlay_no_len(PG_FUNCTION_ARGS)
 {
 	bytea	   *t1 = PG_GETARG_BYTEA_PP(0);
 	bytea	   *t2 = PG_GETARG_BYTEA_PP(1);
-	int			sp = PG_GETARG_INT32(2); /* substring start position */
+	int			sp = PG_GETARG_INT32(2);		/* substring start position */
 	int			sl;
 
-	sl = VARSIZE_ANY_EXHDR(t2);			/* defaults to length(t2) */
+	sl = VARSIZE_ANY_EXHDR(t2); /* defaults to length(t2) */
 	PG_RETURN_BYTEA_P(bytea_overlay(t1, t2, sp, sl));
 }
 
@@ -1900,9 +1900,9 @@ bytea_overlay(bytea *t1, bytea *t2, int sp, int sl)
 	int			sp_pl_sl;
 
 	/*
-	 * Check for possible integer-overflow cases.  For negative sp,
-	 * throw a "substring length" error because that's what should be
-	 * expected according to the spec's definition of OVERLAY().
+	 * Check for possible integer-overflow cases.  For negative sp, throw a
+	 * "substring length" error because that's what should be expected
+	 * according to the spec's definition of OVERLAY().
 	 */
 	if (sp <= 0)
 		ereport(ERROR,
@@ -1914,7 +1914,7 @@ bytea_overlay(bytea *t1, bytea *t2, int sp, int sl)
 				(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
 				 errmsg("integer out of range")));
 
-	s1 = bytea_substring(PointerGetDatum(t1), 1, sp-1, false);
+	s1 = bytea_substring(PointerGetDatum(t1), 1, sp - 1, false);
 	s2 = bytea_substring(PointerGetDatum(t1), sp_pl_sl, -1, true);
 	result = bytea_catenate(s1, t2);
 	result = bytea_catenate(result, s2);
@@ -3331,9 +3331,9 @@ pg_column_size(PG_FUNCTION_ARGS)
 static StringInfo
 makeStringAggState(FunctionCallInfo fcinfo)
 {
-	StringInfo		state;
-	MemoryContext	aggcontext;
-	MemoryContext	oldcontext;
+	StringInfo	state;
+	MemoryContext aggcontext;
+	MemoryContext oldcontext;
 
 	if (!AggCheckCallContext(fcinfo, &aggcontext))
 	{
@@ -3355,7 +3355,7 @@ makeStringAggState(FunctionCallInfo fcinfo)
 Datum
 string_agg_transfn(PG_FUNCTION_ARGS)
 {
-	StringInfo		state;
+	StringInfo	state;
 
 	state = PG_ARGISNULL(0) ? NULL : (StringInfo) PG_GETARG_POINTER(0);
 
@@ -3364,20 +3364,20 @@ string_agg_transfn(PG_FUNCTION_ARGS)
 	{
 		if (state == NULL)
 			state = makeStringAggState(fcinfo);
-		appendStringInfoText(state, PG_GETARG_TEXT_PP(1));	/* value */
+		appendStringInfoText(state, PG_GETARG_TEXT_PP(1));		/* value */
 	}
 
 	/*
-	 * The transition type for string_agg() is declared to be "internal", which
-	 * is a pass-by-value type the same size as a pointer.	
+	 * The transition type for string_agg() is declared to be "internal",
+	 * which is a pass-by-value type the same size as a pointer.
 	 */
 	PG_RETURN_POINTER(state);
 }
 
-Datum 
+Datum
 string_agg_delim_transfn(PG_FUNCTION_ARGS)
 {
-	StringInfo		state;
+	StringInfo	state;
 
 	state = PG_ARGISNULL(0) ? NULL : (StringInfo) PG_GETARG_POINTER(0);
 
@@ -3390,12 +3390,12 @@ string_agg_delim_transfn(PG_FUNCTION_ARGS)
 		else if (!PG_ARGISNULL(2))
 			appendStringInfoText(state, PG_GETARG_TEXT_PP(2));	/* delimiter */
 
-		appendStringInfoText(state, PG_GETARG_TEXT_PP(1));	/* value */
+		appendStringInfoText(state, PG_GETARG_TEXT_PP(1));		/* value */
 	}
 
 	/*
-	 * The transition type for string_agg() is declared to be "internal", which
-	 * is a pass-by-value type the same size as a pointer.
+	 * The transition type for string_agg() is declared to be "internal",
+	 * which is a pass-by-value type the same size as a pointer.
 	 */
 	PG_RETURN_POINTER(state);
 }
@@ -3403,7 +3403,7 @@ string_agg_delim_transfn(PG_FUNCTION_ARGS)
 Datum
 string_agg_finalfn(PG_FUNCTION_ARGS)
 {
-	StringInfo		state;
+	StringInfo	state;
 
 	/* cannot be called directly because of internal-type argument */
 	Assert(AggCheckCallContext(fcinfo, NULL));
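The byteaout() escape branch that was re-indented above uses a two-pass scheme: one loop to compute the output length (backslashes double, unprintable bytes become four-character octal escapes), then a single allocation and a second loop to emit. A standalone sketch of that encoding, with malloc in place of palloc and the DIG() macro spelled out; it assumes the same printable range 0x20..0x7e:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
escape_bytes(const unsigned char *data, size_t n)
{
	size_t		len = 1;		/* terminating NUL */
	size_t		i;
	char	   *result,
			   *rp;

	/* first pass: measure */
	for (i = 0; i < n; i++)
	{
		if (data[i] == '\\')
			len += 2;			/* backslash is doubled */
		else if (data[i] < 0x20 || data[i] > 0x7e)
			len += 4;			/* \ooo octal escape */
		else
			len++;
	}

	rp = result = malloc(len);
	if (result == NULL)
		return NULL;

	/* second pass: emit */
	for (i = 0; i < n; i++)
	{
		if (data[i] == '\\')
		{
			*rp++ = '\\';
			*rp++ = '\\';
		}
		else if (data[i] < 0x20 || data[i] > 0x7e)
		{
			*rp++ = '\\';
			*rp++ = '0' + ((data[i] >> 6) & 03);
			*rp++ = '0' + ((data[i] >> 3) & 07);
			*rp++ = '0' + (data[i] & 07);
		}
		else
			*rp++ = (char) data[i];
	}
	*rp = '\0';
	return result;
}

int
main(void)
{
	const unsigned char sample[] = {'a', '\\', 0x01, 'z'};
	char	   *s = escape_bytes(sample, sizeof(sample));

	if (s != NULL)
	{
		puts(s);				/* prints a\\\001z */
		free(s);
	}
	return 0;
}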
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index e5f4dfcbbd412218a49196fc354c34cfbf457e33..335688606be7710412fe21d257bfab261a5587a8 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -10,7 +10,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/cache/attoptcache.c,v 1.2 2010/02/14 18:42:17 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/cache/attoptcache.c,v 1.3 2010/02/26 02:01:11 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -48,7 +48,7 @@ typedef struct
  *		Flush all cache entries when pg_attribute is updated.
  *
  * When pg_attribute is updated, we must flush the cache entry at least
- * for that attribute.  Currently, we just flush them all.  Since attribute
+ * for that attribute.	Currently, we just flush them all.	Since attribute
  * options are not currently used in performance-critical paths (such as
  * query execution), this seems OK.
  */
@@ -78,7 +78,7 @@ InvalidateAttoptCacheCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
 static void
 InitializeAttoptCache(void)
 {
-	HASHCTL ctl;
+	HASHCTL		ctl;
 
 	/* Initialize the hash table. */
 	MemSet(&ctl, 0, sizeof(ctl));
@@ -87,7 +87,7 @@ InitializeAttoptCache(void)
 	ctl.hash = tag_hash;
 	AttoptCacheHash =
 		hash_create("Attopt cache", 256, &ctl,
-				    HASH_ELEM | HASH_FUNCTION);
+					HASH_ELEM | HASH_FUNCTION);
 
 	/* Make sure we've initialized CacheMemoryContext. */
 	if (!CacheMemoryContext)
@@ -108,18 +108,19 @@ get_attribute_options(Oid attrelid, int attnum)
 {
 	AttoptCacheKey key;
 	AttoptCacheEntry *attopt;
-	AttributeOpts  *result;
+	AttributeOpts *result;
 	HeapTuple	tp;
 
 	/* Find existing cache entry, if any. */
 	if (!AttoptCacheHash)
 		InitializeAttoptCache();
-	memset(&key, 0, sizeof(key));	/* make sure any padding bits are unset */
+	memset(&key, 0, sizeof(key));		/* make sure any padding bits are
+										 * unset */
 	key.attrelid = attrelid;
 	key.attnum = attnum;
 	attopt =
 		(AttoptCacheEntry *) hash_search(AttoptCacheHash,
-								         (void *) &key,
+										 (void *) &key,
 										 HASH_FIND,
 										 NULL);
 
@@ -141,8 +142,8 @@ get_attribute_options(Oid attrelid, int attnum)
 			opts = NULL;
 		else
 		{
-			Datum	datum;
-			bool	isNull;
+			Datum		datum;
+			bool		isNull;
 
 			datum = SysCacheGetAttr(ATTNUM,
 									tp,
@@ -152,7 +153,8 @@ get_attribute_options(Oid attrelid, int attnum)
 				opts = NULL;
 			else
 			{
-				bytea *bytea_opts = attribute_reloptions(datum, false);
+				bytea	   *bytea_opts = attribute_reloptions(datum, false);
+
 				opts = MemoryContextAlloc(CacheMemoryContext,
 										  VARSIZE(bytea_opts));
 				memcpy(opts, bytea_opts, VARSIZE(bytea_opts));
@@ -161,13 +163,13 @@ get_attribute_options(Oid attrelid, int attnum)
 		}
 
 		/*
-		 * It's important to create the actual cache entry only after
-		 * reading pg_attribute, since the read could cause a cache flush.
+		 * It's important to create the actual cache entry only after reading
+		 * pg_attribute, since the read could cause a cache flush.
 		 */
 		attopt = (AttoptCacheEntry *) hash_search(AttoptCacheHash,
-												   (void *) &key,
-												   HASH_ENTER,
-												   NULL);
+												  (void *) &key,
+												  HASH_ENTER,
+												  NULL);
 		attopt->opts = opts;
 	}
 
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 2f1aefcc34e0546e3428306e934c2c01904dfea5..7a67f4a85e8174c9c748b13b9c11aa3a6a446ce8 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -80,7 +80,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.97 2010/02/14 18:42:17 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.98 2010/02/26 02:01:11 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -158,8 +158,8 @@ typedef struct TransInvalidationInfo
 static TransInvalidationInfo *transInvalInfo = NULL;
 
 static SharedInvalidationMessage *SharedInvalidMessagesArray;
-static int 					numSharedInvalidMessagesArray;
-static int 					maxSharedInvalidMessagesArray;
+static int	numSharedInvalidMessagesArray;
+static int	maxSharedInvalidMessagesArray;
 
 
 /*
@@ -775,7 +775,7 @@ MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
 		 * We're so close to EOXact that we know we're going to lose it anyhow.
 		 */
 		SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
-											* sizeof(SharedInvalidationMessage));
+										* sizeof(SharedInvalidationMessage));
 	}
 
 	if ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
@@ -784,15 +784,15 @@ MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
 			maxSharedInvalidMessagesArray *= 2;
 
 		SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
-											maxSharedInvalidMessagesArray
-											* sizeof(SharedInvalidationMessage));
+											  maxSharedInvalidMessagesArray
+										* sizeof(SharedInvalidationMessage));
 	}
 
 	/*
 	 * Append the next chunk onto the array
 	 */
 	memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
-			msgs, n * sizeof(SharedInvalidationMessage));
+		   msgs, n * sizeof(SharedInvalidationMessage));
 	numSharedInvalidMessagesArray += n;
 }
 
@@ -820,18 +820,18 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
 
 	/*
 	 * Relcache init file invalidation requires processing both before and
-	 * after we send the SI messages.  However, we need not do anything
-	 * unless we committed.
+	 * after we send the SI messages.  However, we need not do anything unless
+	 * we committed.
 	 */
 	*RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
 
 	/*
-	 * Walk through TransInvalidationInfo to collect all the messages
-	 * into a single contiguous array of invalidation messages. It must
-	 * be contiguous so we can copy directly into WAL message. Maintain the
-	 * order that they would be processed in by AtEOXact_Inval(), to ensure
-	 * emulated behaviour in redo is as similar as possible to original.
-	 * We want the same bugs, if any, not new ones.
+	 * Walk through TransInvalidationInfo to collect all the messages into a
+	 * single contiguous array of invalidation messages. It must be contiguous
+	 * so we can copy directly into the WAL message. Maintain the order that
+	 * they would be processed in by AtEOXact_Inval(), to ensure emulated
+	 * behaviour in redo is as similar as possible to the original. We want
+	 * the same bugs, if any, not new ones.
 	 */
 	oldcontext = MemoryContextSwitchTo(CurTransactionContext);
 
@@ -877,7 +877,7 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
 		return;
 
 	elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
-					(RelcacheInitFileInval ? " and relcache file invalidation" : ""));
+		 (RelcacheInitFileInval ? " and relcache file invalidation" : ""));
 
 	if (RelcacheInitFileInval)
 		RecoveryRelationCacheInitFileInvalidate(dbid, tsid, true);
@@ -1149,7 +1149,7 @@ CacheInvalidateRelcacheByRelid(Oid relid)
  *
  * Sending this type of invalidation msg forces other backends to close open
  * smgr entries for the rel.  This should be done to flush dangling open-file
- * references when the physical rel is being dropped or truncated.  Because
+ * references when the physical rel is being dropped or truncated.	Because
  * these are nontransactional (i.e., not-rollback-able) operations, we just
  * send the inval message immediately without any queuing.
  *
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 4769f6f35eb37b2b5d6251920f465b0f4ee67fa1..63dde8f9cb9392ea108d820987b80b47249e18e6 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.167 2010/02/14 18:42:17 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.168 2010/02/26 02:01:11 momjian Exp $
  *
  * NOTES
  *	  Eventually, the index information should go through here, too.
@@ -622,7 +622,7 @@ get_op_btree_interpretation(Oid opno, List **opfamilies, List **opstrats)
 		{
 			op_negated = true;
 			ReleaseSysCacheList(catlist);
-			catlist = SearchSysCacheList1(AMOPOPID, 
+			catlist = SearchSysCacheList1(AMOPOPID,
 										  ObjectIdGetDatum(op_negator));
 		}
 	}
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 114cd9b9756c2e9282d244b89948a395e50b9d0b..95f010f682d511d2e94e62528733a044ed6a466e 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -35,7 +35,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/cache/plancache.c,v 1.34 2010/01/15 22:36:34 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/cache/plancache.c,v 1.35 2010/02/26 02:01:11 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -284,7 +284,7 @@ FastCreateCachedPlan(Node *raw_parse_tree,
  * CachedPlanSetParserHook: set up to use parser callback hooks
  *
  * Use this when a caller wants to manage parameter information via parser
- * callbacks rather than a fixed parameter-types list.  Beware that the
+ * callbacks rather than a fixed parameter-types list.	Beware that the
  * information pointed to by parserSetupArg must be valid for as long as
  * the cached plan might be replanned!
  */
@@ -360,9 +360,9 @@ StoreCachedPlan(CachedPlanSource *plansource,
 	if (plansource->fully_planned)
 	{
 		/*
-		 * Planner already extracted dependencies, we don't have to ...
-		 * except in the case of EXPLAIN.  We assume here that EXPLAIN
-		 * can't appear in a list with other commands.
+		 * Planner already extracted dependencies, we don't have to ... except
+		 * in the case of EXPLAIN.	We assume here that EXPLAIN can't appear
+		 * in a list with other commands.
 		 */
 		plan->relationOids = plan->invalItems = NIL;
 
@@ -552,12 +552,12 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner)
 			/*
 			 * Generate plans for queries.
 			 *
-			 * The planner may try to call SPI-using functions, which causes
-			 * a problem if we're already inside one.  Rather than expect
-			 * all SPI-using code to do SPI_push whenever a replan could
-			 * happen, it seems best to take care of the case here.
+			 * The planner may try to call SPI-using functions, which causes a
+			 * problem if we're already inside one.  Rather than expect all
+			 * SPI-using code to do SPI_push whenever a replan could happen,
+			 * it seems best to take care of the case here.
 			 */
-			bool	pushed;
+			bool		pushed;
 
 			pushed = SPI_push_conditional();
 
@@ -1134,9 +1134,9 @@ ResetPlanCache(void)
 		 * aborted transactions when we can't revalidate them (cf bug #5269).
 		 * In general there is no point in invalidating utility statements
 		 * since they have no plans anyway.  So mark it dead only if it
-		 * contains at least one non-utility statement.  (EXPLAIN counts as
-		 * a non-utility statement, though, since it contains an analyzed
-		 * query that might have dependencies.)
+		 * contains at least one non-utility statement.  (EXPLAIN counts as a
+		 * non-utility statement, though, since it contains an analyzed query
+		 * that might have dependencies.)
 		 */
 		if (plan->fully_planned)
 		{
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index f015f5b842b704f77362b77bc57632142f33a551..7075cdbb43589c8fd6f8f921168c08e61b46c173 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.307 2010/02/17 04:19:39 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.308 2010/02/26 02:01:11 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -977,7 +977,7 @@ RelationInitIndexAccessInfo(Relation relation)
 	 * honestly rather than just treating it as a Form_pg_index struct.
 	 */
 	tuple = SearchSysCache1(INDEXRELID,
-						    ObjectIdGetDatum(RelationGetRelid(relation)));
+							ObjectIdGetDatum(RelationGetRelid(relation)));
 	if (!HeapTupleIsValid(tuple))
 		elog(ERROR, "cache lookup failed for index %u",
 			 RelationGetRelid(relation));
@@ -1427,9 +1427,9 @@ formrdesc(const char *relationName, Oid relationReltype,
 	 *
 	 * The data we insert here is pretty incomplete/bogus, but it'll serve to
 	 * get us launched.  RelationCacheInitializePhase3() will read the real
-	 * data from pg_class and replace what we've done here.  Note in particular
-	 * that relowner is left as zero; this cues RelationCacheInitializePhase3
-	 * that the real data isn't there yet.
+	 * data from pg_class and replace what we've done here.  Note in
+	 * particular that relowner is left as zero; this cues
+	 * RelationCacheInitializePhase3 that the real data isn't there yet.
 	 */
 	relation->rd_rel = (Form_pg_class) palloc0(CLASS_TUPLE_SIZE);
 
@@ -1707,11 +1707,11 @@ RelationReloadIndexInfo(Relation relation)
 	relation->rd_amcache = NULL;
 
 	/*
-	 * If it's a shared index, we might be called before backend startup
-	 * has finished selecting a database, in which case we have no way to
-	 * read pg_class yet.  However, a shared index can never have any
-	 * significant schema updates, so it's okay to ignore the invalidation
-	 * signal.  Just mark it valid and return without doing anything more.
+	 * If it's a shared index, we might be called before backend startup has
+	 * finished selecting a database, in which case we have no way to read
+	 * pg_class yet.  However, a shared index can never have any significant
+	 * schema updates, so it's okay to ignore the invalidation signal.  Just
+	 * mark it valid and return without doing anything more.
 	 */
 	if (relation->rd_rel->relisshared && !criticalRelcachesBuilt)
 	{
@@ -1755,7 +1755,7 @@ RelationReloadIndexInfo(Relation relation)
 		Form_pg_index index;
 
 		tuple = SearchSysCache1(INDEXRELID,
-							 	ObjectIdGetDatum(RelationGetRelid(relation)));
+								ObjectIdGetDatum(RelationGetRelid(relation)));
 		if (!HeapTupleIsValid(tuple))
 			elog(ERROR, "cache lookup failed for index %u",
 				 RelationGetRelid(relation));
@@ -1793,8 +1793,8 @@ RelationDestroyRelation(Relation relation)
 	RelationCloseSmgr(relation);
 
 	/*
-	 * Free all the subsidiary data structures of the relcache entry,
-	 * then the entry itself.
+	 * Free all the subsidiary data structures of the relcache entry, then the
+	 * entry itself.
 	 */
 	if (relation->rd_rel)
 		pfree(relation->rd_rel);
@@ -1908,21 +1908,21 @@ RelationClearRelation(Relation relation, bool rebuild)
 	else
 	{
 		/*
-		 * Our strategy for rebuilding an open relcache entry is to build
-		 * a new entry from scratch, swap its contents with the old entry,
-		 * and finally delete the new entry (along with any infrastructure
-		 * swapped over from the old entry).  This is to avoid trouble in case
-		 * an error causes us to lose control partway through.  The old entry
+		 * Our strategy for rebuilding an open relcache entry is to build a
+		 * new entry from scratch, swap its contents with the old entry, and
+		 * finally delete the new entry (along with any infrastructure swapped
+		 * over from the old entry).  This is to avoid trouble in case an
+		 * error causes us to lose control partway through.  The old entry
 		 * will still be marked !rd_isvalid, so we'll try to rebuild it again
-		 * on next access.  Meanwhile it's not any less valid than it was
+		 * on next access.	Meanwhile it's not any less valid than it was
 		 * before, so any code that might expect to continue accessing it
 		 * isn't hurt by the rebuild failure.  (Consider for example a
 		 * subtransaction that ALTERs a table and then gets cancelled partway
 		 * through the cache entry rebuild.  The outer transaction should
 		 * still see the not-modified cache entry as valid.)  The worst
-		 * consequence of an error is leaking the necessarily-unreferenced
-		 * new entry, and this shouldn't happen often enough for that to be
-		 * a big problem.
+		 * consequence of an error is leaking the necessarily-unreferenced new
+		 * entry, and this shouldn't happen often enough for that to be a big
+		 * problem.
 		 *
 		 * When rebuilding an open relcache entry, we must preserve ref count,
 		 * rd_createSubid/rd_newRelfilenodeSubid, and rd_toastoid state.  Also
@@ -1959,13 +1959,13 @@ RelationClearRelation(Relation relation, bool rebuild)
 
 		/*
 		 * Perform swapping of the relcache entry contents.  Within this
-		 * process the old entry is momentarily invalid, so there *must*
-		 * be no possibility of CHECK_FOR_INTERRUPTS within this sequence.
-		 * Do it in all-in-line code for safety.
+		 * process the old entry is momentarily invalid, so there *must* be no
+		 * possibility of CHECK_FOR_INTERRUPTS within this sequence. Do it in
+		 * all-in-line code for safety.
 		 *
-		 * Since the vast majority of fields should be swapped, our method
-		 * is to swap the whole structures and then re-swap those few fields
-		 * we didn't want swapped.
+		 * Since the vast majority of fields should be swapped, our method is
+		 * to swap the whole structures and then re-swap those few fields we
+		 * didn't want swapped.
 		 */
 #define SWAPFIELD(fldtype, fldname) \
 		do { \
@@ -2536,8 +2536,8 @@ RelationBuildLocalRelation(const char *relname,
 	 * Insert relation physical and logical identifiers (OIDs) into the right
 	 * places.	Note that the physical ID (relfilenode) is initially the same
 	 * as the logical ID (OID); except that for a mapped relation, we set
-	 * relfilenode to zero and rely on RelationInitPhysicalAddr to consult
-	 * the map.
+	 * relfilenode to zero and rely on RelationInitPhysicalAddr to consult the
+	 * map.
 	 */
 	rel->rd_rel->relisshared = shared_relation;
 	rel->rd_rel->relistemp = rel->rd_istemp;
@@ -2648,8 +2648,8 @@ RelationSetNewRelfilenode(Relation relation, TransactionId freezeXid)
 
 	/*
 	 * Now update the pg_class row.  However, if we're dealing with a mapped
-	 * index, pg_class.relfilenode doesn't change; instead we have to send
-	 * the update to the relation mapper.
+	 * index, pg_class.relfilenode doesn't change; instead we have to send the
+	 * update to the relation mapper.
 	 */
 	if (RelationIsMapped(relation))
 		RelationMapUpdateMap(RelationGetRelid(relation),
@@ -2660,7 +2660,7 @@ RelationSetNewRelfilenode(Relation relation, TransactionId freezeXid)
 		classform->relfilenode = newrelfilenode;
 
 	/* These changes are safe even for a mapped relation */
-	classform->relpages = 0;		/* it's empty until further notice */
+	classform->relpages = 0;	/* it's empty until further notice */
 	classform->reltuples = 0;
 	classform->relfrozenxid = freezeXid;
 
@@ -2679,8 +2679,8 @@ RelationSetNewRelfilenode(Relation relation, TransactionId freezeXid)
 
 	/*
 	 * Mark the rel as having been given a new relfilenode in the current
-	 * (sub) transaction.  This is a hint that can be used to optimize
-	 * later operations on the rel in the same transaction.
+	 * (sub) transaction.  This is a hint that can be used to optimize later
+	 * operations on the rel in the same transaction.
 	 */
 	relation->rd_newRelfilenodeSubid = GetCurrentSubTransactionId();
 	/* ... and now we have eoxact cleanup work to do */
@@ -2761,8 +2761,8 @@ RelationCacheInitializePhase2(void)
 	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 
 	/*
-	 * Try to load the shared relcache cache file.  If unsuccessful,
-	 * bootstrap the cache with a pre-made descriptor for pg_database.
+	 * Try to load the shared relcache cache file.	If unsuccessful, bootstrap
+	 * the cache with a pre-made descriptor for pg_database.
 	 */
 	if (!load_relcache_init_file(true))
 	{
@@ -2808,9 +2808,9 @@ RelationCacheInitializePhase3(void)
 	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 
 	/*
-	 * Try to load the local relcache cache file.  If unsuccessful,
-	 * bootstrap the cache with pre-made descriptors for the critical
-	 * "nailed-in" system catalogs.
+	 * Try to load the local relcache cache file.  If unsuccessful, bootstrap
+	 * the cache with pre-made descriptors for the critical "nailed-in" system
+	 * catalogs.
 	 */
 	if (IsBootstrapProcessingMode() ||
 		!load_relcache_init_file(false))
@@ -2826,7 +2826,7 @@ RelationCacheInitializePhase3(void)
 		formrdesc("pg_type", TypeRelation_Rowtype_Id, false,
 				  true, Natts_pg_type, Desc_pg_type);
 
-#define NUM_CRITICAL_LOCAL_RELS	4	/* fix if you change list above */
+#define NUM_CRITICAL_LOCAL_RELS 4		/* fix if you change list above */
 	}
 
 	MemoryContextSwitchTo(oldcxt);
@@ -2881,7 +2881,7 @@ RelationCacheInitializePhase3(void)
 		load_critical_index(TriggerRelidNameIndexId,
 							TriggerRelationId);
 
-#define NUM_CRITICAL_LOCAL_INDEXES	9		/* fix if you change list above */
+#define NUM_CRITICAL_LOCAL_INDEXES	9	/* fix if you change list above */
 
 		criticalRelcachesBuilt = true;
 	}
@@ -2889,10 +2889,10 @@ RelationCacheInitializePhase3(void)
 	/*
 	 * Process critical shared indexes too.
 	 *
-	 * DatabaseNameIndexId isn't critical for relcache loading, but rather
-	 * for initial lookup of MyDatabaseId, without which we'll never find
-	 * any non-shared catalogs at all.  Autovacuum calls InitPostgres with
-	 * a database OID, so it instead depends on DatabaseOidIndexId.
+	 * DatabaseNameIndexId isn't critical for relcache loading, but rather for
+	 * initial lookup of MyDatabaseId, without which we'll never find any
+	 * non-shared catalogs at all.	Autovacuum calls InitPostgres with a
+	 * database OID, so it instead depends on DatabaseOidIndexId.
 	 */
 	if (!criticalSharedRelcachesBuilt)
 	{
@@ -2901,7 +2901,7 @@ RelationCacheInitializePhase3(void)
 		load_critical_index(DatabaseOidIndexId,
 							DatabaseRelationId);
 
-#define NUM_CRITICAL_SHARED_INDEXES	2		/* fix if you change list above */
+#define NUM_CRITICAL_SHARED_INDEXES 2	/* fix if you change list above */
 
 		criticalSharedRelcachesBuilt = true;
 	}
@@ -2914,8 +2914,8 @@ RelationCacheInitializePhase3(void)
 	 * relcache entries have rules or triggers, load that info the hard way
 	 * since it isn't recorded in the cache file.
 	 *
-	 * Whenever we access the catalogs to read data, there is a possibility
-	 * of a shared-inval cache flush causing relcache entries to be removed.
+	 * Whenever we access the catalogs to read data, there is a possibility of
+	 * a shared-inval cache flush causing relcache entries to be removed.
 	 * Since hash_seq_search only guarantees to still work after the *current*
 	 * entry is removed, it's unsafe to continue the hashtable scan afterward.
 	 * We handle this by restarting the scan from scratch after each access.
@@ -2943,7 +2943,7 @@ RelationCacheInitializePhase3(void)
 			Form_pg_class relp;
 
 			htup = SearchSysCache1(RELOID,
-								ObjectIdGetDatum(RelationGetRelid(relation)));
+							   ObjectIdGetDatum(RelationGetRelid(relation)));
 			if (!HeapTupleIsValid(htup))
 				elog(FATAL, "cache lookup failed for relation %u",
 					 RelationGetRelid(relation));
@@ -2962,9 +2962,9 @@ RelationCacheInitializePhase3(void)
 
 			/*
 			 * Check the values in rd_att were set up correctly.  (We cannot
-			 * just copy them over now: formrdesc must have set up the
-			 * rd_att data correctly to start with, because it may already
-			 * have been copied into one or more catcache entries.)
+			 * just copy them over now: formrdesc must have set up the rd_att
+			 * data correctly to start with, because it may already have been
+			 * copied into one or more catcache entries.)
 			 */
 			Assert(relation->rd_att->tdtypeid == relp->reltype);
 			Assert(relation->rd_att->tdtypmod == -1);
@@ -3701,8 +3701,8 @@ RelationGetExclusionInfo(Relation indexRelation,
 	Oid		   *funcs;
 	uint16	   *strats;
 	Relation	conrel;
-	SysScanDesc	conscan;
-	ScanKeyData	skey[1];
+	SysScanDesc conscan;
+	ScanKeyData skey[1];
 	HeapTuple	htup;
 	bool		found;
 	MemoryContext oldcxt;
@@ -3723,9 +3723,9 @@ RelationGetExclusionInfo(Relation indexRelation,
 	}
 
 	/*
-	 * Search pg_constraint for the constraint associated with the index.
-	 * To make this not too painfully slow, we use the index on conrelid;
-	 * that will hold the parent relation's OID not the index's own OID.
+	 * Search pg_constraint for the constraint associated with the index. To
+	 * make this not too painfully slow, we use the index on conrelid; that
+	 * will hold the parent relation's OID not the index's own OID.
 	 */
 	ScanKeyInit(&skey[0],
 				Anum_pg_constraint_conrelid,
@@ -3739,7 +3739,7 @@ RelationGetExclusionInfo(Relation indexRelation,
 
 	while (HeapTupleIsValid(htup = systable_getnext(conscan)))
 	{
-		Form_pg_constraint	 conform = (Form_pg_constraint) GETSTRUCT(htup);
+		Form_pg_constraint conform = (Form_pg_constraint) GETSTRUCT(htup);
 		Datum		val;
 		bool		isnull;
 		ArrayType  *arr;
@@ -4483,7 +4483,7 @@ RelationCacheInitFileInvalidate(bool beforeSend)
  *
  * We used to keep the init files across restarts, but that is unsafe in PITR
  * scenarios, and even in simple crash-recovery cases there are windows for
- * the init files to become out-of-sync with the database.  So now we just
+ * the init files to become out-of-sync with the database.	So now we just
  * remove them during startup and expect the first backend launch to rebuild
  * them.  Of course, this has to happen in each database of the cluster.
  */
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index 4a34e7eb7e102212c37a3f8d8153dff62054afa2..0320da113b484bf6771f315c36111aff8ea40740 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -23,7 +23,7 @@
  * mapped catalogs can only be relocated by operations such as VACUUM FULL
  * and CLUSTER, which make no transactionally-significant changes: it must be
  * safe for the new file to replace the old, even if the transaction itself
- * aborts.  An important factor here is that the indexes and toast table of
+ * aborts.	An important factor here is that the indexes and toast table of
  * a mapped catalog must also be mapped, so that the rewrites/relocations of
  * all these files commit in a single map file update rather than being tied
  * to transaction commit.
@@ -33,7 +33,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/cache/relmapper.c,v 1.2 2010/02/07 22:00:53 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/cache/relmapper.c,v 1.3 2010/02/26 02:01:12 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -58,21 +58,21 @@
 /*
  * The map file is critical data: we have no automatic method for recovering
  * from loss or corruption of it.  We use a CRC so that we can detect
- * corruption.  To minimize the risk of failed updates, the map file should
+ * corruption.	To minimize the risk of failed updates, the map file should
  * be kept to no more than one standard-size disk sector (ie 512 bytes),
  * and we use overwrite-in-place rather than playing renaming games.
  * The struct layout below is designed to occupy exactly 512 bytes, which
  * might make filesystem updates a bit more efficient.
  *
- * Entries in the mappings[] array are in no particular order.  We could
+ * Entries in the mappings[] array are in no particular order.	We could
  * speed searching by insisting on OID order, but it really shouldn't be
  * worth the trouble given the intended size of the mapping sets.
  */
 #define RELMAPPER_FILENAME		"pg_filenode.map"
 
-#define RELMAPPER_FILEMAGIC		0x592717	/* version ID value */
+#define RELMAPPER_FILEMAGIC		0x592717		/* version ID value */
 
-#define MAX_MAPPINGS			62			/* 62 * 8 + 16 = 512 */
+#define MAX_MAPPINGS			62		/* 62 * 8 + 16 = 512 */
 
 typedef struct RelMapping
 {
@@ -91,7 +91,7 @@ typedef struct RelMapFile
 
 /*
  * The currently known contents of the shared map file and our database's
- * local map file are stored here.  These can be reloaded from disk
+ * local map file are stored here.	These can be reloaded from disk
  * immediately whenever we receive an update sinval message.
  */
 static RelMapFile shared_map;
@@ -118,9 +118,9 @@ static RelMapFile pending_local_updates;
 
 /* non-export function prototypes */
 static void apply_map_update(RelMapFile *map, Oid relationId, Oid fileNode,
-							 bool add_okay);
+				 bool add_okay);
 static void merge_map_updates(RelMapFile *map, const RelMapFile *updates,
-							  bool add_okay);
+				  bool add_okay);
 static void load_relmap_file(bool shared);
 static void write_relmap_file(bool shared, RelMapFile *newmap,
 				  bool write_wal, bool send_sinval, bool preserve_files,
@@ -208,9 +208,9 @@ RelationMapUpdateMap(Oid relationId, Oid fileNode, bool shared,
 	else
 	{
 		/*
-		 * We don't currently support map changes within subtransactions.
-		 * This could be done with more bookkeeping infrastructure, but it
-		 * doesn't presently seem worth it.
+		 * We don't currently support map changes within subtransactions. This
+		 * could be done with more bookkeeping infrastructure, but it doesn't
+		 * presently seem worth it.
 		 */
 		if (GetCurrentTransactionNestLevel() > 1)
 			elog(ERROR, "cannot change relation mapping within subtransaction");
@@ -294,7 +294,7 @@ merge_map_updates(RelMapFile *map, const RelMapFile *updates, bool add_okay)
  * RelationMapRemoveMapping
  *
  * Remove a relation's entry in the map.  This is only allowed for "active"
- * (but not committed) local mappings.  We need it so we can back out the
+ * (but not committed) local mappings.	We need it so we can back out the
  * entry for the transient target file when doing VACUUM FULL/CLUSTER on
  * a mapped relation.
  */
@@ -322,7 +322,7 @@ RelationMapRemoveMapping(Oid relationId)
  * RelationMapInvalidate
  *
  * This routine is invoked for SI cache flush messages.  We must re-read
- * the indicated map file.  However, we might receive a SI message in a
+ * the indicated map file.	However, we might receive a SI message in a
  * process that hasn't yet, and might never, load the mapping files;
  * for example the autovacuum launcher, which *must not* try to read
  * a local map since it is attached to no particular database.
@@ -390,7 +390,7 @@ AtCCI_RelationMap(void)
  *
  * During commit, this must be called as late as possible before the actual
  * transaction commit, so as to minimize the window where the transaction
- * could still roll back after committing map changes.  Although nothing
+ * could still roll back after committing map changes.	Although nothing
  * critically bad happens in such a case, we still would prefer that it
  * not happen, since we'd possibly be losing useful updates to the relations'
  * pg_class row(s).
@@ -457,7 +457,7 @@ AtPrepare_RelationMap(void)
 /*
  * CheckPointRelationMap
  *
- * This is called during a checkpoint.  It must ensure that any relation map
+ * This is called during a checkpoint.	It must ensure that any relation map
  * updates that were WAL-logged before the start of the checkpoint are
  * securely flushed to disk and will not need to be replayed later.  This
  * seems unlikely to be a performance-critical issue, so we use a simple
@@ -599,10 +599,9 @@ load_relmap_file(bool shared)
 	/*
 	 * Note: we could take RelationMappingLock in shared mode here, but it
 	 * seems unnecessary since our read() should be atomic against any
-	 * concurrent updater's write().  If the file is updated shortly after
-	 * we look, the sinval signaling mechanism will make us re-read it
-	 * before we are able to access any relation that's affected by the
-	 * change.
+	 * concurrent updater's write().  If the file is updated shortly after we
+	 * look, the sinval signaling mechanism will make us re-read it before we
+	 * are able to access any relation that's affected by the change.
 	 */
 	if (read(fd, map, sizeof(RelMapFile)) != sizeof(RelMapFile))
 		ereport(FATAL,
@@ -627,8 +626,8 @@ load_relmap_file(bool shared)
 
 	if (!EQ_CRC32(crc, map->crc))
 		ereport(FATAL,
-				(errmsg("relation mapping file \"%s\" contains incorrect checksum",
-						mapfilename)));
+		  (errmsg("relation mapping file \"%s\" contains incorrect checksum",
+				  mapfilename)));
 }
 
 /*
@@ -648,7 +647,7 @@ load_relmap_file(bool shared)
  *
  * Because this may be called during WAL replay when MyDatabaseId,
  * DatabasePath, etc aren't valid, we require the caller to pass in suitable
- * values.  The caller is also responsible for being sure no concurrent
+ * values.	The caller is also responsible for being sure no concurrent
  * map update could be happening.
  */
 static void
@@ -676,10 +675,10 @@ write_relmap_file(bool shared, RelMapFile *newmap,
 	 * critical section, so that an open() failure need not force PANIC.
 	 *
 	 * Note: since we use BasicOpenFile, we are nominally responsible for
-	 * ensuring the fd is closed on error.  In practice, this isn't important
-	 * because either an error happens inside the critical section, or we
-	 * are in bootstrap or WAL replay; so an error past this point is always
-	 * fatal anyway.
+	 * ensuring the fd is closed on error.	In practice, this isn't important
+	 * because either an error happens inside the critical section, or we are
+	 * in bootstrap or WAL replay; so an error past this point is always fatal
+	 * anyway.
 	 */
 	if (shared)
 	{
@@ -773,11 +772,11 @@ write_relmap_file(bool shared, RelMapFile *newmap,
 		CacheInvalidateRelmap(dbid);
 
 	/*
-	 * Make sure that the files listed in the map are not deleted if the
-	 * outer transaction aborts.  This had better be within the critical
-	 * section too: it's not likely to fail, but if it did, we'd arrive
-	 * at transaction abort with the files still vulnerable.  PANICing
-	 * will leave things in a good state on-disk.
+	 * Make sure that the files listed in the map are not deleted if the outer
+	 * transaction aborts.	This had better be within the critical section
+	 * too: it's not likely to fail, but if it did, we'd arrive at transaction
+	 * abort with the files still vulnerable.  PANICing will leave things in a
+	 * good state on-disk.
 	 *
 	 * Note: we're cheating a little bit here by assuming that mapped files
 	 * are either in pg_global or the database's default tablespace.
@@ -816,13 +815,13 @@ perform_relmap_update(bool shared, const RelMapFile *updates)
 	RelMapFile	newmap;
 
 	/*
-	 * Anyone updating a relation's mapping info should take exclusive lock
-	 * on that rel and hold it until commit.  This ensures that there will
-	 * not be concurrent updates on the same mapping value; but there could
-	 * easily be concurrent updates on different values in the same file.
-	 * We cover that by acquiring the RelationMappingLock, re-reading the
-	 * target file to ensure it's up to date, applying the updates, and
-	 * writing the data before releasing RelationMappingLock.
+	 * Anyone updating a relation's mapping info should take exclusive lock on
+	 * that rel and hold it until commit.  This ensures that there will not be
+	 * concurrent updates on the same mapping value; but there could easily be
+	 * concurrent updates on different values in the same file. We cover that
+	 * by acquiring the RelationMappingLock, re-reading the target file to
+	 * ensure it's up to date, applying the updates, and writing the data
+	 * before releasing RelationMappingLock.
 	 *
 	 * There is only one RelationMappingLock.  In principle we could try to
 	 * have one per mapping file, but it seems unlikely to be worth the
@@ -866,8 +865,8 @@ relmap_redo(XLogRecPtr lsn, XLogRecord *record)
 	if (info == XLOG_RELMAP_UPDATE)
 	{
 		xl_relmap_update *xlrec = (xl_relmap_update *) XLogRecGetData(record);
-		RelMapFile newmap;
-		char   *dbpath;
+		RelMapFile	newmap;
+		char	   *dbpath;
 
 		if (xlrec->nbytes != sizeof(RelMapFile))
 			elog(PANIC, "relmap_redo: wrong size %u in relmap update record",
@@ -878,14 +877,13 @@ relmap_redo(XLogRecPtr lsn, XLogRecord *record)
 		dbpath = GetDatabasePath(xlrec->dbid, xlrec->tsid);
 
 		/*
-		 * Write out the new map and send sinval, but of course don't
-		 * write a new WAL entry.  There's no surrounding transaction
-		 * to tell to preserve files, either.
+		 * Write out the new map and send sinval, but of course don't write a
+		 * new WAL entry.  There's no surrounding transaction to tell to
+		 * preserve files, either.
 		 *
 		 * There shouldn't be anyone else updating relmaps during WAL replay,
-		 * so we don't bother to take the RelationMappingLock.  We would
-		 * need to do so if load_relmap_file needed to interlock against
-		 * writers.
+		 * so we don't bother to take the RelationMappingLock.  We would need
+		 * to do so if load_relmap_file needed to interlock against writers.
 		 */
 		write_relmap_file((xlrec->dbid == InvalidOid), &newmap,
 						  false, true, false,
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index 8a60fe4f4286fa47ed04c5493a3d7bce6a917087..3eaafe898c86d09f8cfe334f06c713281fed5a22 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -4,7 +4,7 @@
  *	  Tablespace cache management.
  *
  * We cache the parsed version of spcoptions for each tablespace to avoid
- * needing to reparse on every lookup.  Right now, there doesn't appear to
+ * needing to reparse on every lookup.	Right now, there doesn't appear to
  * be a measurable performance gain from doing this, but that might change
  * in the future as we add more options.
  *
@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/cache/spccache.c,v 1.5 2010/02/14 18:42:17 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/cache/spccache.c,v 1.6 2010/02/26 02:01:12 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -75,7 +75,7 @@ InvalidateTableSpaceCacheCallback(Datum arg, int cacheid, ItemPointer tuplePtr)
 static void
 InitializeTableSpaceCache(void)
 {
-	HASHCTL ctl;
+	HASHCTL		ctl;
 
 	/* Initialize the hash table. */
 	MemSet(&ctl, 0, sizeof(ctl));
@@ -84,7 +84,7 @@ InitializeTableSpaceCache(void)
 	ctl.hash = oid_hash;
 	TableSpaceCacheHash =
 		hash_create("TableSpace cache", 16, &ctl,
-				    HASH_ELEM | HASH_FUNCTION);
+					HASH_ELEM | HASH_FUNCTION);
 
 	/* Make sure we've initialized CacheMemoryContext. */
 	if (!CacheMemoryContext)
@@ -128,18 +128,18 @@ get_tablespace(Oid spcid)
 		return spc;
 
 	/*
-	 * Not found in TableSpace cache.  Check catcache.  If we don't find a
+	 * Not found in TableSpace cache.  Check catcache.	If we don't find a
 	 * valid HeapTuple, it must mean someone has managed to request tablespace
-	 * details for a non-existent tablespace.  We'll just treat that case as if
-	 * no options were specified.
+	 * details for a non-existent tablespace.  We'll just treat that case as
+	 * if no options were specified.
 	 */
 	tp = SearchSysCache1(TABLESPACEOID, ObjectIdGetDatum(spcid));
 	if (!HeapTupleIsValid(tp))
 		opts = NULL;
 	else
 	{
-		Datum	datum;
-		bool	isNull;
+		Datum		datum;
+		bool		isNull;
 
 		datum = SysCacheGetAttr(TABLESPACEOID,
 								tp,
@@ -149,7 +149,8 @@ get_tablespace(Oid spcid)
 			opts = NULL;
 		else
 		{
-			bytea *bytea_opts = tablespace_reloptions(datum, false);
+			bytea	   *bytea_opts = tablespace_reloptions(datum, false);
+
 			opts = MemoryContextAlloc(CacheMemoryContext, VARSIZE(bytea_opts));
 			memcpy(opts, bytea_opts, VARSIZE(bytea_opts));
 		}
@@ -157,7 +158,7 @@ get_tablespace(Oid spcid)
 	}
 
 	/*
-	 * Now create the cache entry.  It's important to do this only after
+	 * Now create the cache entry.	It's important to do this only after
 	 * reading the pg_tablespace entry, since doing so could cause a cache
 	 * flush.
 	 */
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index a689e302429e1387d99792b9edacbe12eb4abb25..a6992e65d94544899be6bb201dced1bf73b9d37c 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -42,7 +42,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.222 2010/02/17 04:19:39 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.223 2010/02/26 02:01:12 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -76,7 +76,8 @@
 #undef _
 #define _(x) err_gettext(x)
 
-static const char *err_gettext(const char *str)
+static const char *
+err_gettext(const char *str)
 /* This extension allows gcc to check the format string for consistency with
    the supplied arguments. */
 __attribute__((format_arg(1)));
@@ -1572,9 +1573,9 @@ write_syslog(int level, const char *line)
 static void
 write_eventlog(int level, const char *line, int len)
 {
-	WCHAR		   *utf16;
-	int				eventlevel = EVENTLOG_ERROR_TYPE;
-	static HANDLE	evtHandle = INVALID_HANDLE_VALUE;
+	WCHAR	   *utf16;
+	int			eventlevel = EVENTLOG_ERROR_TYPE;
+	static HANDLE evtHandle = INVALID_HANDLE_VALUE;
 
 	if (evtHandle == INVALID_HANDLE_VALUE)
 	{
@@ -1611,11 +1612,11 @@ write_eventlog(int level, const char *line, int len)
 	}
 
 	/*
-	 * Convert message to UTF16 text and write it with ReportEventW,
-	 * but fall-back into ReportEventA if conversion failed.
+	 * Convert the message to UTF16 text and write it with ReportEventW, but
+	 * fall back to ReportEventA if the conversion fails.
 	 *
-	 * Also verify that we are not on our way into error recursion trouble
-	 * due to error messages thrown deep inside pgwin32_toUTF16().
+	 * Also verify that we are not on our way into error recursion trouble due
+	 * to error messages thrown deep inside pgwin32_toUTF16().
 	 */
 	if (GetDatabaseEncoding() != GetPlatformEncoding() &&
 		!in_error_recursion_trouble())
@@ -1624,28 +1625,28 @@ write_eventlog(int level, const char *line, int len)
 		if (utf16)
 		{
 			ReportEventW(evtHandle,
-					eventlevel,
-					0,
-					0,				/* All events are Id 0 */
-					NULL,
-					1,
-					0,
-					(LPCWSTR *) &utf16,
-					NULL);
+						 eventlevel,
+						 0,
+						 0,		/* All events are Id 0 */
+						 NULL,
+						 1,
+						 0,
+						 (LPCWSTR *) &utf16,
+						 NULL);
 
 			pfree(utf16);
 			return;
 		}
 	}
 	ReportEventA(evtHandle,
-				eventlevel,
-				0,
-				0,				/* All events are Id 0 */
-				NULL,
-				1,
-				0,
-				&line,
-				NULL);
+				 eventlevel,
+				 0,
+				 0,				/* All events are Id 0 */
+				 NULL,
+				 1,
+				 0,
+				 &line,
+				 NULL);
 }
 #endif   /* WIN32 */
 
@@ -1653,6 +1654,7 @@ static void
 write_console(const char *line, int len)
 {
 #ifdef WIN32
+
 	/*
 	 * WriteConsoleW() will fail if stdout is redirected, so just fall through
 	 * to writing unconverted to the logfile in this case.
@@ -1678,17 +1680,18 @@ write_console(const char *line, int len)
 			}
 
 			/*
-			 * In case WriteConsoleW() failed, fall back to writing the message
-			 * unconverted.
+			 * In case WriteConsoleW() failed, fall back to writing the
+			 * message unconverted.
 			 */
 			pfree(utf16);
 		}
 	}
 #else
+
 	/*
-	 * Conversion on non-win32 platform is not implemented yet.
-	 * It requires non-throw version of pg_do_encoding_conversion(),
-	 * that converts unconvertable characters to '?' without errors.
+	 * Conversion on non-win32 platforms is not implemented yet. It requires
+	 * a non-throwing version of pg_do_encoding_conversion() that converts
+	 * unconvertible characters to '?' without raising errors.
 	 */
 #endif
 
@@ -2733,8 +2736,9 @@ void
 write_stderr(const char *fmt,...)
 {
 	va_list		ap;
+
 #ifdef WIN32
-	char		errbuf[2048];		/* Arbitrary size? */
+	char		errbuf[2048];	/* Arbitrary size? */
 #endif
 
 	fmt = _(fmt);
@@ -2808,7 +2812,7 @@ trace_recovery(int trace_level)
 {
 	if (trace_level < LOG &&
 		trace_level >= trace_recovery_messages)
-			return LOG;
+		return LOG;
 
 	return trace_level;
 }
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index f98469801eaf9c8e0cc31e8ad8a332cddc8a2119..c51d3d0222901601e5188c1e700f9e945b3328dd 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.101 2010/01/02 16:57:56 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.102 2010/02/26 02:01:13 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -443,7 +443,7 @@ internal_unload_library(const char *libname)
 		else
 			prv = file_scanner;
 	}
-#endif /* NOT_USED */
+#endif   /* NOT_USED */
 }
 
 static bool
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 196fb2a0a42ae328fa9616d6509a887a12ca4258..04f91f1cea02c26e7e74dd41c4aa0a4dd6457f66 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.130 2010/02/14 18:42:17 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.131 2010/02/26 02:01:13 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -930,7 +930,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
 
 	if (OidIsValid(fcache->userid))
 		SetUserIdAndSecContext(fcache->userid,
-							   save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
+							save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
 
 	if (fcache->proconfig)
 	{
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index 6bb9e1da00bc4f6eab4cdad8955cadb1a5742ef2..d946aabbb58c7b141cfebd294476bc6ffa66b896 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -7,7 +7,7 @@
  * Copyright (c) 2002-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/fmgr/funcapi.c,v 1.48 2010/02/14 18:42:17 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/fmgr/funcapi.c,v 1.49 2010/02/26 02:01:13 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -795,8 +795,8 @@ get_func_input_arg_names(Datum proargnames, Datum proargmodes,
 
 	/*
 	 * We expect the arrays to be 1-D arrays of the right types; verify that.
-	 * For proargmodes, we don't need to use deconstruct_array()
-	 * since the array data is just going to look like a C array of values.
+	 * For proargmodes, we don't need to use deconstruct_array() since the
+	 * array data is just going to look like a C array of values.
 	 */
 	arr = DatumGetArrayTypeP(proargnames);		/* ensure not toasted */
 	if (ARR_NDIM(arr) != 1 ||
diff --git a/src/backend/utils/hash/pg_crc.c b/src/backend/utils/hash/pg_crc.c
index cb317aa896d16bf950a607d7adf3749a53429fd4..0777faab4ece142d5ee5fe6968aa0fe4d0991673 100644
--- a/src/backend/utils/hash/pg_crc.c
+++ b/src/backend/utils/hash/pg_crc.c
@@ -19,7 +19,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/hash/pg_crc.c,v 1.23 2010/01/07 04:53:34 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/hash/pg_crc.c,v 1.24 2010/02/26 02:01:13 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -115,7 +115,7 @@ const uint32 pg_crc32_table[256] = {
  * (ECMA-182, available from http://www.ecma.ch/ecma1/STAND/ECMA-182.HTM)
  */
 
-#if SIZEOF_VOID_P < 8		/* this test must match the one in pg_crc.h */
+#if SIZEOF_VOID_P < 8			/* this test must match the one in pg_crc.h */
 
 const uint32 pg_crc64_table0[256] = {
 	0x00000000, 0xA9EA3693,
@@ -378,7 +378,6 @@ const uint32 pg_crc64_table1[256] = {
 	0x5DEDC41A, 0x1F1D25F1,
 	0xD80C07CD, 0x9AFCE626
 };
-
 #else							/* use int64 implementation */
 
 const uint64 pg_crc64_table[256] = {
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index fd653bcce58f1729bd45a2f4b4ca143da3af2bb4..2551e81bf7c3916137713b98698d2fd99418ab9b 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.182 2010/02/14 18:42:17 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.183 2010/02/26 02:01:13 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -299,7 +299,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
  * Currently there are two valid bits in SecurityRestrictionContext:
  *
  * SECURITY_LOCAL_USERID_CHANGE indicates that we are inside an operation
- * that is temporarily changing CurrentUserId via these functions.  This is
+ * that is temporarily changing CurrentUserId via these functions.	This is
  * needed to indicate that the actual value of CurrentUserId is not in sync
  * with guc.c's internal state, so SET ROLE has to be disallowed.
  *
@@ -360,7 +360,7 @@ InSecurityRestrictedOperation(void)
 /*
  * These are obsolete versions of Get/SetUserIdAndSecContext that are
  * only provided for bug-compatibility with some rather dubious code in
- * pljava.  We allow the userid to be set, but only when not inside a
+ * pljava.	We allow the userid to be set, but only when not inside a
  * security restriction context.
  */
 void
@@ -690,9 +690,10 @@ CreateLockFile(const char *filename, bool amPostmaster,
 #ifndef WIN32
 	my_p_pid = getppid();
 #else
+
 	/*
-	 * Windows hasn't got getppid(), but doesn't need it since it's not
-	 * using real kill() either...
+	 * Windows hasn't got getppid(), but doesn't need it since it's not using
+	 * real kill() either...
 	 */
 	my_p_pid = 0;
 #endif
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 034c615403d46b8f04e91e522250c43b93f1d8c3..01cb07da2d3c05d48c3ec8016176f12f49112fcc 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.203 2010/02/14 18:42:18 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.204 2010/02/26 02:01:13 momjian Exp $
  *
  *
  *-------------------------------------------------------------------------
@@ -75,7 +75,7 @@ static void process_settings(Oid databaseid, Oid roleid);
  * GetDatabaseTuple -- fetch the pg_database row for a database
  *
  * This is used during backend startup when we don't yet have any access to
- * system catalogs in general.  In the worst case, we can seqscan pg_database
+ * system catalogs in general.	In the worst case, we can seqscan pg_database
  * using nothing but the hard-wired descriptor that relcache.c creates for
  * pg_database.  In more typical cases, relcache.c was able to load
  * descriptors for both pg_database and its indexes from the shared relcache
@@ -99,7 +99,7 @@ GetDatabaseTuple(const char *dbname)
 				CStringGetDatum(dbname));
 
 	/*
-	 * Open pg_database and fetch a tuple.  Force heap scan if we haven't yet
+	 * Open pg_database and fetch a tuple.	Force heap scan if we haven't yet
 	 * built the critical shared relcache entries (i.e., we're starting up
 	 * without a shared relcache cache file).
 	 */
@@ -142,7 +142,7 @@ GetDatabaseTupleByOid(Oid dboid)
 				ObjectIdGetDatum(dboid));
 
 	/*
-	 * Open pg_database and fetch a tuple.  Force heap scan if we haven't yet
+	 * Open pg_database and fetch a tuple.	Force heap scan if we haven't yet
 	 * built the critical shared relcache entries (i.e., we're starting up
 	 * without a shared relcache cache file).
 	 */
@@ -179,9 +179,9 @@ PerformAuthentication(Port *port)
 
 	/*
 	 * In EXEC_BACKEND case, we didn't inherit the contents of pg_hba.conf
-	 * etcetera from the postmaster, and have to load them ourselves.  Note
-	 * we are loading them into the startup transaction's memory context,
-	 * not PostmasterContext, but that shouldn't matter.
+	 * etcetera from the postmaster, and have to load them ourselves.  Note we
+	 * are loading them into the startup transaction's memory context, not
+	 * PostmasterContext, but that shouldn't matter.
 	 *
 	 * FIXME: [fork/exec] Ugh.	Is there a way around this overhead?
 	 */
@@ -377,7 +377,7 @@ InitCommunication(void)
 /*
  * pg_split_opts -- split a string of options and append it to an argv array
  *
- * NB: the input string is destructively modified!  Also, caller is responsible
+ * NB: the input string is destructively modified!	Also, caller is responsible
  * for ensuring the argv array is large enough.  The maximum possible number
  * of arguments added by this routine is (strlen(optstr) + 1) / 2.
  *
@@ -495,8 +495,8 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 	InitBufferPoolBackend();
 
 	/*
-	 * Initialize local process's access to XLOG, if appropriate.  In bootstrap
-	 * case we skip this since StartupXLOG() was run instead.
+	 * Initialize local process's access to XLOG, if appropriate.  In
+	 * bootstrap case we skip this since StartupXLOG() was run instead.
 	 */
 	if (!bootstrap)
 		(void) RecoveryInProgress();
@@ -519,8 +519,8 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 		pgstat_initialize();
 
 	/*
-	 * Load relcache entries for the shared system catalogs.  This must
-	 * create at least an entry for pg_database.
+	 * Load relcache entries for the shared system catalogs.  This must create
+	 * at least an entry for pg_database.
 	 */
 	RelationCacheInitializePhase2();
 
@@ -542,10 +542,10 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 	/*
 	 * Start a new transaction here before first access to db, and get a
 	 * snapshot.  We don't have a use for the snapshot itself, but we're
-	 * interested in the secondary effect that it sets RecentGlobalXmin.
-	 * (This is critical for anything that reads heap pages, because HOT
-	 * may decide to prune them even if the process doesn't attempt to
-	 * modify any tuples.)
+	 * interested in the secondary effect that it sets RecentGlobalXmin. (This
+	 * is critical for anything that reads heap pages, because HOT may decide
+	 * to prune them even if the process doesn't attempt to modify any
+	 * tuples.)
 	 */
 	if (!bootstrap)
 	{
@@ -567,7 +567,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 	}
 	else if (in_dbname != NULL)
 	{
-		HeapTuple tuple;
+		HeapTuple	tuple;
 		Form_pg_database dbform;
 
 		tuple = GetDatabaseTuple(in_dbname);
@@ -584,7 +584,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 	else
 	{
 		/* caller specified database by OID */
-		HeapTuple tuple;
+		HeapTuple	tuple;
 		Form_pg_database dbform;
 
 		tuple = GetDatabaseTupleByOid(dboid);
@@ -608,8 +608,8 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 
 	/*
 	 * Now, take a writer's lock on the database we are trying to connect to.
-	 * If there is a concurrently running DROP DATABASE on that database,
-	 * this will block us until it finishes (and has committed its update of
+	 * If there is a concurrently running DROP DATABASE on that database, this
+	 * will block us until it finishes (and has committed its update of
 	 * pg_database).
 	 *
 	 * Note that the lock is not held long, only until the end of this startup
@@ -634,7 +634,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 	 */
 	if (!bootstrap && !am_walsender)
 	{
-		HeapTuple tuple;
+		HeapTuple	tuple;
 
 		tuple = GetDatabaseTuple(dbname);
 		if (!HeapTupleIsValid(tuple) ||
@@ -722,8 +722,8 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 	process_settings(MyDatabaseId, GetSessionUserId());
 
 	/*
-	 * Re-read the pg_database row for our database, check permissions and
-	 * set up database-specific GUC settings.  We can't do this until all the
+	 * Re-read the pg_database row for our database, check permissions and set
+	 * up database-specific GUC settings.  We can't do this until all the
 	 * database-access infrastructure is up.  (Also, it wants to know if the
 	 * user is a superuser, so the above stuff has to happen first.)
 	 */
@@ -752,7 +752,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 
 	/*
 	 * Now process any command-line switches that were included in the startup
-	 * packet, if we are in a regular backend.  We couldn't do this before
+	 * packet, if we are in a regular backend.	We couldn't do this before
 	 * because we didn't know if client is a superuser.
 	 */
 	gucctx = am_superuser ? PGC_SUSET : PGC_BACKEND;
@@ -846,7 +846,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 static void
 process_settings(Oid databaseid, Oid roleid)
 {
-	Relation		relsetting;
+	Relation	relsetting;
 
 	if (!IsUnderPostmaster)
 		return;
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 9064c85fc6e1c1e1cc7227844be8183bd874c71b..2dc537fd058b71c5ed8c93b982baabe1aade5e1d 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -4,7 +4,7 @@
  *
  * Tatsuo Ishii
  *
- * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.93 2010/02/14 18:42:18 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.94 2010/02/26 02:01:14 momjian Exp $
  */
 #include "postgres.h"
 
@@ -484,7 +484,7 @@ length_in_encoding(PG_FUNCTION_ARGS)
 Datum
 pg_encoding_max_length_sql(PG_FUNCTION_ARGS)
 {
-	int encoding = PG_GETARG_INT32(0);
+	int			encoding = PG_GETARG_INT32(0);
 
 	if (PG_VALID_ENCODING(encoding))
 		PG_RETURN_INT32(pg_wchar_table[encoding].maxmblen);
@@ -984,7 +984,7 @@ GetPlatformEncoding(void)
 	if (PlatformEncoding == NULL)
 	{
 		/* try to determine encoding of server's environment locale */
-		int		encoding = pg_get_encoding_from_locale("");
+		int			encoding = pg_get_encoding_from_locale("");
 
 		if (encoding < 0)
 			encoding = PG_SQL_ASCII;
@@ -1016,7 +1016,7 @@ pgwin32_toUTF16(const char *str, int len, int *utf16len)
 	{
 		utf16 = (WCHAR *) palloc(sizeof(WCHAR) * (len + 1));
 		dstlen = MultiByteToWideChar(codepage, 0, str, len, utf16, len);
-		utf16[dstlen] = L'\0';
+		utf16[dstlen] = L'\0';
 	}
 	else
 	{
@@ -1029,7 +1029,7 @@ pgwin32_toUTF16(const char *str, int len, int *utf16len)
 
 		utf16 = (WCHAR *) palloc(sizeof(WCHAR) * (len + 1));
 		dstlen = MultiByteToWideChar(CP_UTF8, 0, utf8, len, utf16, len);
-		utf16[dstlen] = L'\0';
+		utf16[dstlen] = L'\0';
 
 		if (utf8 != str)
 			pfree(utf8);
@@ -1038,7 +1038,7 @@ pgwin32_toUTF16(const char *str, int len, int *utf16len)
 	if (dstlen == 0 && len > 0)
 	{
 		pfree(utf16);
-		return NULL;	/* error */
+		return NULL;			/* error */
 	}
 
 	if (utf16len)
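
The pgwin32_toUTF16() hunks above only re-space the wide-character terminator assignments, but the surrounding conversion is worth seeing in one piece. Below is a minimal, self-contained sketch of the same Win32 API usage, not the backend function itself: it converts UTF-8 to UTF-16 with MultiByteToWideChar(), sizing the buffer at one WCHAR per input byte plus a terminator, the same len + 1 convention visible in the hunk. The helper name and the demo string are inventions for the example.

#include <windows.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <wchar.h>

/* Convert a UTF-8 buffer of 'len' bytes to a newly malloc'd UTF-16 string. */
static WCHAR *
utf8_to_utf16(const char *str, int len)
{
	WCHAR	   *utf16;
	int			dstlen;

	/* one WCHAR per input byte is always enough, plus the terminator */
	utf16 = malloc(sizeof(WCHAR) * (len + 1));
	if (utf16 == NULL)
		return NULL;

	dstlen = MultiByteToWideChar(CP_UTF8, 0, str, len, utf16, len);
	utf16[dstlen] = L'\0';

	if (dstlen == 0 && len > 0)
	{
		free(utf16);			/* conversion failed */
		return NULL;
	}
	return utf16;
}

int
main(void)
{
	const char *s = "caf\xc3\xa9";	/* "café" encoded as UTF-8 */
	WCHAR	   *w = utf8_to_utf16(s, (int) strlen(s));

	if (w != NULL)
	{
		wprintf(L"%ls\n", w);
		free(w);
	}
	return 0;
}
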
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index d5330788452d40609d543818484d70a950be443f..5aefacc8d72b51798882e275ee0a9a3c551b9555 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -10,7 +10,7 @@
  * Written by Peter Eisentraut <peter_e@gmx.net>.
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.542 2010/02/25 13:26:15 mha Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.543 2010/02/26 02:01:14 momjian Exp $
  *
  *--------------------------------------------------------------------
  */
@@ -119,7 +119,7 @@ extern bool fullPageWrites;
 extern int	vacuum_defer_cleanup_age;
 extern int	ssl_renegotiation_limit;
 
-int	trace_recovery_messages = LOG;
+int			trace_recovery_messages = LOG;
 
 #ifdef TRACE_SORT
 extern bool trace_sort;
@@ -1215,8 +1215,8 @@ static struct config_bool ConfigureNamesBool[] =
 	{
 		{"recovery_connections", PGC_POSTMASTER, WAL_SETTINGS,
 			gettext_noop("During recovery, allows connections and queries. "
-						 " During normal running, causes additional info to be written"
-						 " to WAL to enable hot standby mode on WAL standby nodes."),
+			   " During normal running, causes additional info to be written"
+				 " to WAL to enable hot standby mode on WAL standby nodes."),
 			NULL
 		},
 		&XLogRequestRecoveryConnections,
@@ -1248,7 +1248,7 @@ static struct config_bool ConfigureNamesBool[] =
 		{"lo_compat_privileges", PGC_SUSET, COMPAT_OPTIONS_PREVIOUS,
 			gettext_noop("Enables backward compatibility mode for privilege checks on large objects"),
 			gettext_noop("Skips privilege checks when reading or modifying large objects, "
-						 "for compatibility with PostgreSQL releases prior to 9.0.")
+				  "for compatibility with PostgreSQL releases prior to 9.0.")
 		},
 		&lo_compat_privileges,
 		false, NULL, NULL
@@ -2614,9 +2614,9 @@ static struct config_string ConfigureNamesString[] =
 
 	{
 		{"application_name", PGC_USERSET, LOGGING,
-		 gettext_noop("Sets the application name to be reported in statistics and logs."),
-		 NULL,
-		 GUC_IS_NAME | GUC_REPORT | GUC_NOT_IN_SAMPLE
+			gettext_noop("Sets the application name to be reported in statistics and logs."),
+			NULL,
+			GUC_IS_NAME | GUC_REPORT | GUC_NOT_IN_SAMPLE
 		},
 		&application_name,
 		"", assign_application_name, NULL
@@ -4687,16 +4687,16 @@ set_config_option(const char *name, const char *value,
 				if (changeVal && !is_newvalue_equal(record, value))
 					ereport(elevel,
 							(errcode(ERRCODE_CANT_CHANGE_RUNTIME_PARAM),
-					   errmsg("parameter \"%s\" cannot be changed without restarting the server",
-							  name)));
+							 errmsg("parameter \"%s\" cannot be changed without restarting the server",
+									name)));
 				return true;
 			}
 			if (context != PGC_POSTMASTER)
 			{
 				ereport(elevel,
 						(errcode(ERRCODE_CANT_CHANGE_RUNTIME_PARAM),
-					   errmsg("parameter \"%s\" cannot be changed without restarting the server",
-							  name)));
+						 errmsg("parameter \"%s\" cannot be changed without restarting the server",
+								name)));
 				return false;
 			}
 			break;
@@ -4758,20 +4758,20 @@ set_config_option(const char *name, const char *value,
 
 	/*
 	 * Disallow changing GUC_NOT_WHILE_SEC_REST values if we are inside a
-	 * security restriction context.  We can reject this regardless of
-	 * the GUC context or source, mainly because sources that it might be
-	 * reasonable to override for won't be seen while inside a function.
+	 * security restriction context.  We can reject this regardless of the GUC
+	 * context or source, mainly because sources that it might be reasonable
+	 * to override for won't be seen while inside a function.
 	 *
 	 * Note: variables marked GUC_NOT_WHILE_SEC_REST should usually be marked
 	 * GUC_NO_RESET_ALL as well, because ResetAllOptions() doesn't check this.
 	 * An exception might be made if the reset value is assumed to be "safe".
 	 *
 	 * Note: this flag is currently used for "session_authorization" and
-	 * "role".  We need to prohibit changing these inside a local userid
+	 * "role".	We need to prohibit changing these inside a local userid
 	 * context because when we exit it, GUC won't be notified, leaving things
 	 * out of sync.  (This could be fixed by forcing a new GUC nesting level,
-	 * but that would change behavior in possibly-undesirable ways.)  Also,
-	 * we prohibit changing these in a security-restricted operation because
+	 * but that would change behavior in possibly-undesirable ways.)  Also, we
+	 * prohibit changing these in a security-restricted operation because
 	 * otherwise RESET could be used to regain the session user's privileges.
 	 */
 	if (record->flags & GUC_NOT_WHILE_SEC_REST)
@@ -4779,8 +4779,8 @@ set_config_option(const char *name, const char *value,
 		if (InLocalUserIdChange())
 		{
 			/*
-			 * Phrasing of this error message is historical, but it's the
-			 * most common case.
+			 * Phrasing of this error message is historical, but it's the most
+			 * common case.
 			 */
 			ereport(elevel,
 					(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@@ -6132,8 +6132,8 @@ ShowAllGUCConfig(DestReceiver *dest)
 	int			i;
 	TupOutputState *tstate;
 	TupleDesc	tupdesc;
-	Datum	    values[3];
-	bool		isnull[3] = { false, false, false };
+	Datum		values[3];
+	bool		isnull[3] = {false, false, false};
 
 	/* need a tuple descriptor representing three TEXT columns */
 	tupdesc = CreateTemplateTupleDesc(3, false);
@@ -6150,7 +6150,7 @@ ShowAllGUCConfig(DestReceiver *dest)
 	for (i = 0; i < num_guc_variables; i++)
 	{
 		struct config_generic *conf = guc_variables[i];
-		char   *setting;
+		char	   *setting;
 
 		if ((conf->flags & GUC_NO_SHOW_ALL) ||
 			((conf->flags & GUC_SUPERUSER_ONLY) && !am_superuser))
@@ -7591,7 +7591,7 @@ assign_transaction_read_only(bool newval, bool doit, GucSource source)
 	{
 		ereport(GUC_complaint_elevel(source),
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				 errmsg("cannot set transaction read-write mode during recovery")));
+		  errmsg("cannot set transaction read-write mode during recovery")));
 		/* source == PGC_S_OVERRIDE means do it anyway, eg at xact abort */
 		if (source != PGC_S_OVERRIDE)
 			return false;
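
Most of the guc.c churn above just re-indents continuation string literals inside gettext_noop(), errmsg() and friends. Those edits are behavior-preserving because adjacent C string literals are concatenated by the compiler no matter how they are indented, which this tiny standalone example (reusing the recovery_connections description text from the hunk) demonstrates.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* adjacent literals become one string; the indentation is irrelevant */
	const char *msg = "During recovery, allows connections and queries. "
		" During normal running, causes additional info to be written"
		" to WAL to enable hot standby mode on WAL standby nodes.";

	printf("%zu bytes:\n%s\n", strlen(msg), msg);
	return 0;
}
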
diff --git a/src/backend/utils/misc/rbtree.c b/src/backend/utils/misc/rbtree.c
index 9211a8704b3cab17dfb1a979de4d137ccd592c5c..b5da48dd9c04af191e2ace7b9a73aa8df5854061 100644
--- a/src/backend/utils/misc/rbtree.c
+++ b/src/backend/utils/misc/rbtree.c
@@ -13,14 +13,14 @@
  *
  * Red-black trees are a type of balanced binary tree wherein (1) any child of
  * a red node is always black, and (2) every path from root to leaf traverses
- * an equal number of black nodes.  From these properties, it follows that the
+ * an equal number of black nodes.	From these properties, it follows that the
  * longest path from root to leaf is only about twice as long as the shortest,
  * so lookups are guaranteed to run in O(lg n) time.
  *
  * Copyright (c) 1996-2009, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/misc/rbtree.c,v 1.2 2010/02/11 22:17:27 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/misc/rbtree.c,v 1.3 2010/02/26 02:01:14 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -35,7 +35,7 @@
 /*
  * Values for RBNode->iteratorState
  */
-#define InitialState 	(0)
+#define InitialState	(0)
 #define FirstStepDone	(1)
 #define SecondStepDone	(2)
 #define ThirdStepDone	(3)
@@ -49,13 +49,13 @@
 typedef struct RBNode
 {
 	uint32		iteratorState:2,
-				color:	1 ,
-				unused: 29;
+	color:		1,
+				unused:29;
 	struct RBNode *left;
 	struct RBNode *right;
 	struct RBNode *parent;
 	void	   *data;
-}	RBNode;
+} RBNode;
 
 struct RBTree
 {
@@ -86,7 +86,7 @@ RBNode		sentinel = {InitialState, RBBLACK, 0, RBNIL, RBNIL, NULL, NULL};
 
 RBTree *
 rb_create(rb_comparator comparator, rb_appendator appendator,
-				  rb_freefunc freefunc, void *arg)
+		  rb_freefunc freefunc, void *arg)
 {
 	RBTree	   *tree = palloc(sizeof(RBTree));
 
@@ -94,6 +94,7 @@ rb_create(rb_comparator comparator, rb_appendator appendator,
 	tree->comparator = comparator;
 	tree->appendator = appendator;
 	tree->freefunc = freefunc;
+
 	tree->arg = arg;
 
 	return tree;
@@ -205,10 +206,10 @@ rb_rotate_right(RBTree *rb, RBNode *x)
 /*
  * Maintain Red-Black tree balance after inserting node x.
  *
- * The newly inserted node is always initially marked red.  That may lead to
+ * The newly inserted node is always initially marked red.	That may lead to
  * a situation where a red node has a red child, which is prohibited.  We can
  * always fix the problem by a series of color changes and/or "rotations",
- * which move the problem progressively higher up in the tree.  If one of the
+ * which move the problem progressively higher up in the tree.	If one of the
  * two red nodes is the root, we can always fix the problem by changing the
  * root from red to black.
  *
@@ -219,8 +220,8 @@ static void
 rb_insert_fixup(RBTree *rb, RBNode *x)
 {
 	/*
-	 * x is always a red node.  Initially, it is the newly inserted node.
-	 * Each iteration of this loop moves it higher up in the tree.
+	 * x is always a red node.	Initially, it is the newly inserted node. Each
+	 * iteration of this loop moves it higher up in the tree.
 	 */
 	while (x != rb->root && x->parent->color == RBRED)
 	{
@@ -234,11 +235,11 @@ rb_insert_fixup(RBTree *rb, RBNode *x)
 		 * grandparent still has a problem.
 		 *
 		 * If the uncle is black, we will perform one or two "rotations" to
-		 * balance the tree.  Either x or x->parent will take the grandparent's
-		 * position in the tree and recolored black, and the original
-		 * grandparent will be recolored red and become a child of that node.
-		 * This always leaves us with a valid red-black tree, so the loop
-		 * will terminate.
+		 * balance the tree.  Either x or x->parent will take the
+		 * grandparent's position in the tree and be recolored black, and the
+		 * original grandparent will be recolored red and become a child of
+		 * that node. This always leaves us with a valid red-black tree, so
+		 * the loop will terminate.
 		 */
 		if (x->parent == x->parent->parent->left)
 		{
@@ -250,6 +251,7 @@ rb_insert_fixup(RBTree *rb, RBNode *x)
 				x->parent->color = RBBLACK;
 				y->color = RBBLACK;
 				x->parent->parent->color = RBRED;
+
 				x = x->parent->parent;
 			}
 			else
@@ -265,6 +267,7 @@ rb_insert_fixup(RBTree *rb, RBNode *x)
 				/* recolor and rotate */
 				x->parent->color = RBBLACK;
 				x->parent->parent->color = RBRED;
+
 				rb_rotate_right(rb, x->parent->parent);
 			}
 		}
@@ -279,6 +282,7 @@ rb_insert_fixup(RBTree *rb, RBNode *x)
 				x->parent->color = RBBLACK;
 				y->color = RBBLACK;
 				x->parent->parent->color = RBRED;
+
 				x = x->parent->parent;
 			}
 			else
@@ -291,6 +295,7 @@ rb_insert_fixup(RBTree *rb, RBNode *x)
 				}
 				x->parent->color = RBBLACK;
 				x->parent->parent->color = RBRED;
+
 				rb_rotate_left(rb, x->parent->parent);
 			}
 		}
@@ -355,6 +360,7 @@ rb_insert(RBTree *rb, void *data)
 	x->left = RBNIL;
 	x->right = RBNIL;
 	x->color = RBRED;
+
 	x->iteratorState = InitialState;
 
 	/* insert node in tree */
@@ -392,11 +398,11 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
 	while (x != rb->root && x->color == RBBLACK)
 	{
 		/*
-		 * Left and right cases are symmetric.  Any nodes that are children
-		 * of x have a black-height one less than the remainder of the nodes
-		 * in the tree.  We rotate and recolor nodes to move the problem up
-		 * the tree: at some stage we'll either fix the problem, or reach the
-		 * root (where the black-height is allowed to decrease).
+		 * Left and right cases are symmetric.	Any nodes that are children of
+		 * x have a black-height one less than the remainder of the nodes in
+		 * the tree.  We rotate and recolor nodes to move the problem up the
+		 * tree: at some stage we'll either fix the problem, or reach the root
+		 * (where the black-height is allowed to decrease).
 		 */
 		if (x == x->parent->left)
 		{
@@ -406,6 +412,7 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
 			{
 				w->color = RBBLACK;
 				x->parent->color = RBRED;
+
 				rb_rotate_left(rb, x->parent);
 				w = x->parent->right;
 			}
@@ -413,6 +420,7 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
 			if (w->left->color == RBBLACK && w->right->color == RBBLACK)
 			{
 				w->color = RBRED;
+
 				x = x->parent;
 			}
 			else
@@ -421,14 +429,16 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
 				{
 					w->left->color = RBBLACK;
 					w->color = RBRED;
+
 					rb_rotate_right(rb, w);
 					w = x->parent->right;
 				}
 				w->color = x->parent->color;
 				x->parent->color = RBBLACK;
 				w->right->color = RBBLACK;
+
 				rb_rotate_left(rb, x->parent);
-				x = rb->root;		/* Arrange for loop to terminate. */
+				x = rb->root;	/* Arrange for loop to terminate. */
 			}
 		}
 		else
@@ -439,6 +449,7 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
 			{
 				w->color = RBBLACK;
 				x->parent->color = RBRED;
+
 				rb_rotate_right(rb, x->parent);
 				w = x->parent->left;
 			}
@@ -446,6 +457,7 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
 			if (w->right->color == RBBLACK && w->left->color == RBBLACK)
 			{
 				w->color = RBRED;
+
 				x = x->parent;
 			}
 			else
@@ -454,14 +466,16 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
 				{
 					w->right->color = RBBLACK;
 					w->color = RBRED;
+
 					rb_rotate_left(rb, w);
 					w = x->parent->left;
 				}
 				w->color = x->parent->color;
 				x->parent->color = RBBLACK;
 				w->left->color = RBBLACK;
+
 				rb_rotate_right(rb, x->parent);
-				x = rb->root;		/* Arrange for loop to terminate. */
+				x = rb->root;	/* Arrange for loop to terminate. */
 			}
 		}
 	}
@@ -519,9 +533,8 @@ rb_delete_node(RBTree *rb, RBNode *z)
 	}
 
 	/*
-	 * If we removed the tree successor of z rather than z itself, then
-	 * attach the data for the removed node to the one we were supposed to
-	 * remove.
+	 * If we removed the tree successor of z rather than z itself, then attach
+	 * the data for the removed node to the one we were supposed to remove.
 	 */
 	if (y != z)
 		z->data = y->data;
@@ -550,7 +563,8 @@ rb_delete(RBTree *rb, void *data)
 		{
 			/* found node to delete */
 			if (rb->freefunc)
-				rb->freefunc(node->data);
+				rb->freefunc (node->data);
+
 			node->data = NULL;
 			rb_delete_node(rb, node);
 			return;
@@ -756,16 +770,16 @@ rb_begin_iterate(RBTree *rb, RBOrderControl ctrl)
 
 	switch (ctrl)
 	{
-		case LeftRightWalk:			/* visit left, then self, then right */
+		case LeftRightWalk:		/* visit left, then self, then right */
 			iterator->iterate = rb_left_right_iterator;
 			break;
-		case RightLeftWalk:			/* visit right, then self, then left */
+		case RightLeftWalk:		/* visit right, then self, then left */
 			iterator->iterate = rb_right_left_iterator;
 			break;
-		case DirectWalk:			/* visit self, then left, then right */
+		case DirectWalk:		/* visit self, then left, then right */
 			iterator->iterate = rb_direct_iterator;
 			break;
-		case InvertedWalk:			/* visit left, then right, then self */
+		case InvertedWalk:		/* visit left, then right, then self */
 			iterator->iterate = rb_inverted_iterator;
 			break;
 		default:
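
The rewrapped comments in rbtree.c restate the two invariants the fixup code maintains: a red node never has a red child, and every root-to-leaf path crosses the same number of black nodes. The following toy checker is a self-contained illustration of those properties only; its Node type is an assumption for the example and deliberately ignores the backend's RBNode layout, parent links, and sentinel.

#include <stdio.h>
#include <stdlib.h>

typedef enum { RED, BLACK } Color;

typedef struct Node
{
	Color		color;
	struct Node *left;
	struct Node *right;
} Node;

/*
 * Returns the black-height of the subtree rooted at n (counting NULL leaves
 * as black), or -1 if either red-black property is violated below n.
 */
static int
check_rb(const Node *n)
{
	int			lh;
	int			rh;

	if (n == NULL)
		return 1;				/* NULL leaves count as black */

	/* property 1: a red node must not have a red child */
	if (n->color == RED &&
		((n->left && n->left->color == RED) ||
		 (n->right && n->right->color == RED)))
		return -1;

	lh = check_rb(n->left);
	rh = check_rb(n->right);

	/* property 2: both sides must contribute the same black-height */
	if (lh < 0 || rh < 0 || lh != rh)
		return -1;

	return lh + (n->color == BLACK ? 1 : 0);
}

int
main(void)
{
	/* hand-built valid tree: a black root with two red children */
	Node		l = {RED, NULL, NULL};
	Node		r = {RED, NULL, NULL};
	Node		root = {BLACK, &l, &r};

	printf("black-height: %d\n", check_rb(&root));	/* prints 2 */
	return 0;
}
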
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 718451a2b4f10dbec55b0738eff311cad2575b88..197f1fcd141cfe77b76a90247b5ec1bb84c8ec84 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -11,7 +11,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.82 2010/01/02 16:57:58 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.83 2010/02/26 02:01:14 momjian Exp $
  *
  * NOTE:
  *	This is a new (Feb. 05, 1999) implementation of the allocation set
@@ -286,13 +286,13 @@ AllocSetFreeIndex(Size size)
 		tsize = (size - 1) >> ALLOC_MINBITS;
 
 		/*
-		 * At this point we need to obtain log2(tsize)+1, ie, the number
-		 * of not-all-zero bits at the right.  We used to do this with a
-		 * shift-and-count loop, but this function is enough of a hotspot
-		 * to justify micro-optimization effort.  The best approach seems
-		 * to be to use a lookup table.  Note that this code assumes that
-		 * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes
-		 * of the tsize value.
+		 * At this point we need to obtain log2(tsize)+1, ie, the number of
+		 * not-all-zero bits at the right.	We used to do this with a
+		 * shift-and-count loop, but this function is enough of a hotspot to
+		 * justify micro-optimization effort.  The best approach seems to be
+		 * to use a lookup table.  Note that this code assumes that
+		 * ALLOCSET_NUM_FREELISTS <= 17, since we only cope with two bytes of
+		 * the tsize value.
 		 */
 		t = tsize >> 8;
 		idx = t ? LogTable256[t] + 8 : LogTable256[tsize];
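
The AllocSetFreeIndex() comment above explains the lookup-table trick: the free-list index is log2(tsize) + 1, read out of a 256-entry byte table in at most two probes. Here is a self-contained sketch of that technique; the table is built at startup instead of being hard-coded, and ALLOC_MINBITS = 3 is an assumption for the example rather than a value copied from aset.c.

#include <stdio.h>
#include <stddef.h>

#define ALLOC_MINBITS 3			/* assumed smallest chunk is 8 bytes */

static unsigned char LogTable256[256];

/* Fill LogTable256[i] with floor(log2(i)) + 1, leaving entry 0 as 0. */
static void
build_log_table(void)
{
	int			i;

	for (i = 1; i < 256; i++)
		LogTable256[i] = LogTable256[i / 2] + 1;
}

/* Free-list index for a request of 'size' bytes, as the comment describes. */
static int
free_index(size_t size)
{
	unsigned int tsize;
	unsigned int t;

	if (size <= (1 << ALLOC_MINBITS))
		return 0;				/* smallest chunk class */

	tsize = (unsigned int) ((size - 1) >> ALLOC_MINBITS);

	/*
	 * log2(tsize) + 1 via at most two table probes: if any high-byte bit is
	 * set, look up the high byte and add 8, otherwise look up the low byte.
	 * Works for tsize < 2^16, i.e. for at most 17 freelists.
	 */
	t = tsize >> 8;
	return t ? LogTable256[t] + 8 : LogTable256[tsize];
}

int
main(void)
{
	build_log_table();
	/* prints "0 3 7": chunk classes for 8, 64 and 1024 bytes */
	printf("%d %d %d\n", free_index(8), free_index(64), free_index(1024));
	return 0;
}
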
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 58d9da4301f213e13bed5ff92f8c63f84289e388..f7b26cdd974e55573f20f7e50c2e7fcadeb42787 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.117 2010/02/18 03:06:46 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.118 2010/02/26 02:01:14 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -330,9 +330,9 @@ PortalReleaseCachedPlan(Portal portal)
 		portal->cplan = NULL;
 
 		/*
-		 * We must also clear portal->stmts which is now a dangling
-		 * reference to the cached plan's plan list.  This protects any
-		 * code that might try to examine the Portal later.
+		 * We must also clear portal->stmts which is now a dangling reference
+		 * to the cached plan's plan list.  This protects any code that might
+		 * try to examine the Portal later.
 		 */
 		portal->stmts = NIL;
 	}
@@ -822,16 +822,16 @@ AtSubAbort_Portals(SubTransactionId mySubid,
 
 		/*
 		 * Any resources belonging to the portal will be released in the
-		 * upcoming transaction-wide cleanup; they will be gone before we
-		 * run PortalDrop.
+		 * upcoming transaction-wide cleanup; they will be gone before we run
+		 * PortalDrop.
 		 */
 		portal->resowner = NULL;
 
 		/*
-		 * Although we can't delete the portal data structure proper, we
-		 * can release any memory in subsidiary contexts, such as executor
-		 * state.  The cleanup hook was the last thing that might have
-		 * needed data there.
+		 * Although we can't delete the portal data structure proper, we can
+		 * release any memory in subsidiary contexts, such as executor state.
+		 * The cleanup hook was the last thing that might have needed data
+		 * there.
 		 */
 		MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
 	}
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 08e19967876ee20c8c3a1160f285052c9f986ea6..11ce8edad5d3c9b227d0860aabb674193f838a30 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -91,7 +91,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.94 2010/01/02 16:57:58 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.95 2010/02/26 02:01:15 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -2797,8 +2797,8 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b,
 	 */
 	if (state->enforceUnique && !equal_hasnull && tuple1 != tuple2)
 	{
-		Datum	values[INDEX_MAX_KEYS];
-		bool	isnull[INDEX_MAX_KEYS];
+		Datum		values[INDEX_MAX_KEYS];
+		bool		isnull[INDEX_MAX_KEYS];
 
 		index_deform_tuple(tuple1, tupDes, values, isnull);
 		ereport(ERROR,
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 6bc35153ab8e3c166f533fb0be9ce7442025e187..b752d677718183db7b1c4b478f33fe2c53354f87 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -47,7 +47,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.50 2010/01/02 16:57:58 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.51 2010/02/26 02:01:15 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -586,7 +586,7 @@ tuplestore_putvalues(Tuplestorestate *state, TupleDesc tdesc,
 
 	tuplestore_puttuple_common(state, (void *) tuple);
 
-	MemoryContextSwitchTo(oldcxt);	
+	MemoryContextSwitchTo(oldcxt);
 }
 
 static void
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index c10472b892c48062e0c6416073d12d20eec459e5..6d22e6cd63cfdf970a8453be6270f1e7ebfdd5a3 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -19,7 +19,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/time/snapmgr.c,v 1.14 2010/01/02 16:57:58 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/time/snapmgr.c,v 1.15 2010/02/26 02:01:15 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -226,9 +226,9 @@ CopySnapshot(Snapshot snapshot)
 
 	/*
 	 * Setup subXID array. Don't bother to copy it if it had overflowed,
-	 * though, because it's not used anywhere in that case. Except if it's
-	 * a snapshot taken during recovery; all the top-level XIDs are in subxip
-	 * as well in that case, so we mustn't lose them.
+	 * though, because it's not used anywhere in that case. Except if it's a
+	 * snapshot taken during recovery; all the top-level XIDs are in subxip as
+	 * well in that case, so we mustn't lose them.
 	 */
 	if (snapshot->subxcnt > 0 &&
 		(!snapshot->suboverflowed || snapshot->takenDuringRecovery))
@@ -263,7 +263,7 @@ FreeSnapshot(Snapshot snapshot)
  *
  * If the passed snapshot is a statically-allocated one, or it is possibly
  * subject to a future command counter update, create a new long-lived copy
- * with active refcount=1.  Otherwise, only increment the refcount.
+ * with active refcount=1.	Otherwise, only increment the refcount.
  */
 void
 PushActiveSnapshot(Snapshot snap)
@@ -275,8 +275,8 @@ PushActiveSnapshot(Snapshot snap)
 	newactive = MemoryContextAlloc(TopTransactionContext, sizeof(ActiveSnapshotElt));
 
 	/*
-	 * Checking SecondarySnapshot is probably useless here, but it seems better
-	 * to be sure.
+	 * Checking SecondarySnapshot is probably useless here, but it seems
+	 * better to be sure.
 	 */
 	if (snap == CurrentSnapshot || snap == SecondarySnapshot || !snap->copied)
 		newactive->as_snap = CopySnapshot(snap);
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 794007c5e5488980f5749e83be17234a30646b60..bc19df813f3652fc38af0482cac875bb53e2c77a 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -50,7 +50,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.117 2010/02/08 14:10:21 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.118 2010/02/26 02:01:15 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -93,7 +93,7 @@ static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
  * Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
  * we can always set the hint bits, since pre-9.0 VACUUM FULL always used
  * synchronous commits and didn't move tuples that weren't previously
- * hinted.  (This is not known by this subroutine, but is applied by its
+ * hinted.	(This is not known by this subroutine, but is applied by its
  * callers.)  Note: old-style VACUUM FULL is gone, but we have to keep this
  * module's support for MOVED_OFF/MOVED_IN flag bits for as long as we
  * support in-place update from pre-9.0 databases.
@@ -1274,17 +1274,17 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
 		return true;
 
 	/*
-	 * Snapshot information is stored slightly differently in snapshots
-	 * taken during recovery.
+	 * Snapshot information is stored slightly differently in snapshots taken
+	 * during recovery.
 	 */
 	if (!snapshot->takenDuringRecovery)
 	{
 		/*
-		 * If the snapshot contains full subxact data, the fastest way to check
-		 * things is just to compare the given XID against both subxact XIDs and
-		 * top-level XIDs.	If the snapshot overflowed, we have to use pg_subtrans
-		 * to convert a subxact XID to its parent XID, but then we need only look
-		 * at top-level XIDs not subxacts.
+		 * If the snapshot contains full subxact data, the fastest way to
+		 * check things is just to compare the given XID against both subxact
+		 * XIDs and top-level XIDs.  If the snapshot overflowed, we have to
+		 * use pg_subtrans to convert a subxact XID to its parent XID, but
+		 * then we need only look at top-level XIDs not subxacts.
 		 */
 		if (!snapshot->suboverflowed)
 		{
@@ -1305,8 +1305,9 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
 			xid = SubTransGetTopmostTransaction(xid);
 
 			/*
-			 * If xid was indeed a subxact, we might now have an xid < xmin, so
-			 * recheck to avoid an array scan.	No point in rechecking xmax.
+			 * If xid was indeed a subxact, we might now have an xid < xmin,
+			 * so recheck to avoid an array scan.  No point in rechecking
+			 * xmax.
 			 */
 			if (TransactionIdPrecedes(xid, snapshot->xmin))
 				return false;
@@ -1323,9 +1324,9 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
 		int32		j;
 
 		/*
-		 * In recovery we store all xids in the subxact array because it
-		 * is by far the bigger array, and we mostly don't know which xids
-		 * are top-level and which are subxacts. The xip array is empty.
+		 * In recovery we store all xids in the subxact array because it is by
+		 * far the bigger array, and we mostly don't know which xids are
+		 * top-level and which are subxacts. The xip array is empty.
 		 *
 		 * We start by searching subtrans, if we overflowed.
 		 */
@@ -1335,8 +1336,9 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
 			xid = SubTransGetTopmostTransaction(xid);
 
 			/*
-			 * If xid was indeed a subxact, we might now have an xid < xmin, so
-			 * recheck to avoid an array scan.	No point in rechecking xmax.
+			 * If xid was indeed a subxact, we might now have an xid < xmin,
+			 * so recheck to avoid an array scan.  No point in rechecking
+			 * xmax.
 			 */
 			if (TransactionIdPrecedes(xid, snapshot->xmin))
 				return false;
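
The XidInMVCCSnapshot() comments above distinguish two snapshot layouts: normally top-level XIDs live in xip and subtransaction XIDs in subxip, while a snapshot taken during recovery keeps everything in subxip and leaves xip empty. The sketch below is a deliberately simplified, self-contained version of that membership test over plain integers; it omits the pg_subtrans lookup for overflowed snapshots and the wraparound-aware XID comparisons the real code needs, and the ToySnapshot type is invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct
{
	uint32_t	xmin;			/* every xid < xmin had finished */
	uint32_t	xmax;			/* every xid >= xmax started later */
	const uint32_t *xip;		/* in-progress top-level xids */
	int			xcnt;
	const uint32_t *subxip;		/* in-progress subtransaction xids */
	int			subxcnt;
	bool		takenDuringRecovery;
} ToySnapshot;

/* true if xid was still running when the snapshot was taken */
static bool
xid_in_snapshot(uint32_t xid, const ToySnapshot *snap)
{
	int			i;

	if (xid < snap->xmin)
		return false;			/* definitely finished */
	if (xid >= snap->xmax)
		return true;			/* started after the snapshot */

	if (!snap->takenDuringRecovery)
	{
		/* normal case: check subxacts first, then top-level xids */
		for (i = 0; i < snap->subxcnt; i++)
			if (snap->subxip[i] == xid)
				return true;
		for (i = 0; i < snap->xcnt; i++)
			if (snap->xip[i] == xid)
				return true;
	}
	else
	{
		/* recovery case: every known xid lives in subxip; xip is empty */
		for (i = 0; i < snap->subxcnt; i++)
			if (snap->subxip[i] == xid)
				return true;
	}
	return false;
}

int
main(void)
{
	uint32_t	xip[] = {105};
	uint32_t	subxip[] = {107};
	ToySnapshot snap = {100, 110, xip, 1, subxip, 1, false};

	printf("%d %d %d\n",
		   xid_in_snapshot(99, &snap),	/* 0: before xmin */
		   xid_in_snapshot(105, &snap), /* 1: listed in xip */
		   xid_in_snapshot(106, &snap));	/* 0: finished by snapshot time */
	return 0;
}
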
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index 733d8ef74d2088f8d7b04c68d224a11a493b9ed0..0aee70de1d66c520fb4a3f5d7873b1af8a00f29a 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -42,7 +42,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  * Portions taken from FreeBSD.
  *
- * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.185 2010/02/16 22:34:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.186 2010/02/26 02:01:15 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1215,8 +1215,8 @@ setup_config(void)
 		if (err != 0 ||
 			getaddrinfo("::1", NULL, &hints, &gai_result) != 0)
 			conflines = replace_token(conflines,
-									  "host    all             all             ::1",
-									  "#host    all             all             ::1");
+							   "host    all             all             ::1",
+							 "#host    all             all             ::1");
 	}
 #else							/* !HAVE_IPV6 */
 	/* If we didn't compile IPV6 support at all, always comment it out */
@@ -2345,7 +2345,7 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo)
 	}
 
 #ifndef __CYGWIN__
-    AddUserToTokenDacl(restrictedToken);
+	AddUserToTokenDacl(restrictedToken);
 #endif
 
 	if (!CreateProcessAsUser(restrictedToken,
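
The initdb.c hunk above only re-wraps the replace_token() call that comments out the "::1" line in the generated pg_hba.conf when IPv6 does not work. The underlying probe is just a getaddrinfo() call on the IPv6 loopback literal; a standalone version of that check might look like the following, where the hints setup is an assumption for the example rather than a copy of initdb's.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>

int
main(void)
{
	struct addrinfo hints;
	struct addrinfo *result = NULL;
	int			err;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_flags = AI_NUMERICHOST;	/* "::1" is a literal address */

	err = getaddrinfo("::1", NULL, &hints, &result);
	if (err == 0)
	{
		printf("IPv6 loopback resolvable; keep the ::1 line enabled\n");
		freeaddrinfo(result);
	}
	else
		printf("no usable IPv6 (%s); comment the ::1 line out\n",
			   gai_strerror(err));
	return 0;
}
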
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index 23d32e64697e6aaaac4e4127c4bdbfd9b44decf7..04b8b2f0a0a50945d1e580163c7e4b1f902f94eb 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -4,7 +4,7 @@
  *
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.119 2010/02/19 14:12:19 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.120 2010/02/26 02:01:16 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -651,7 +651,7 @@ find_other_exec_or_die(const char *argv0, const char *target, const char *versio
 static void
 do_init(void)
 {
-	char cmd[MAXPGPATH];
+	char		cmd[MAXPGPATH];
 
 	if (exec_path == NULL)
 		exec_path = find_other_exec_or_die(argv0, "initdb", "initdb (PostgreSQL) " PG_VERSION "\n");
@@ -668,7 +668,7 @@ do_init(void)
 	else
 		snprintf(cmd, MAXPGPATH, SYSTEMQUOTE "\"%s\" %s%s > \"%s\"" SYSTEMQUOTE,
 				 exec_path, pgdata_opt, post_opts, DEVNULL);
-	
+
 	if (system(cmd) != 0)
 	{
 		write_stderr(_("%s: database system initialization failed\n"), progname);
@@ -1605,7 +1605,7 @@ do_help(void)
 #endif
 	printf(_("  -l, --log FILENAME     write (or append) server log to FILENAME\n"));
 	printf(_("  -o OPTIONS             command line options to pass to postgres\n"
-			 "                         (PostgreSQL server executable) or initdb\n"));
+	 "                         (PostgreSQL server executable) or initdb\n"));
 	printf(_("  -p PATH-TO-POSTGRES    normally not necessary\n"));
 	printf(_("\nOptions for stop or restart:\n"));
 	printf(_("  -m SHUTDOWN-MODE   can be \"smart\", \"fast\", or \"immediate\"\n"));
diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c
index 3452944a426badb2d8e754e7465db3275f68a59a..82b3cb2688368fdffa0d3a7bd46cc1f0d291d072 100644
--- a/src/bin/pg_dump/dumputils.c
+++ b/src/bin/pg_dump/dumputils.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.54 2010/02/18 01:29:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.55 2010/02/26 02:01:16 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -343,10 +343,10 @@ appendByteaLiteral(PQExpBuffer buf, const unsigned char *str, size_t length,
 	static const char hextbl[] = "0123456789abcdef";
 
 	/*
-	 * This implementation is hard-wired to produce hex-format output.
-	 * We do not know the server version the output will be loaded into,
-	 * so making an intelligent format choice is impossible.  It might be
-	 * better to always use the old escaped format.
+	 * This implementation is hard-wired to produce hex-format output. We do
+	 * not know the server version the output will be loaded into, so making
+	 * an intelligent format choice is impossible.	It might be better to
+	 * always use the old escaped format.
 	 */
 	if (!enlargePQExpBuffer(buf, 2 * length + 5))
 		return;
@@ -611,7 +611,7 @@ buildACLCommands(const char *name, const char *subname,
 										  fmtId(grantee->data));
 					if (privswgo->len > 0)
 						appendPQExpBuffer(firstsql,
-							  "%sGRANT %s ON %s %s TO %s WITH GRANT OPTION;\n",
+							"%sGRANT %s ON %s %s TO %s WITH GRANT OPTION;\n",
 										  prefix, privswgo->data, type, name,
 										  fmtId(grantee->data));
 				}
@@ -712,9 +712,9 @@ buildDefaultACLCommands(const char *type, const char *nspname,
 
 	/*
 	 * We incorporate the target role directly into the command, rather than
-	 * playing around with SET ROLE or anything like that.  This is so that
-	 * a permissions error leads to nothing happening, rather than
-	 * changing default privileges for the wrong user.
+	 * playing around with SET ROLE or anything like that.	This is so that a
+	 * permissions error leads to nothing happening, rather than changing
+	 * default privileges for the wrong user.
 	 */
 	appendPQExpBuffer(prefix, "ALTER DEFAULT PRIVILEGES FOR ROLE %s ",
 					  fmtId(owner));
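
The appendByteaLiteral() comment above notes that pg_dump is hard-wired to emit hex-format bytea, built with a 16-entry nibble table. Below is a self-contained sketch of that encoding without the PQExpBuffer machinery: it produces the \x-prefixed digit string that ends up inside the quoted literal. The helper name and exact buffer sizing are choices made for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Encode 'length' bytes as the body of a hex-format bytea literal:
 * a leading \x followed by two lowercase hex digits per byte.
 */
static char *
bytea_hex(const unsigned char *str, size_t length)
{
	static const char hextbl[] = "0123456789abcdef";
	char	   *out = malloc(2 * length + 3);	/* "\x" + digits + NUL */
	char	   *p = out;
	size_t		i;

	if (out == NULL)
		return NULL;
	*p++ = '\\';
	*p++ = 'x';
	for (i = 0; i < length; i++)
	{
		*p++ = hextbl[(str[i] >> 4) & 0xF];
		*p++ = hextbl[str[i] & 0xF];
	}
	*p = '\0';
	return out;
}

int
main(void)
{
	const unsigned char data[] = {0x00, 0xde, 0xad, 0xbe, 0xef};
	char	   *lit = bytea_hex(data, sizeof(data));

	printf("'%s'\n", lit);		/* prints '\x00deadbeef' */
	free(lit);
	return 0;
}
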
diff --git a/src/bin/pg_dump/dumputils.h b/src/bin/pg_dump/dumputils.h
index f8cb3778974cab296247924d6808ca8c66bf2e6b..298b687cd1da3970541d38337145513414d9907e 100644
--- a/src/bin/pg_dump/dumputils.h
+++ b/src/bin/pg_dump/dumputils.h
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.h,v 1.28 2010/01/02 16:57:59 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.h,v 1.29 2010/02/26 02:01:16 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -28,8 +28,8 @@ extern void appendStringLiteralConn(PQExpBuffer buf, const char *str,
 extern void appendStringLiteralDQ(PQExpBuffer buf, const char *str,
 					  const char *dqprefix);
 extern void appendByteaLiteral(PQExpBuffer buf,
-							   const unsigned char *str, size_t length,
-							   bool std_strings);
+				   const unsigned char *str, size_t length,
+				   bool std_strings);
 extern int	parse_version(const char *versionString);
 extern bool parsePGArray(const char *atext, char ***itemarray, int *nitems);
 extern bool buildACLCommands(const char *name, const char *subname,
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index af290363ff9cae27ff86548cbf72c1a2af41766b..4d1205d2d145b703f46355032e2989314c2a11b2 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -15,7 +15,7 @@
  *
  *
  * IDENTIFICATION
- *		$PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.181 2010/02/24 02:42:54 tgl Exp $
+ *		$PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.182 2010/02/26 02:01:16 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -138,7 +138,7 @@ static void identify_locking_dependencies(TocEntry *te,
 							  TocEntry **tocsByDumpId,
 							  DumpId maxDumpId);
 static void reduce_dependencies(ArchiveHandle *AH, TocEntry *te,
-								TocEntry *ready_list);
+					TocEntry *ready_list);
 static void mark_create_done(ArchiveHandle *AH, TocEntry *te);
 static void inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te);
 static ArchiveHandle *CloneArchive(ArchiveHandle *AH);
@@ -339,7 +339,7 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
 
 			reqs = _tocEntryRequired(te, ropt, false /* needn't drop ACLs */ );
 			/* We want anything that's selected and has a dropStmt */
-			if (((reqs & (REQ_SCHEMA|REQ_DATA)) != 0) && te->dropStmt)
+			if (((reqs & (REQ_SCHEMA | REQ_DATA)) != 0) && te->dropStmt)
 			{
 				ahlog(AH, 1, "dropping %s %s\n", te->desc, te->tag);
 				/* Select owner and schema as necessary */
@@ -391,7 +391,7 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
 		reqs = _tocEntryRequired(te, ropt, true);
 
 		/* Both schema and data objects might now have ownership/ACLs */
-		if ((reqs & (REQ_SCHEMA|REQ_DATA)) != 0)
+		if ((reqs & (REQ_SCHEMA | REQ_DATA)) != 0)
 		{
 			ahlog(AH, 1, "setting owner and privileges for %s %s\n",
 				  te->desc, te->tag);
@@ -2311,11 +2311,11 @@ _tocEntryRequired(TocEntry *te, RestoreOptions *ropt, bool include_acls)
 	if (!te->hadDumper)
 	{
 		/*
-		 * Special Case: If 'SEQUENCE SET' or anything to do with BLOBs,
-		 * then it is considered a data entry.  We don't need to check for
-		 * the BLOBS entry or old-style BLOB COMMENTS, because they will
-		 * have hadDumper = true ... but we do need to check new-style
-		 * BLOB comments.
+		 * Special Case: If 'SEQUENCE SET' or anything to do with BLOBs, then
+		 * it is considered a data entry.  We don't need to check for the
+		 * BLOBS entry or old-style BLOB COMMENTS, because they will have
+		 * hadDumper = true ... but we do need to check new-style BLOB
+		 * comments.
 		 */
 		if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
 			strcmp(te->desc, "BLOB") == 0 ||
@@ -3197,13 +3197,13 @@ restore_toc_entries_parallel(ArchiveHandle *AH)
 	AH->currWithOids = -1;
 
 	/*
-	 * Initialize the lists of pending and ready items.  After this setup,
-	 * the pending list is everything that needs to be done but is blocked
-	 * by one or more dependencies, while the ready list contains items that
-	 * have no remaining dependencies.  Note: we don't yet filter out entries
-	 * that aren't going to be restored.  They might participate in
-	 * dependency chains connecting entries that should be restored, so we
-	 * treat them as live until we actually process them.
+	 * Initialize the lists of pending and ready items.  After this setup, the
+	 * pending list is everything that needs to be done but is blocked by one
+	 * or more dependencies, while the ready list contains items that have no
+	 * remaining dependencies.	Note: we don't yet filter out entries that
+	 * aren't going to be restored.  They might participate in dependency
+	 * chains connecting entries that should be restored, so we treat them as
+	 * live until we actually process them.
 	 */
 	par_list_header_init(&pending_list);
 	par_list_header_init(&ready_list);
@@ -3716,8 +3716,8 @@ fix_dependencies(ArchiveHandle *AH)
 	 * repeatedly.	Entries for dump IDs not present in the TOC will be NULL.
 	 *
 	 * NOTE: because maxDumpId is just the highest dump ID defined in the
-	 * archive, there might be dependencies for IDs > maxDumpId.  All uses
-	 * of this array must guard against out-of-range dependency numbers.
+	 * archive, there might be dependencies for IDs > maxDumpId.  All uses of
+	 * this array must guard against out-of-range dependency numbers.
 	 *
 	 * Also, initialize the depCount fields, and make sure all the TOC items
 	 * are marked as not being in any parallel-processing list.
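
The restore_toc_entries_parallel() and fix_dependencies() comments above describe the parallel-restore bookkeeping: each TOC entry carries a depCount of unsatisfied dependencies, entries whose count reaches zero move to the ready list, and finishing an entry decrements the count of everything that depends on it. The toy program below sketches just that counting scheme over a hard-coded dependency matrix; it assumes the graph is acyclic, as TOC dependencies are, and uses a flat array where the archiver uses linked pending/ready lists.

#include <stdio.h>

#define NITEMS 4

/* deps[i][j] != 0 means item i depends on item j */
static const int deps[NITEMS][NITEMS] = {
	{0, 0, 0, 0},				/* 0: no dependencies (say, a schema)      */
	{1, 0, 0, 0},				/* 1: depends on 0 (say, a table)          */
	{1, 1, 0, 0},				/* 2: depends on 0 and 1 (say, an index)   */
	{0, 1, 0, 0},				/* 3: depends on 1 (say, the table's data) */
};

int
main(void)
{
	int			depCount[NITEMS];
	int			done[NITEMS] = {0};
	int			i;
	int			j;
	int			remaining = NITEMS;

	/* initialize depCount, as fix_dependencies() does for TOC entries */
	for (i = 0; i < NITEMS; i++)
	{
		depCount[i] = 0;
		for (j = 0; j < NITEMS; j++)
			depCount[i] += deps[i][j];
	}

	/* repeatedly run anything whose dependencies are all satisfied */
	while (remaining > 0)
	{
		for (i = 0; i < NITEMS; i++)
		{
			if (done[i] || depCount[i] != 0)
				continue;
			printf("restoring item %d\n", i);
			done[i] = 1;
			remaining--;
			/* the reduce_dependencies() step: unblock waiters on item i */
			for (j = 0; j < NITEMS; j++)
				if (deps[j][i])
					depCount[j]--;
		}
	}
	return 0;
}
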
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index dc2ccb5e95a70bd9e2e3308a82cc7ec5dc3090cf..0a135ee126f474f98d728e60269c0df5403f62b4 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -17,7 +17,7 @@
  *
  *
  * IDENTIFICATION
- *		$PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.84 2010/02/18 01:29:10 tgl Exp $
+ *		$PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.85 2010/02/26 02:01:16 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -317,8 +317,8 @@ typedef struct _tocEntry
 	void	   *formatData;		/* TOC Entry data specific to file format */
 
 	/* working state (needed only for parallel restore) */
-	struct _tocEntry *par_prev;	/* list links for pending/ready items; */
-	struct _tocEntry *par_next;	/* these are NULL if not in either list */
+	struct _tocEntry *par_prev; /* list links for pending/ready items; */
+	struct _tocEntry *par_next; /* these are NULL if not in either list */
 	bool		created;		/* set for DATA member if TABLE was created */
 	int			depCount;		/* number of dependencies not yet restored */
 	DumpId	   *lockDeps;		/* dumpIds of objects this one needs lock on */
@@ -374,7 +374,7 @@ extern void InitArchiveFmt_Tar(ArchiveHandle *AH);
 extern bool isValidTarHeader(char *header);
 
 extern int	ReconnectToServer(ArchiveHandle *AH, const char *dbname, const char *newUser);
-extern void	DropBlobIfExists(ArchiveHandle *AH, Oid oid);
+extern void DropBlobIfExists(ArchiveHandle *AH, Oid oid);
 
 int			ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH);
 int			ahprintf(ArchiveHandle *AH, const char *fmt,...) __attribute__((format(printf, 2, 3)));
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index 37e6d2005df245c1e6c9c16739114fa118de30dd..37d1b742e418ab67fd21bf2fb39f74d7b3438a0f 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -5,7 +5,7 @@
  *	Implements the basic DB functions used by the archiver.
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.89 2010/02/24 02:42:55 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.90 2010/02/26 02:01:16 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -163,20 +163,20 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
 		if (!keywords || !values)
 			die_horribly(AH, modulename, "out of memory\n");
 
-		keywords[0]	= "host";
-		values[0]	= PQhost(AH->connection);
-		keywords[1]	= "port";
-		values[1]	= PQport(AH->connection);
-		keywords[2]	= "user";
-		values[2]	= newuser;
-		keywords[3]	= "password";
-		values[3]	= password;
-		keywords[4]	= "dbname";
-		values[4]	= newdb;
-		keywords[5]	= "fallback_application_name";
-		values[5]	= progname;
-		keywords[6]	= NULL;
-		values[6]	= NULL;
+		keywords[0] = "host";
+		values[0] = PQhost(AH->connection);
+		keywords[1] = "port";
+		values[1] = PQport(AH->connection);
+		keywords[2] = "user";
+		values[2] = newuser;
+		keywords[3] = "password";
+		values[3] = password;
+		keywords[4] = "dbname";
+		values[4] = newdb;
+		keywords[5] = "fallback_application_name";
+		values[5] = progname;
+		keywords[6] = NULL;
+		values[6] = NULL;
 
 		new_pass = false;
 		newConn = PQconnectdbParams(keywords, values, true);
@@ -270,20 +270,20 @@ ConnectDatabase(Archive *AHX,
 		if (!keywords || !values)
 			die_horribly(AH, modulename, "out of memory\n");
 
-		keywords[0]	= "host";
-		values[0]	= pghost;
-		keywords[1]	= "port";
-		values[1]	= pgport;
-		keywords[2]	= "user";
-		values[2]	= username;
-		keywords[3]	= "password";
-		values[3]	= password;
-		keywords[4]	= "dbname";
-		values[4]	= dbname;
-		keywords[5]	= "fallback_application_name";
-		values[5]	= progname;
-		keywords[6]	= NULL;
-		values[6]	= NULL;
+		keywords[0] = "host";
+		values[0] = pghost;
+		keywords[1] = "port";
+		values[1] = pgport;
+		keywords[2] = "user";
+		values[2] = username;
+		keywords[3] = "password";
+		values[3] = password;
+		keywords[4] = "dbname";
+		values[4] = dbname;
+		keywords[5] = "fallback_application_name";
+		values[5] = progname;
+		keywords[6] = NULL;
+		values[6] = NULL;
 
 		new_pass = false;
 		AH->connection = PQconnectdbParams(keywords, values, true);
@@ -757,4 +757,3 @@ _isDQChar(unsigned char c, bool atStart)
 	else
 		return false;
 }
-
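
The keyword/value re-alignment above sits inside the PQconnectdbParams() calls pg_dump and pg_restore use to (re)connect. A minimal standalone libpq client using the same parallel-array style is sketched below; the host, port, database, and application name values are placeholders, not anything taken from the source.

#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

int
main(void)
{
	/* parallel keyword/value arrays, terminated by NULLs as in the diff */
	const char *keywords[] = {
		"host", "port", "dbname", "fallback_application_name", NULL
	};
	const char *values[] = {
		"localhost", "5432", "postgres", "example_client", NULL
	};
	PGconn	   *conn;

	conn = PQconnectdbParams(keywords, values, 1 /* expand_dbname */ );
	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return EXIT_FAILURE;
	}

	printf("connected to %s as %s\n", PQdb(conn), PQuser(conn));
	PQfinish(conn);
	return EXIT_SUCCESS;
}
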
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index da35334636141dbca515a301dc3cd36427a5259c..2871fee15d37fa5af02059aad028fb32fad64a6c 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -16,7 +16,7 @@
  *
  *
  * IDENTIFICATION
- *		$PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.68 2010/02/23 16:55:22 tgl Exp $
+ *		$PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.69 2010/02/26 02:01:16 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -350,8 +350,8 @@ tarOpen(ArchiveHandle *AH, const char *filename, char mode)
 			if (filename)
 			{
 				/*
-				 * Couldn't find the requested file. Future:
-				 * do SEEK(0) and retry.
+				 * Couldn't find the requested file. Future: do SEEK(0) and
+				 * retry.
 				 */
 				die_horribly(AH, modulename, "could not find file \"%s\" in archive\n", filename);
 			}
@@ -1178,7 +1178,7 @@ _tarPositionTo(ArchiveHandle *AH, const char *filename)
 		id = atoi(th->targetFile);
 		if ((TocIDRequired(AH, id, AH->ropt) & REQ_DATA) != 0)
 			die_horribly(AH, modulename, "restoring data out of order is not supported in this archive format: "
-				"\"%s\" is required, but comes before \"%s\" in the archive file.\n",
+						 "\"%s\" is required, but comes before \"%s\" in the archive file.\n",
 						 th->targetFile, filename);
 
 		/* Header doesn't match, so read to next header */
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 95c08f11f97cff7d9d5346f70f5474a39c9fb218..950f7ffdbda6daabc4e8facd762b0ef891d989e3 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -12,7 +12,7 @@
  *	by PostgreSQL
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.574 2010/02/24 02:15:58 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.575 2010/02/26 02:01:16 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -196,11 +196,11 @@ static void dumpDatabase(Archive *AH);
 static void dumpEncoding(Archive *AH);
 static void dumpStdStrings(Archive *AH);
 static void binary_upgrade_set_type_oids_by_type_oid(
-					PQExpBuffer upgrade_buffer, Oid pg_type_oid);
+								PQExpBuffer upgrade_buffer, Oid pg_type_oid);
 static bool binary_upgrade_set_type_oids_by_rel_oid(
-					PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
+								 PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
 static void binary_upgrade_set_relfilenodes(PQExpBuffer upgrade_buffer,
-					Oid pg_class_oid, bool is_index);
+								Oid pg_class_oid, bool is_index);
 static const char *getAttrName(int attrnum, TableInfo *tblInfo);
 static const char *fmtCopyColumnList(const TableInfo *ti);
 static void do_sql_command(PGconn *conn, const char *query);
@@ -1778,8 +1778,8 @@ dumpDatabase(Archive *AH)
 				 NULL);			/* Dumper Arg */
 
 	/*
-	 *	pg_largeobject comes from the old system intact, so set
-	 *	its relfrozenxid.
+	 * pg_largeobject comes from the old system intact, so set its
+	 * relfrozenxid.
 	 */
 	if (binary_upgrade)
 	{
@@ -1789,9 +1789,9 @@ dumpDatabase(Archive *AH)
 		int			i_relfrozenxid;
 
 		appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid\n"
-							"FROM pg_catalog.pg_class\n"
-							"WHERE oid = %u;\n",
-							LargeObjectRelationId);
+						  "FROM pg_catalog.pg_class\n"
+						  "WHERE oid = %u;\n",
+						  LargeObjectRelationId);
 
 		lo_res = PQexec(g_conn, loFrozenQry->data);
 		check_sql_result(lo_res, g_conn, loFrozenQry->data, PGRES_TUPLES_OK);
@@ -1926,10 +1926,10 @@ dumpStdStrings(Archive *AH)
 static void
 getBlobs(Archive *AH)
 {
-	PQExpBuffer		blobQry = createPQExpBuffer();
-	BlobInfo	   *binfo;
+	PQExpBuffer blobQry = createPQExpBuffer();
+	BlobInfo   *binfo;
 	DumpableObject *bdata;
-	PGresult	   *res;
+	PGresult   *res;
 	int			ntups;
 	int			i;
 
@@ -2007,8 +2007,8 @@ getBlobs(Archive *AH)
 static void
 dumpBlob(Archive *AH, BlobInfo *binfo)
 {
-	PQExpBuffer		cquery = createPQExpBuffer();
-	PQExpBuffer		dquery = createPQExpBuffer();
+	PQExpBuffer cquery = createPQExpBuffer();
+	PQExpBuffer dquery = createPQExpBuffer();
 
 	appendPQExpBuffer(cquery,
 					  "SELECT pg_catalog.lo_create('%s');\n",
@@ -2068,8 +2068,8 @@ dumpBlobs(Archive *AH, void *arg)
 	selectSourceSchema("pg_catalog");
 
 	/*
-	 * Currently, we re-fetch all BLOB OIDs using a cursor.  Consider
-	 * scanning the already-in-memory dumpable objects instead...
+	 * Currently, we re-fetch all BLOB OIDs using a cursor.  Consider scanning
+	 * the already-in-memory dumpable objects instead...
 	 */
 	if (AH->remoteVersion >= 90000)
 		blobQry = "DECLARE bloboid CURSOR FOR SELECT oid FROM pg_largeobject_metadata";
@@ -2138,17 +2138,17 @@ dumpBlobs(Archive *AH, void *arg)
 
 static void
 binary_upgrade_set_type_oids_by_type_oid(PQExpBuffer upgrade_buffer,
-											   Oid pg_type_oid)
+										 Oid pg_type_oid)
 {
 	PQExpBuffer upgrade_query = createPQExpBuffer();
 	int			ntups;
 	PGresult   *upgrade_res;
 	Oid			pg_type_array_oid;
-			
+
 	appendPQExpBuffer(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
 	appendPQExpBuffer(upgrade_buffer,
-		"SELECT binary_upgrade.set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
-		pg_type_oid);
+	 "SELECT binary_upgrade.set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
+					  pg_type_oid);
 
 	/* we only support old >= 8.3 for binary upgrades */
 	appendPQExpBuffer(upgrade_query,
@@ -2176,10 +2176,10 @@ binary_upgrade_set_type_oids_by_type_oid(PQExpBuffer upgrade_buffer,
 	if (OidIsValid(pg_type_array_oid))
 	{
 		appendPQExpBuffer(upgrade_buffer,
-							"\n-- For binary upgrade, must preserve pg_type array oid\n");
+			   "\n-- For binary upgrade, must preserve pg_type array oid\n");
 		appendPQExpBuffer(upgrade_buffer,
-			"SELECT binary_upgrade.set_next_pg_type_array_oid('%u'::pg_catalog.oid);\n\n",
-			pg_type_array_oid);
+						  "SELECT binary_upgrade.set_next_pg_type_array_oid('%u'::pg_catalog.oid);\n\n",
+						  pg_type_array_oid);
 	}
 
 	PQclear(upgrade_res);
@@ -2188,14 +2188,14 @@ binary_upgrade_set_type_oids_by_type_oid(PQExpBuffer upgrade_buffer,
 
 static bool
 binary_upgrade_set_type_oids_by_rel_oid(PQExpBuffer upgrade_buffer,
-											   Oid pg_rel_oid)
+										Oid pg_rel_oid)
 {
 	PQExpBuffer upgrade_query = createPQExpBuffer();
 	int			ntups;
 	PGresult   *upgrade_res;
 	Oid			pg_type_oid;
 	bool		toast_set = false;
-	
+
 	/* we only support old >= 8.3 for binary upgrades */
 	appendPQExpBuffer(upgrade_query,
 					  "SELECT c.reltype AS crel, t.reltype AS trel "
@@ -2226,13 +2226,13 @@ binary_upgrade_set_type_oids_by_rel_oid(PQExpBuffer upgrade_buffer,
 	if (!PQgetisnull(upgrade_res, 0, PQfnumber(upgrade_res, "trel")))
 	{
 		/* Toast tables do not have pg_type array rows */
-		Oid pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
-										PQfnumber(upgrade_res, "trel")));
+		Oid			pg_type_toast_oid = atooid(PQgetvalue(upgrade_res, 0,
+											PQfnumber(upgrade_res, "trel")));
 
 		appendPQExpBuffer(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type toast oid\n");
 		appendPQExpBuffer(upgrade_buffer,
-			"SELECT binary_upgrade.set_next_pg_type_toast_oid('%u'::pg_catalog.oid);\n\n",
-			pg_type_toast_oid);
+						  "SELECT binary_upgrade.set_next_pg_type_toast_oid('%u'::pg_catalog.oid);\n\n",
+						  pg_type_toast_oid);
 
 		toast_set = true;
 	}
@@ -2256,12 +2256,12 @@ binary_upgrade_set_relfilenodes(PQExpBuffer upgrade_buffer, Oid pg_class_oid,
 
 	/*
 	 * Note: we don't need to use pg_relation_filenode() here because this
-	 * function is not intended to be used against system catalogs.
-	 * Otherwise we'd have to worry about which versions pg_relation_filenode
-	 * is available in.
+	 * function is not intended to be used against system catalogs. Otherwise
+	 * we'd have to worry about which versions pg_relation_filenode is
+	 * available in.
 	 */
 	appendPQExpBuffer(upgrade_query,
-					  "SELECT c.relfilenode, c.reltoastrelid, t.reltoastidxid "
+					"SELECT c.relfilenode, c.reltoastrelid, t.reltoastidxid "
 					  "FROM pg_catalog.pg_class c LEFT JOIN "
 					  "pg_catalog.pg_class t ON (c.reltoastrelid = t.oid) "
 					  "WHERE c.oid = '%u'::pg_catalog.oid;",
@@ -2286,37 +2286,36 @@ binary_upgrade_set_relfilenodes(PQExpBuffer upgrade_buffer, Oid pg_class_oid,
 	pg_class_reltoastidxid = atooid(PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "reltoastidxid")));
 
 	appendPQExpBuffer(upgrade_buffer,
-						"\n-- For binary upgrade, must preserve relfilenodes\n");
+					"\n-- For binary upgrade, must preserve relfilenodes\n");
 
 	if (!is_index)
 		appendPQExpBuffer(upgrade_buffer,
-			"SELECT binary_upgrade.set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
-			pg_class_relfilenode);
+						  "SELECT binary_upgrade.set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
+						  pg_class_relfilenode);
 	else
 		appendPQExpBuffer(upgrade_buffer,
-			"SELECT binary_upgrade.set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
-			pg_class_relfilenode);
-	
+						  "SELECT binary_upgrade.set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
+						  pg_class_relfilenode);
+
 	if (OidIsValid(pg_class_reltoastrelid))
 	{
 		/*
-		 *  One complexity is that the table definition might not require
-		 *	the creation of a TOAST table, and the TOAST table might have
-		 *	been created long after table creation, when the table was
-		 *	loaded with wide data.  By setting the TOAST relfilenode we
-		 *	force creation of the TOAST heap and TOAST index by the
-		 *	backend so we can cleanly migrate the files during binary
-		 *	migration.
+		 * One complexity is that the table definition might not require the
+		 * creation of a TOAST table, and the TOAST table might have been
+		 * created long after table creation, when the table was loaded with
+		 * wide data.  By setting the TOAST relfilenode we force creation of
+		 * the TOAST heap and TOAST index by the backend so we can cleanly
+		 * migrate the files during binary migration.
 		 */
 
 		appendPQExpBuffer(upgrade_buffer,
-			"SELECT binary_upgrade.set_next_toast_relfilenode('%u'::pg_catalog.oid);\n",
-			pg_class_reltoastrelid);
+						  "SELECT binary_upgrade.set_next_toast_relfilenode('%u'::pg_catalog.oid);\n",
+						  pg_class_reltoastrelid);
 
 		/* every toast table has an index */
 		appendPQExpBuffer(upgrade_buffer,
-			"SELECT binary_upgrade.set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
-			pg_class_reltoastidxid);
+						  "SELECT binary_upgrade.set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
+						  pg_class_reltoastidxid);
 	}
 	appendPQExpBuffer(upgrade_buffer, "\n");
 
@@ -2612,7 +2611,7 @@ getTypes(int *numTypes)
 		AssignDumpId(&tyinfo[i].dobj);
 		tyinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_typname));
 		tyinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_typnamespace)),
-												tyinfo[i].dobj.catId.oid);
+												 tyinfo[i].dobj.catId.oid);
 		tyinfo[i].rolname = strdup(PQgetvalue(res, i, i_rolname));
 		tyinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem));
 		tyinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid));
@@ -3958,7 +3957,7 @@ getIndexes(TableInfo tblinfo[], int numTables)
 							  "c.condeferrable, c.condeferred, "
 							  "c.tableoid AS contableoid, "
 							  "c.oid AS conoid, "
-							  "pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
+				  "pg_catalog.pg_get_constraintdef(c.oid, false) AS condef, "
 							  "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) AS tablespace, "
 							"array_to_string(t.reloptions, ', ') AS options "
 							  "FROM pg_catalog.pg_index i "
@@ -4586,7 +4585,7 @@ getTriggers(TableInfo tblinfo[], int numTables)
 			appendPQExpBuffer(query,
 							  "SELECT tgname, "
 							  "tgfoid::pg_catalog.regproc AS tgfname, "
-							  "pg_catalog.pg_get_triggerdef(oid, false) AS tgdef, "
+						"pg_catalog.pg_get_triggerdef(oid, false) AS tgdef, "
 							  "tgenabled, tableoid, oid "
 							  "FROM pg_catalog.pg_trigger t "
 							  "WHERE tgrelid = '%u'::pg_catalog.oid "
@@ -5112,8 +5111,8 @@ getTableAttrs(TableInfo *tblinfo, int numTables)
 							  "a.attstattarget, a.attstorage, t.typstorage, "
 							  "a.attnotnull, a.atthasdef, a.attisdropped, "
 							  "a.attlen, a.attalign, a.attislocal, "
-				   "pg_catalog.format_type(t.oid,a.atttypmod) AS atttypname, "
-							"array_to_string(attoptions, ', ') AS attoptions "
+				  "pg_catalog.format_type(t.oid,a.atttypmod) AS atttypname, "
+						   "array_to_string(attoptions, ', ') AS attoptions "
 			 "FROM pg_catalog.pg_attribute a LEFT JOIN pg_catalog.pg_type t "
 							  "ON a.atttypid = t.oid "
 							  "WHERE a.attrelid = '%u'::pg_catalog.oid "
@@ -5128,7 +5127,7 @@ getTableAttrs(TableInfo *tblinfo, int numTables)
 							  "a.attstattarget, a.attstorage, t.typstorage, "
 							  "a.attnotnull, a.atthasdef, a.attisdropped, "
 							  "a.attlen, a.attalign, a.attislocal, "
-				   "pg_catalog.format_type(t.oid,a.atttypmod) AS atttypname, "
+				  "pg_catalog.format_type(t.oid,a.atttypmod) AS atttypname, "
 							  "'' AS attoptions "
 			 "FROM pg_catalog.pg_attribute a LEFT JOIN pg_catalog.pg_type t "
 							  "ON a.atttypid = t.oid "
@@ -6035,7 +6034,7 @@ getDefaultACLs(int *numDefaultACLs)
 
 	for (i = 0; i < ntups; i++)
 	{
-		Oid		nspid = atooid(PQgetvalue(res, i, i_defaclnamespace));
+		Oid			nspid = atooid(PQgetvalue(res, i, i_defaclnamespace));
 
 		daclinfo[i].dobj.objType = DO_DEFAULT_ACL;
 		daclinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
@@ -6046,7 +6045,7 @@ getDefaultACLs(int *numDefaultACLs)
 
 		if (nspid != InvalidOid)
 			daclinfo[i].dobj.namespace = findNamespace(nspid,
-													   daclinfo[i].dobj.catId.oid);
+												 daclinfo[i].dobj.catId.oid);
 		else
 			daclinfo[i].dobj.namespace = NULL;
 
@@ -6651,9 +6650,9 @@ dumpEnumType(Archive *fout, TypeInfo *tyinfo)
 			if (i == 0)
 				appendPQExpBuffer(q, "\n-- For binary upgrade, must preserve pg_enum oids\n");
 			appendPQExpBuffer(q,
-				"SELECT binary_upgrade.add_pg_enum_label('%u'::pg_catalog.oid, "
-				"'%u'::pg_catalog.oid, ",
-				enum_oid, tyinfo->dobj.catId.oid);
+			 "SELECT binary_upgrade.add_pg_enum_label('%u'::pg_catalog.oid, "
+							  "'%u'::pg_catalog.oid, ",
+							  enum_oid, tyinfo->dobj.catId.oid);
 			appendStringLiteralAH(q, label, fout);
 			appendPQExpBuffer(q, ");\n");
 		}
@@ -7208,8 +7207,8 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
 	/* We assume here that remoteVersion must be at least 70300 */
 
 	appendPQExpBuffer(query, "SELECT a.attname, "
-			 "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
-			 		  "typrelid "
+			"pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
+					  "typrelid "
 					  "FROM pg_catalog.pg_type t, pg_catalog.pg_attribute a "
 					  "WHERE t.oid = '%u'::pg_catalog.oid "
 					  "AND a.attrelid = t.typrelid "
@@ -7234,8 +7233,8 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
 
 	if (binary_upgrade)
 	{
-		Oid typrelid = atooid(PQgetvalue(res, 0, i_typrelid));
-		
+		Oid			typrelid = atooid(PQgetvalue(res, 0, i_typrelid));
+
 		binary_upgrade_set_type_oids_by_type_oid(q, tyinfo->dobj.catId.oid);
 		binary_upgrade_set_relfilenodes(q, typrelid, false);
 	}
@@ -7302,15 +7301,15 @@ static void
 dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo)
 {
 	CommentItem *comments;
-	int ncomments;
-	PGresult *res;
+	int			ncomments;
+	PGresult   *res;
 	PQExpBuffer query;
 	PQExpBuffer target;
-	Oid pgClassOid;
-	int i;
-	int ntups;
-	int i_attname;
-	int i_attnum;
+	Oid			pgClassOid;
+	int			i;
+	int			ntups;
+	int			i_attname;
+	int			i_attnum;
 
 	query = createPQExpBuffer();
 
@@ -7431,7 +7430,7 @@ dumpShellType(Archive *fout, ShellTypeInfo *stinfo)
 
 	if (binary_upgrade)
 		binary_upgrade_set_type_oids_by_type_oid(q,
-								stinfo->baseType->dobj.catId.oid);
+										   stinfo->baseType->dobj.catId.oid);
 
 	appendPQExpBuffer(q, "CREATE TYPE %s;\n",
 					  fmtId(stinfo->dobj.name));
@@ -7561,7 +7560,7 @@ dumpProcLang(Archive *fout, ProcLangInfo *plang)
 			/* Cope with possibility that inline is in different schema */
 			if (inlineInfo->dobj.namespace != funcInfo->dobj.namespace)
 				appendPQExpBuffer(defqry, "%s.",
-							fmtId(inlineInfo->dobj.namespace->dobj.name));
+							   fmtId(inlineInfo->dobj.namespace->dobj.name));
 			appendPQExpBuffer(defqry, "%s",
 							  fmtId(inlineInfo->dobj.name));
 		}
@@ -7579,10 +7578,10 @@ dumpProcLang(Archive *fout, ProcLangInfo *plang)
 	else
 	{
 		/*
-		 * If not dumping parameters, then use CREATE OR REPLACE so that
-		 * the command will not fail if the language is preinstalled in the
-		 * target database.  We restrict the use of REPLACE to this case so
-		 * as to eliminate the risk of replacing a language with incompatible
+		 * If not dumping parameters, then use CREATE OR REPLACE so that the
+		 * command will not fail if the language is preinstalled in the target
+		 * database.  We restrict the use of REPLACE to this case so as to
+		 * eliminate the risk of replacing a language with incompatible
 		 * parameter settings: this command will only succeed at all if there
 		 * is a pg_pltemplate entry, and if there is one, the existing entry
 		 * must match it too.
@@ -10333,7 +10332,7 @@ dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo)
 
 	ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
 				 tag->data,
-				 daclinfo->dobj.namespace ? daclinfo->dobj.namespace->dobj.name : NULL,
+	   daclinfo->dobj.namespace ? daclinfo->dobj.namespace->dobj.name : NULL,
 				 NULL,
 				 daclinfo->defaclrole,
 				 false, "DEFAULT ACL", SECTION_NONE,
@@ -10489,13 +10488,13 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 	int			j,
 				k;
 	bool		toast_set = false;
-	
+
 	/* Make sure we are in proper schema */
 	selectSourceSchema(tbinfo->dobj.namespace->dobj.name);
 
 	if (binary_upgrade)
 		toast_set = binary_upgrade_set_type_oids_by_rel_oid(q,
-												tbinfo->dobj.catId.oid);
+													 tbinfo->dobj.catId.oid);
 
 	/* Is it a table or a view? */
 	if (tbinfo->relkind == RELKIND_VIEW)
@@ -10597,15 +10596,16 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 				 * binary-upgrade case, where we're not doing normal
 				 * inheritance) or if it's to be printed separately.
 				 */
-				bool has_default = (tbinfo->attrdefs[j] != NULL
-									&& (!tbinfo->inhAttrDef[j] || binary_upgrade)
-									&& !tbinfo->attrdefs[j]->separate);
+				bool		has_default = (tbinfo->attrdefs[j] != NULL
+								&& (!tbinfo->inhAttrDef[j] || binary_upgrade)
+										   && !tbinfo->attrdefs[j]->separate);
+
 				/*
-				 * Not Null constraint --- suppress if inherited, except
-				 * in binary-upgrade case.
+				 * Not Null constraint --- suppress if inherited, except in
+				 * binary-upgrade case.
 				 */
-				bool has_notnull =  (tbinfo->notnull[j]
-									 &&	(!tbinfo->inhNotNull[j] || binary_upgrade));
+				bool		has_notnull = (tbinfo->notnull[j]
+							  && (!tbinfo->inhNotNull[j] || binary_upgrade));
 
 				if (tbinfo->reloftype && !has_default && !has_notnull)
 					continue;
@@ -10734,15 +10734,15 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 		appendPQExpBuffer(q, ";\n");
 
 		/*
-		 * To create binary-compatible heap files, we have to ensure the
-		 * same physical column order, including dropped columns, as in the
-		 * original.  Therefore, we create dropped columns above and drop
-		 * them here, also updating their attlen/attalign values so that
-		 * the dropped column can be skipped properly.  (We do not bother
-		 * with restoring the original attbyval setting.)  Also, inheritance
+		 * To create binary-compatible heap files, we have to ensure the same
+		 * physical column order, including dropped columns, as in the
+		 * original.  Therefore, we create dropped columns above and drop them
+		 * here, also updating their attlen/attalign values so that the
+		 * dropped column can be skipped properly.	(We do not bother with
+		 * restoring the original attbyval setting.)  Also, inheritance
 		 * relationships are set up by doing ALTER INHERIT rather than using
-		 * an INHERITS clause --- the latter would possibly mess up the
-		 * column order.  That also means we have to take care about setting
+		 * an INHERITS clause --- the latter would possibly mess up the column
+		 * order.  That also means we have to take care about setting
 		 * attislocal correctly, plus fix up any inherited CHECK constraints.
 		 */
 		if (binary_upgrade)
@@ -10814,7 +10814,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 									  fmtId(tbinfo->dobj.name));
 					if (parentRel->dobj.namespace != tbinfo->dobj.namespace)
 						appendPQExpBuffer(q, "%s.",
-										  fmtId(parentRel->dobj.namespace->dobj.name));
+								fmtId(parentRel->dobj.namespace->dobj.name));
 					appendPQExpBuffer(q, "%s;\n",
 									  fmtId(parentRel->dobj.name));
 				}
@@ -11142,7 +11142,7 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo)
 		else
 		{
 			appendPQExpBuffer(q, "%s (",
-							  coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
+						 coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
 			for (k = 0; k < indxinfo->indnkeys; k++)
 			{
 				int			indkey = (int) indxinfo->indkeys[k];
@@ -11579,8 +11579,8 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
 
 		appendPQExpBuffer(query, ";\n");
 
-		/* binary_upgrade:  no need to clear TOAST table oid */
-		
+		/* binary_upgrade:	no need to clear TOAST table oid */
+
 		ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
 					 tbinfo->dobj.name,
 					 tbinfo->dobj.namespace->dobj.name,
@@ -11785,7 +11785,7 @@ dumpTrigger(Archive *fout, TriggerInfo *tginfo)
 		for (findx = 0; findx < tginfo->tgnargs; findx++)
 		{
 			/* find the embedded null that terminates this trigger argument */
-			size_t	tlen = strlen(p);
+			size_t		tlen = strlen(p);
 
 			if (p + tlen >= tgargs + lentgargs)
 			{
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index e71d03604b7f69626377a22820eb6c0a84e50fee..c309f69f7262006477556dac89c9c48f231dc09e 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.h,v 1.163 2010/02/18 01:29:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.h,v 1.164 2010/02/26 02:01:17 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -338,7 +338,7 @@ typedef struct _triggerInfo
  * to sort them the way we want.
  *
  * Note: condeferrable and condeferred are currently only valid for
- * unique/primary-key constraints.  Otherwise that info is in condef.
+ * unique/primary-key constraints.	Otherwise that info is in condef.
  */
 typedef struct _constraintInfo
 {
@@ -439,13 +439,13 @@ typedef struct _defaultACLInfo
 {
 	DumpableObject dobj;
 	char	   *defaclrole;
-	char	    defaclobjtype;
+	char		defaclobjtype;
 	char	   *defaclacl;
 } DefaultACLInfo;
 
 typedef struct _blobInfo
 {
-	DumpableObject	dobj;
+	DumpableObject dobj;
 	char	   *rolname;
 	char	   *blobacl;
 } BlobInfo;
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index f3761217d1c9ff92ceb9a84dc9dd4889b059badc..0c1efcdeb3eb49487e525570becaea31861efb66 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump_sort.c,v 1.29 2010/02/18 01:29:10 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump_sort.c,v 1.30 2010/02/26 02:01:17 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -164,10 +164,10 @@ DOTypeNameCompare(const void *p1, const void *p2)
 		return cmpval;
 
 	/* To have a stable sort order, break ties for some object types */
-    if (obj1->objType == DO_FUNC || obj1->objType == DO_AGG)
+	if (obj1->objType == DO_FUNC || obj1->objType == DO_AGG)
 	{
-		FuncInfo *fobj1 = *(FuncInfo **) p1;
-		FuncInfo *fobj2 = *(FuncInfo **) p2;
+		FuncInfo   *fobj1 = *(FuncInfo **) p1;
+		FuncInfo   *fobj2 = *(FuncInfo **) p2;
 
 		cmpval = fobj1->nargs - fobj2->nargs;
 		if (cmpval != 0)
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 53a1e25d7adf0b270e3bd1807045b70bf94e1e58..275a22ab88d2a19726f81caa5930eea9a65d8b66 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dumpall.c,v 1.133 2010/02/17 04:19:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dumpall.c,v 1.134 2010/02/26 02:01:17 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -1397,7 +1397,7 @@ dumpUserConfig(PGconn *conn, const char *username)
 		if (server_version >= 90000)
 			printfPQExpBuffer(buf, "SELECT setconfig[%d] FROM pg_db_role_setting WHERE "
 							  "setdatabase = 0 AND setrole = "
-							  "(SELECT oid FROM pg_authid WHERE rolname = ", count);
+					   "(SELECT oid FROM pg_authid WHERE rolname = ", count);
 		else if (server_version >= 80100)
 			printfPQExpBuffer(buf, "SELECT rolconfig[%d] FROM pg_authid WHERE rolname = ", count);
 		else
@@ -1432,13 +1432,13 @@ dumpUserConfig(PGconn *conn, const char *username)
 static void
 dumpDbRoleConfig(PGconn *conn)
 {
-	PQExpBuffer	buf = createPQExpBuffer();
+	PQExpBuffer buf = createPQExpBuffer();
 	PGresult   *res;
 	int			i;
 
 	printfPQExpBuffer(buf, "SELECT rolname, datname, unnest(setconfig) "
 					  "FROM pg_db_role_setting, pg_authid, pg_database "
-					  "WHERE setrole = pg_authid.oid AND setdatabase = pg_database.oid");
+		  "WHERE setrole = pg_authid.oid AND setdatabase = pg_database.oid");
 	res = executeQuery(conn, buf->data);
 
 	if (PQntuples(res) > 0)
@@ -1628,20 +1628,20 @@ connectDatabase(const char *dbname, const char *pghost, const char *pgport,
 			exit(1);
 		}
 
-		keywords[0]	= "host";
-		values[0]	= pghost;
-		keywords[1]	= "port";
-		values[1]	= pgport;
-		keywords[2]	= "user";
-		values[2]	= pguser;
-		keywords[3]	= "password";
-		values[3]	= password;
-		keywords[4]	= "dbname";
-		values[4]	= dbname;
-		keywords[5]	= "fallback_application_name";
-		values[5]	= progname;
-		keywords[6]	= NULL;
-		values[6]	= NULL;
+		keywords[0] = "host";
+		values[0] = pghost;
+		keywords[1] = "port";
+		values[1] = pgport;
+		keywords[2] = "user";
+		values[2] = pguser;
+		keywords[3] = "password";
+		values[3] = password;
+		keywords[4] = "dbname";
+		values[4] = dbname;
+		keywords[5] = "fallback_application_name";
+		values[5] = progname;
+		keywords[6] = NULL;
+		values[6] = NULL;
 
 		new_pass = false;
 		conn = PQconnectdbParams(keywords, values, true);
diff --git a/src/bin/pg_resetxlog/pg_resetxlog.c b/src/bin/pg_resetxlog/pg_resetxlog.c
index 07ea5fd13bcc0254a6fbef2bf9f4db085ab72dbb..d14df9cec8b328c2c1133252ae46285fc446ad06 100644
--- a/src/bin/pg_resetxlog/pg_resetxlog.c
+++ b/src/bin/pg_resetxlog/pg_resetxlog.c
@@ -23,7 +23,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/bin/pg_resetxlog/pg_resetxlog.c,v 1.77 2010/01/04 12:50:49 heikki Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_resetxlog/pg_resetxlog.c,v 1.78 2010/02/26 02:01:17 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -312,10 +312,10 @@ main(int argc, char *argv[])
 
 		/*
 		 * For the moment, just set oldestXid to a value that will force
-		 * immediate autovacuum-for-wraparound.  It's not clear whether
-		 * adding user control of this is useful, so let's just do something
-		 * that's reasonably safe.  The magic constant here corresponds to
-		 * the maximum allowed value of autovacuum_freeze_max_age.
+		 * immediate autovacuum-for-wraparound.  It's not clear whether adding
+		 * user control of this is useful, so let's just do something that's
+		 * reasonably safe.  The magic constant here corresponds to the
+		 * maximum allowed value of autovacuum_freeze_max_age.
 		 */
 		ControlFile.checkPointCopy.oldestXid = set_xid - 2000000000;
 		if (ControlFile.checkPointCopy.oldestXid < FirstNormalTransactionId)
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 54566c818f25fccad9aa5430ac76922b29a83c5c..e9476c4f3fc1ecda3149cc3e705aa70145224b8a 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.215 2010/02/16 21:07:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.216 2010/02/26 02:01:17 momjian Exp $
  */
 #include "postgres_fe.h"
 #include "command.h"
@@ -418,7 +418,7 @@ exec_command(const char *cmd,
 
 					if (pattern)
 						pattern2 = psql_scan_slash_option(scan_state,
-														  OT_NORMAL, NULL, true);
+													  OT_NORMAL, NULL, true);
 					success = listDbRoleSettings(pattern, pattern2);
 				}
 				else
@@ -1259,20 +1259,20 @@ do_connect(char *dbname, char *user, char *host, char *port)
 		const char **keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords));
 		const char **values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values));
 
-		keywords[0]	= "host";
-		values[0]	= host;
-		keywords[1]	= "port";
-		values[1]	= port;
-		keywords[2]	= "user";
-		values[2]	= user;
-		keywords[3]	= "password";
-		values[3]	= password;
-		keywords[4]	= "dbname";
-		values[4]	= dbname;
-		keywords[5]	= "fallback_application_name";
-		values[5]	= pset.progname;
-		keywords[6]	= NULL;
-		values[6]	= NULL;
+		keywords[0] = "host";
+		values[0] = host;
+		keywords[1] = "port";
+		values[1] = port;
+		keywords[2] = "user";
+		values[2] = user;
+		keywords[3] = "password";
+		values[3] = password;
+		keywords[4] = "dbname";
+		values[4] = dbname;
+		keywords[5] = "fallback_application_name";
+		values[5] = pset.progname;
+		keywords[6] = NULL;
+		values[6] = NULL;
 
 		n_conn = PQconnectdbParams(keywords, values, true);
 
@@ -1331,7 +1331,7 @@ do_connect(char *dbname, char *user, char *host, char *port)
 	PQsetNoticeProcessor(n_conn, NoticeProcessor, NULL);
 	pset.db = n_conn;
 	SyncVariables();
-	connection_warnings(false);		/* Must be after SyncVariables */
+	connection_warnings(false); /* Must be after SyncVariables */
 
 	/* Tell the user about the new connection */
 	if (!pset.quiet)
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 6a6517d0032ffe3d9369445525c6bff0f5c9e4ac..e9e7c37c65ed4f1d8f4247a6f35847bcdc7e63be 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -8,7 +8,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.237 2010/02/17 04:19:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.238 2010/02/26 02:01:18 momjian Exp $
  */
 #include "postgres_fe.h"
 
@@ -755,7 +755,7 @@ listDefaultACLs(const char *pattern)
 	initPQExpBuffer(&buf);
 
 	printfPQExpBuffer(&buf,
-					  "SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS \"%s\",\n"
+			   "SELECT pg_catalog.pg_get_userbyid(d.defaclrole) AS \"%s\",\n"
 					  "  n.nspname AS \"%s\",\n"
 					  "  CASE d.defaclobjtype WHEN 'r' THEN '%s' WHEN 'S' THEN '%s' WHEN 'f' THEN '%s' END AS \"%s\",\n"
 					  "  ",
@@ -769,7 +769,7 @@ listDefaultACLs(const char *pattern)
 	printACLColumn(&buf, "d.defaclacl");
 
 	appendPQExpBuffer(&buf, "\nFROM pg_catalog.pg_default_acl d\n"
-	   "     LEFT JOIN pg_catalog.pg_namespace n ON n.oid = d.defaclnamespace\n");
+					  "     LEFT JOIN pg_catalog.pg_namespace n ON n.oid = d.defaclnamespace\n");
 
 	processSQLNamePattern(pset.db, &buf, pattern, false, false,
 						  NULL,
@@ -1388,7 +1388,7 @@ describeOneTableDetails(const char *schemaname,
 		if (verbose)
 		{
 			int			firstvcol = (tableinfo.relkind == 'i' ? 6 : 5);
-			char	   *storage  = PQgetvalue(res, i, firstvcol);
+			char	   *storage = PQgetvalue(res, i, firstvcol);
 
 			/* these strings are literal in our syntax, so not translated. */
 			printTableAddCell(&cont, (storage[0] == 'p' ? "plain" :
@@ -1418,7 +1418,7 @@ describeOneTableDetails(const char *schemaname,
 							  "  (NOT i.indimmediate) AND "
 							  "EXISTS (SELECT 1 FROM pg_catalog.pg_depend d, "
 							  "pg_catalog.pg_constraint con WHERE "
-							  "d.classid = 'pg_catalog.pg_class'::pg_catalog.regclass AND "
+				"d.classid = 'pg_catalog.pg_class'::pg_catalog.regclass AND "
 							  "d.objid = i.indexrelid AND "
 							  "d.refclassid = 'pg_catalog.pg_constraint'::pg_catalog.regclass AND "
 							  "d.refobjid = con.oid AND d.deptype = 'i' AND "
@@ -1426,7 +1426,7 @@ describeOneTableDetails(const char *schemaname,
 							  "  (NOT i.indimmediate) AND "
 							  "EXISTS (SELECT 1 FROM pg_catalog.pg_depend d, "
 							  "pg_catalog.pg_constraint con WHERE "
-							  "d.classid = 'pg_catalog.pg_class'::pg_catalog.regclass AND "
+				"d.classid = 'pg_catalog.pg_class'::pg_catalog.regclass AND "
 							  "d.objid = i.indexrelid AND "
 							  "d.refclassid = 'pg_catalog.pg_constraint'::pg_catalog.regclass AND "
 							  "d.refobjid = con.oid AND d.deptype = 'i' AND "
@@ -1435,7 +1435,7 @@ describeOneTableDetails(const char *schemaname,
 			appendPQExpBuffer(&buf,
 						"  false AS condeferrable, false AS condeferred,\n");
 		appendPQExpBuffer(&buf, "  a.amname, c2.relname, "
-						  "pg_catalog.pg_get_expr(i.indpred, i.indrelid, true)\n"
+					  "pg_catalog.pg_get_expr(i.indpred, i.indrelid, true)\n"
 						  "FROM pg_catalog.pg_index i, pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_am a\n"
 		  "WHERE i.indexrelid = c.oid AND c.oid = '%s' AND c.relam = a.oid\n"
 						  "AND i.indrelid = c2.oid",
@@ -1551,22 +1551,22 @@ describeOneTableDetails(const char *schemaname,
 			appendPQExpBuffer(&buf, "pg_catalog.pg_get_indexdef(i.indexrelid, 0, true)");
 			if (pset.sversion >= 90000)
 				appendPQExpBuffer(&buf,
-							  ",\n  (NOT i.indimmediate) AND "
-							  "EXISTS (SELECT 1 FROM pg_catalog.pg_depend d, "
-							  "pg_catalog.pg_constraint con WHERE "
-							  "d.classid = 'pg_catalog.pg_class'::pg_catalog.regclass AND "
-							  "d.objid = i.indexrelid AND "
-							  "d.refclassid = 'pg_catalog.pg_constraint'::pg_catalog.regclass AND "
+								  ",\n  (NOT i.indimmediate) AND "
+							 "EXISTS (SELECT 1 FROM pg_catalog.pg_depend d, "
+								  "pg_catalog.pg_constraint con WHERE "
+				"d.classid = 'pg_catalog.pg_class'::pg_catalog.regclass AND "
+								  "d.objid = i.indexrelid AND "
+								  "d.refclassid = 'pg_catalog.pg_constraint'::pg_catalog.regclass AND "
 							  "d.refobjid = con.oid AND d.deptype = 'i' AND "
-							  "con.condeferrable) AS condeferrable"
-							  ",\n  (NOT i.indimmediate) AND "
-							  "EXISTS (SELECT 1 FROM pg_catalog.pg_depend d, "
-							  "pg_catalog.pg_constraint con WHERE "
-							  "d.classid = 'pg_catalog.pg_class'::pg_catalog.regclass AND "
-							  "d.objid = i.indexrelid AND "
-							  "d.refclassid = 'pg_catalog.pg_constraint'::pg_catalog.regclass AND "
+								  "con.condeferrable) AS condeferrable"
+								  ",\n  (NOT i.indimmediate) AND "
+							 "EXISTS (SELECT 1 FROM pg_catalog.pg_depend d, "
+								  "pg_catalog.pg_constraint con WHERE "
+				"d.classid = 'pg_catalog.pg_class'::pg_catalog.regclass AND "
+								  "d.objid = i.indexrelid AND "
+								  "d.refclassid = 'pg_catalog.pg_constraint'::pg_catalog.regclass AND "
 							  "d.refobjid = con.oid AND d.deptype = 'i' AND "
-							  "con.condeferred) AS condeferred");
+								  "con.condeferred) AS condeferred");
 			else
 				appendPQExpBuffer(&buf, ", false AS condeferrable, false AS condeferred");
 			if (pset.sversion >= 80000)
@@ -2313,23 +2313,23 @@ add_role_attribute(PQExpBuffer buf, const char *const str)
 bool
 listDbRoleSettings(const char *pattern, const char *pattern2)
 {
-	PQExpBufferData	buf;
-	PGresult	   *res;
+	PQExpBufferData buf;
+	PGresult   *res;
 	printQueryOpt myopt = pset.popt;
 
 	initPQExpBuffer(&buf);
 
 	if (pset.sversion >= 90000)
 	{
-		bool	havewhere;
+		bool		havewhere;
 
 		printfPQExpBuffer(&buf, "SELECT rolname AS role, datname AS database,\n"
-						  "pg_catalog.array_to_string(setconfig, E'\\n') AS settings\n"
+				"pg_catalog.array_to_string(setconfig, E'\\n') AS settings\n"
 						  "FROM pg_db_role_setting AS s\n"
-						  "LEFT JOIN pg_database ON pg_database.oid = setdatabase\n"
+				   "LEFT JOIN pg_database ON pg_database.oid = setdatabase\n"
 						  "LEFT JOIN pg_roles ON pg_roles.oid = setrole\n");
 		havewhere = processSQLNamePattern(pset.db, &buf, pattern, false, false,
-										  NULL, "pg_roles.rolname", NULL, NULL);
+									   NULL, "pg_roles.rolname", NULL, NULL);
 		processSQLNamePattern(pset.db, &buf, pattern2, havewhere, false,
 							  NULL, "pg_database.datname", NULL, NULL);
 		appendPQExpBufferStr(&buf, "ORDER BY role, database");
@@ -2337,7 +2337,7 @@ listDbRoleSettings(const char *pattern, const char *pattern2)
 	else
 	{
 		fprintf(pset.queryFout,
-				_("No per-database role settings support in this server version.\n"));
+		_("No per-database role settings support in this server version.\n"));
 		return false;
 	}
 
diff --git a/src/bin/psql/input.c b/src/bin/psql/input.c
index 70abc29d803243df7f878cb228a06a6b85f15917..d28fe9c0ba4ef8f0047871477b01e8d123ae6922 100644
--- a/src/bin/psql/input.c
+++ b/src/bin/psql/input.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/psql/input.c,v 1.68 2010/01/02 16:57:59 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/input.c,v 1.69 2010/02/26 02:01:18 momjian Exp $
  */
 #include "postgres_fe.h"
 
@@ -357,8 +357,8 @@ saveHistory(char *fname, int max_lines, bool appendFlag, bool encodeFlag)
 		 * On newer versions of libreadline, truncate the history file as
 		 * needed and then append what we've added.  This avoids overwriting
 		 * history from other concurrent sessions (although there are still
-		 * race conditions when two sessions exit at about the same time).
-		 * If we don't have those functions, fall back to write_history().
+		 * race conditions when two sessions exit at about the same time). If
+		 * we don't have those functions, fall back to write_history().
 		 *
 		 * Note: return value of write_history is not standardized across GNU
 		 * readline and libedit.  Therefore, check for errno becoming set to
@@ -367,8 +367,8 @@ saveHistory(char *fname, int max_lines, bool appendFlag, bool encodeFlag)
 #if defined(HAVE_HISTORY_TRUNCATE_FILE) && defined(HAVE_APPEND_HISTORY)
 		if (appendFlag)
 		{
-			int		nlines;
-			int		fd;
+			int			nlines;
+			int			fd;
 
 			/* truncate previous entries if needed */
 			if (max_lines >= 0)
@@ -396,7 +396,7 @@ saveHistory(char *fname, int max_lines, bool appendFlag, bool encodeFlag)
 			/* truncate what we have ... */
 			if (max_lines >= 0)
 				stifle_history(max_lines);
-			/* ... and overwrite file.  Tough luck for concurrent sessions. */
+			/* ... and overwrite file.	Tough luck for concurrent sessions. */
 			errno = 0;
 			(void) write_history(fname);
 			if (errno == 0)
diff --git a/src/bin/psql/input.h b/src/bin/psql/input.h
index 8577563d9e7e5d49816ff6132029e0e474dfc4e6..170590645b05425e36e059f734d6a351d5cce511 100644
--- a/src/bin/psql/input.h
+++ b/src/bin/psql/input.h
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/psql/input.h,v 1.34 2010/01/02 16:57:59 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/input.h,v 1.35 2010/02/26 02:01:19 momjian Exp $
  */
 #ifndef INPUT_H
 #define INPUT_H
@@ -22,21 +22,18 @@
 #if defined(HAVE_READLINE_HISTORY_H)
 #include <readline/history.h>
 #endif
-
 #elif defined(HAVE_EDITLINE_READLINE_H)
 #include <editline/readline.h>
 #if defined(HAVE_EDITLINE_HISTORY_H)
 #include <editline/history.h>
 #endif
-
 #elif defined(HAVE_READLINE_H)
 #include <readline.h>
 #if defined(HAVE_HISTORY_H)
 #include <history.h>
 #endif
-
-#endif /* HAVE_READLINE_READLINE_H, etc */
-#endif /* HAVE_LIBREADLINE */
+#endif   /* HAVE_READLINE_READLINE_H, etc */
+#endif   /* HAVE_LIBREADLINE */
 
 #include "pqexpbuffer.h"
 
diff --git a/src/bin/psql/large_obj.c b/src/bin/psql/large_obj.c
index a83039d353c16f5f678bb6ea695d75bfd3228b1f..b915c9f9e5d4889340cb49b19a97dee88ad38423 100644
--- a/src/bin/psql/large_obj.c
+++ b/src/bin/psql/large_obj.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/psql/large_obj.c,v 1.55 2010/02/17 04:19:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/large_obj.c,v 1.56 2010/02/26 02:01:19 momjian Exp $
  */
 #include "postgres_fe.h"
 #include "large_obj.h"
@@ -283,7 +283,7 @@ do_lo_list(void)
 		snprintf(buf, sizeof(buf),
 				 "SELECT oid as \"%s\",\n"
 				 "  pg_catalog.pg_get_userbyid(lomowner) as \"%s\",\n"
-				 "  pg_catalog.obj_description(oid, 'pg_largeobject') as \"%s\"\n"
+			"  pg_catalog.obj_description(oid, 'pg_largeobject') as \"%s\"\n"
 				 "  FROM pg_catalog.pg_largeobject_metadata "
 				 "  ORDER BY oid",
 				 gettext_noop("ID"),
@@ -294,8 +294,8 @@ do_lo_list(void)
 	{
 		snprintf(buf, sizeof(buf),
 				 "SELECT loid as \"%s\",\n"
-				 "  pg_catalog.obj_description(loid, 'pg_largeobject') as \"%s\"\n"
-				 "FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) x\n"
+		   "  pg_catalog.obj_description(loid, 'pg_largeobject') as \"%s\"\n"
+			 "FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) x\n"
 				 "ORDER BY 1",
 				 gettext_noop("ID"),
 				 gettext_noop("Description"));
diff --git a/src/bin/psql/mainloop.c b/src/bin/psql/mainloop.c
index 4088c1819782f50b5faf519b2e25825ab2861f66..23904f3f4543b77f2835303594d64336f7412bc6 100644
--- a/src/bin/psql/mainloop.c
+++ b/src/bin/psql/mainloop.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/psql/mainloop.c,v 1.98 2010/01/02 16:57:59 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/mainloop.c,v 1.99 2010/02/26 02:01:19 momjian Exp $
  */
 #include "postgres_fe.h"
 #include "mainloop.h"
@@ -419,7 +419,7 @@ MainLoop(FILE *source)
  * psqlscan.c is #include'd here instead of being compiled on its own.
  * This is because we need postgres_fe.h to be read before any system
  * include files, else things tend to break on platforms that have
- * multiple infrastructures for stdio.h and so on.  flex is absolutely
+ * multiple infrastructures for stdio.h and so on.	flex is absolutely
  * uncooperative about that, so we can't compile psqlscan.c on its own.
  */
 #include "psqlscan.c"
diff --git a/src/bin/psql/print.c b/src/bin/psql/print.c
index fc29cfd90d7d6223f6cbd9fbdd82a930050c4d19..f6acc466dc194b125aff6432db2b729017dfafb7 100644
--- a/src/bin/psql/print.c
+++ b/src/bin/psql/print.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.121 2010/01/30 18:59:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.122 2010/02/26 02:01:19 momjian Exp $
  */
 #include "postgres_fe.h"
 
@@ -49,10 +49,10 @@ const printTextFormat pg_asciiformat =
 {
 	"ascii",
 	{
-		{ "-", "+", "+", "+" },
-		{ "-", "+", "+", "+" },
-		{ "-", "+", "+", "+" },
-		{ "",  "|", "|", "|" }
+		{"-", "+", "+", "+"},
+		{"-", "+", "+", "+"},
+		{"-", "+", "+", "+"},
+		{"", "|", "|", "|"}
 	},
 	"|",
 	"|",
@@ -70,10 +70,10 @@ const printTextFormat pg_asciiformat_old =
 {
 	"old-ascii",
 	{
-		{ "-", "+", "+", "+" },
-		{ "-", "+", "+", "+" },
-		{ "-", "+", "+", "+" },
-		{ "",  "|", "|", "|" }
+		{"-", "+", "+", "+"},
+		{"-", "+", "+", "+"},
+		{"-", "+", "+", "+"},
+		{"", "|", "|", "|"}
 	},
 	":",
 	";",
@@ -92,13 +92,13 @@ const printTextFormat pg_utf8format =
 	"unicode",
 	{
 		/* ─, ┌, ┬, ┐ */
-		{ "\342\224\200", "\342\224\214", "\342\224\254", "\342\224\220" },
+		{"\342\224\200", "\342\224\214", "\342\224\254", "\342\224\220"},
 		/* ─, ├, ┼, ┤ */
-		{ "\342\224\200", "\342\224\234", "\342\224\274", "\342\224\244" },
+		{"\342\224\200", "\342\224\234", "\342\224\274", "\342\224\244"},
 		/* ─, └, ┴, ┘ */
-		{ "\342\224\200", "\342\224\224", "\342\224\264", "\342\224\230" },
+		{"\342\224\200", "\342\224\224", "\342\224\264", "\342\224\230"},
 		/* N/A, │, │, │ */
-		{ "", "\342\224\202", "\342\224\202", "\342\224\202" }
+		{"", "\342\224\202", "\342\224\202", "\342\224\202"}
 	},
 	/* │ */
 	"\342\224\202",
@@ -989,11 +989,11 @@ print_aligned_text(const printTableContent *cont, FILE *fout)
 				 * If left-aligned, pad out remaining space if needed (not
 				 * last column, and/or wrap marks required).
 				 */
-				if (cont->aligns[j] != 'r') /* Left aligned cell */
+				if (cont->aligns[j] != 'r')		/* Left aligned cell */
 				{
 					if (finalspaces ||
 						wrap[j] == PRINT_LINE_WRAP_WRAP ||
-					    wrap[j] == PRINT_LINE_WRAP_NEWLINE)
+						wrap[j] == PRINT_LINE_WRAP_NEWLINE)
 						fprintf(fout, "%*s",
 								width_wrap[j] - chars_to_output, "");
 				}
@@ -1009,9 +1009,9 @@ print_aligned_text(const printTableContent *cont, FILE *fout)
 				/* Print column divider, if not the last column */
 				if (opt_border != 0 && j < col_count - 1)
 				{
-					if (wrap[j+1] == PRINT_LINE_WRAP_WRAP)
+					if (wrap[j + 1] == PRINT_LINE_WRAP_WRAP)
 						fputs(format->midvrule_wrap, fout);
-					else if (wrap[j+1] == PRINT_LINE_WRAP_NEWLINE)
+					else if (wrap[j + 1] == PRINT_LINE_WRAP_NEWLINE)
 						fputs(format->midvrule_nl, fout);
 					else if (col_lineptrs[j + 1][curr_nl_line[j + 1]].ptr == NULL)
 						fputs(format->midvrule_blank, fout);
@@ -1080,9 +1080,9 @@ print_aligned_vertical_line(const printTableContent *cont,
 {
 	const printTextFormat *format = get_line_style(cont->opt);
 	const printTextLineFormat *lformat = &format->lrule[pos];
-	unsigned short	opt_border = cont->opt->border;
-	unsigned int	i;
-	int		reclen = 0;
+	unsigned short opt_border = cont->opt->border;
+	unsigned int i;
+	int			reclen = 0;
 
 	if (opt_border == 2)
 		fprintf(fout, "%s%s", lformat->leftvrule, lformat->hrule);
@@ -1231,8 +1231,8 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
 			break;
 
 		if (i == 0)
-	  		pos = PRINT_RULE_TOP;
-		else if (!(*(ptr+1)))
+			pos = PRINT_RULE_TOP;
+		else if (!(*(ptr + 1)))
 			pos = PRINT_RULE_BOTTOM;
 		else
 			pos = PRINT_RULE_MIDDLE;
@@ -2555,8 +2555,8 @@ const printTextFormat *
 get_line_style(const printTableOpt *opt)
 {
 	/*
-	 * Note: this function mainly exists to preserve the convention that
-	 * a printTableOpt struct can be initialized to zeroes to get default
+	 * Note: this function mainly exists to preserve the convention that a
+	 * printTableOpt struct can be initialized to zeroes to get default
 	 * behavior.
 	 */
 	if (opt->line_style != NULL)
diff --git a/src/bin/psql/print.h b/src/bin/psql/print.h
index a2b430c57ff1d7c3194519dd4ae58b37a385001d..9df6146d5bab3a6e2f08f498f4165928d3b3b2d1 100644
--- a/src/bin/psql/print.h
+++ b/src/bin/psql/print.h
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/psql/print.h,v 1.43 2010/01/02 16:57:59 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/print.h,v 1.44 2010/02/26 02:01:19 momjian Exp $
  */
 #ifndef PRINT_H
 #define PRINT_H
@@ -52,19 +52,19 @@ typedef enum printTextLineWrap
 typedef struct printTextFormat
 {
 	/* A complete line style */
-	const char *name;				/* for display purposes */
-	printTextLineFormat lrule[4];	/* indexed by enum printTextRule */
+	const char *name;			/* for display purposes */
+	printTextLineFormat lrule[4];		/* indexed by enum printTextRule */
 	const char *midvrule_nl;	/* vertical line for continue after newline */
 	const char *midvrule_wrap;	/* vertical line for wrapped data */
-	const char *midvrule_blank;	/* vertical line for blank data */
-	const char *header_nl_left;	/* left mark after newline */
-	const char *header_nl_right; /* right mark for newline */
+	const char *midvrule_blank; /* vertical line for blank data */
+	const char *header_nl_left; /* left mark after newline */
+	const char *header_nl_right;	/* right mark for newline */
 	const char *nl_left;		/* left mark after newline */
 	const char *nl_right;		/* right mark for newline */
 	const char *wrap_left;		/* left mark after wrapped data */
 	const char *wrap_right;		/* right mark for wrapped data */
-	bool		wrap_right_border;	/* use right-hand border for wrap marks
-									 * when border=0? */
+	bool		wrap_right_border;		/* use right-hand border for wrap
+										 * marks when border=0? */
 } printTextFormat;
 
 typedef struct printTableOpt
diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c
index 3f49702394576e3c11752f2cbd27ccf47cbfa64f..c34de5cd8cbe0d73e90d041fe1567699af9fa980 100644
--- a/src/bin/psql/startup.c
+++ b/src/bin/psql/startup.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/psql/startup.c,v 1.161 2010/02/16 21:07:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/startup.c,v 1.162 2010/02/26 02:01:19 momjian Exp $
  */
 #include "postgres_fe.h"
 
@@ -175,22 +175,22 @@ main(int argc, char *argv[])
 		const char **keywords = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords));
 		const char **values = pg_malloc(PARAMS_ARRAY_SIZE * sizeof(*values));
 
-		keywords[0]	= "host";
-		values[0]	= options.host;
-		keywords[1]	= "port";
-		values[1]	= options.port;
-		keywords[2]	= "user";
-		values[2]	= options.username;
-		keywords[3]	= "password";
-		values[3]	= password;
-		keywords[4]	= "dbname";
-		values[4]	= (options.action == ACT_LIST_DB &&
-						options.dbname == NULL) ?
-						"postgres" : options.dbname;
-		keywords[5]	= "fallback_application_name";
-		values[5]	= pset.progname;
-		keywords[6]	= NULL;
-		values[6]	= NULL;
+		keywords[0] = "host";
+		values[0] = options.host;
+		keywords[1] = "port";
+		values[1] = options.port;
+		keywords[2] = "user";
+		values[2] = options.username;
+		keywords[3] = "password";
+		values[3] = password;
+		keywords[4] = "dbname";
+		values[4] = (options.action == ACT_LIST_DB &&
+					 options.dbname == NULL) ?
+			"postgres" : options.dbname;
+		keywords[5] = "fallback_application_name";
+		values[5] = pset.progname;
+		keywords[6] = NULL;
+		values[6] = NULL;
 
 		new_pass = false;
 		pset.db = PQconnectdbParams(keywords, values, true);
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index b3dbfb20217c30c293aafb09ed1d803b83e8fdec..38882096a26a106673065bf4674a2eda0802954b 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.195 2010/02/17 04:09:40 itagaki Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.196 2010/02/26 02:01:20 momjian Exp $
  */
 
 /*----------------------------------------------------------------------
@@ -1437,7 +1437,7 @@ psql_completion(char *text, int start, int end)
 			 pg_strcasecmp(prev2_wd, "ON") == 0)
 	{
 		static const char *const list_CREATE_INDEX2[] =
-		{"(", "USING",  NULL};
+		{"(", "USING", NULL};
 
 		COMPLETE_WITH_LIST(list_CREATE_INDEX2);
 	}
@@ -1577,7 +1577,11 @@ psql_completion(char *text, int start, int end)
 
 		COMPLETE_WITH_LIST(list_CREATETRIGGER2);
 	}
-	/* complete CREATE TRIGGER <name> BEFORE,AFTER event ON with a list of tables */
+
+	/*
+	 * complete CREATE TRIGGER <name> BEFORE,AFTER event ON with a list of
+	 * tables
+	 */
 	else if (pg_strcasecmp(prev5_wd, "TRIGGER") == 0 &&
 			 (pg_strcasecmp(prev3_wd, "BEFORE") == 0 ||
 			  pg_strcasecmp(prev3_wd, "AFTER") == 0) &&
@@ -1692,6 +1696,7 @@ psql_completion(char *text, int start, int end)
 	}
 
 /* DO */
+
 	/*
 	 * Complete DO with LANGUAGE.
 	 */
@@ -1966,8 +1971,8 @@ psql_completion(char *text, int start, int end)
 		COMPLETE_WITH_ATTR(prev2_wd, "");
 
 	/*
-	 * Complete INSERT INTO <table> with "(" or "VALUES" or "SELECT" or "TABLE"
-	 * or "DEFAULT VALUES"
+	 * Complete INSERT INTO <table> with "(" or "VALUES" or "SELECT" or
+	 * "TABLE" or "DEFAULT VALUES"
 	 */
 	else if (pg_strcasecmp(prev3_wd, "INSERT") == 0 &&
 			 pg_strcasecmp(prev2_wd, "INTO") == 0)
@@ -2452,8 +2457,8 @@ psql_completion(char *text, int start, int end)
 	{
 		static const char *const my_list[] =
 		{"format", "border", "expanded",
-		 "null", "fieldsep", "tuples_only", "title", "tableattr",
-		 "linestyle", "pager", "recordsep", NULL};
+			"null", "fieldsep", "tuples_only", "title", "tableattr",
+		"linestyle", "pager", "recordsep", NULL};
 
 		COMPLETE_WITH_LIST(my_list);
 	}
@@ -2955,7 +2960,7 @@ previous_word(int point, int skip)
 
 	while (skip-- >= 0)
 	{
-		int		parentheses = 0;
+		int			parentheses = 0;
 
 		/* now find the first non-space which then constitutes the end */
 		for (i = point; i >= 0; i--)
diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c
index 026eb80a025fd941ca3a7a451d8393af5ad7287b..fc5a325dbd51edcce4cff2715f80c9c629fcc00e 100644
--- a/src/bin/scripts/common.c
+++ b/src/bin/scripts/common.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/bin/scripts/common.c,v 1.39 2010/02/05 03:09:05 joe Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/common.c,v 1.40 2010/02/26 02:01:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -118,20 +118,20 @@ connectDatabase(const char *dbname, const char *pghost, const char *pgport,
 			exit(1);
 		}
 
-		keywords[0]	= "host";
-		values[0]	= pghost;
-		keywords[1]	= "port";
-		values[1]	= pgport;
-		keywords[2]	= "user";
-		values[2]	= pguser;
-		keywords[3]	= "password";
-		values[3]	= password;
-		keywords[4]	= "dbname";
-		values[4]	= dbname;
-		keywords[5]	= "fallback_application_name";
-		values[5]	= progname;
-		keywords[6]	= NULL;
-		values[6]	= NULL;
+		keywords[0] = "host";
+		values[0] = pghost;
+		keywords[1] = "port";
+		values[1] = pgport;
+		keywords[2] = "user";
+		values[2] = pguser;
+		keywords[3] = "password";
+		values[3] = password;
+		keywords[4] = "dbname";
+		values[4] = dbname;
+		keywords[5] = "fallback_application_name";
+		values[5] = progname;
+		keywords[6] = NULL;
+		values[6] = NULL;
 
 		new_pass = false;
 		conn = PQconnectdbParams(keywords, values, true);
diff --git a/src/bin/scripts/droplang.c b/src/bin/scripts/droplang.c
index 5b1fd81c6c26ac6e6cee654cd05587930b665e57..b634506f0fb5a1a2df33a67c444f77447764196e 100644
--- a/src/bin/scripts/droplang.c
+++ b/src/bin/scripts/droplang.c
@@ -5,7 +5,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/bin/scripts/droplang.c,v 1.33 2010/01/02 16:58:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/droplang.c,v 1.34 2010/02/26 02:01:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -190,8 +190,8 @@ main(int argc, char *argv[])
 	executeCommand(conn, "SET search_path = pg_catalog;", progname, echo);
 
 	/*
-	 * Make sure the language is installed and find the OIDs of the
-	 * language support functions
+	 * Make sure the language is installed and find the OIDs of the language
+	 * support functions
 	 */
 	printfPQExpBuffer(&sql, "SELECT lanplcallfoid, laninline, lanvalidator "
 					  "FROM pg_language WHERE lanname = '%s' AND lanispl;",
@@ -277,7 +277,7 @@ main(int argc, char *argv[])
 		PQclear(result);
 	}
 	else
-		keepinline = true;	/* don't try to delete it */
+		keepinline = true;		/* don't try to delete it */
 
 	/*
 	 * Find the inline handler name
diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c
index af1dc8c397d638a98b2298fc3cc8175b91d53641..21abfedae71fad741603e16be3dcc90835fcce2d 100644
--- a/src/bin/scripts/vacuumdb.c
+++ b/src/bin/scripts/vacuumdb.c
@@ -5,7 +5,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/bin/scripts/vacuumdb.c,v 1.35 2010/02/17 04:19:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/vacuumdb.c,v 1.36 2010/02/26 02:01:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -313,7 +313,7 @@ vacuum_all_databases(bool full, bool verbose, bool and_analyze, bool analyze_onl
 		}
 
 		vacuum_one_database(dbname, full, verbose, and_analyze, analyze_only,
-							freeze, NULL, host, port, username, prompt_password,
+						 freeze, NULL, host, port, username, prompt_password,
 							progname, echo);
 	}
 
diff --git a/src/include/access/genam.h b/src/include/access/genam.h
index f355c23149f0ac29c3364923273b1a96b5e6a81a..c2731ba651f4e5088017fb5ba6de2ef611724cfd 100644
--- a/src/include/access/genam.h
+++ b/src/include/access/genam.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/genam.h,v 1.83 2010/02/08 04:33:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/genam.h,v 1.84 2010/02/26 02:01:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -92,7 +92,7 @@ typedef struct SysScanDescData *SysScanDesc;
  * blocking to see if a conflicting transaction commits.
  *
  * For deferrable unique constraints, UNIQUE_CHECK_PARTIAL is specified at
- * insertion time.  The index AM should test if the tuple is unique, but
+ * insertion time.	The index AM should test if the tuple is unique, but
  * should not throw error, block, or prevent the insertion if the tuple
  * appears not to be unique.  We'll recheck later when it is time for the
  * constraint to be enforced.  The AM must return true if the tuple is
@@ -101,7 +101,7 @@ typedef struct SysScanDescData *SysScanDesc;
  *
  * When it is time to recheck the deferred constraint, a pseudo-insertion
  * call is made with UNIQUE_CHECK_EXISTING.  The tuple is already in the
- * index in this case, so it should not be inserted again.  Rather, just
+ * index in this case, so it should not be inserted again.	Rather, just
  * check for conflicting live tuples (possibly blocking).
  */
 typedef enum IndexUniqueCheck
diff --git a/src/include/access/gin.h b/src/include/access/gin.h
index 48965613608b6af67cf1f5ea2d9382eb96a2e714..c935838576d6629c6a6b135aa8dbf243b9a9b650 100644
--- a/src/include/access/gin.h
+++ b/src/include/access/gin.h
@@ -4,7 +4,7 @@
  *
  *	Copyright (c) 2006-2010, PostgreSQL Global Development Group
  *
- *	$PostgreSQL: pgsql/src/include/access/gin.h,v 1.37 2010/02/11 14:29:50 teodor Exp $
+ *	$PostgreSQL: pgsql/src/include/access/gin.h,v 1.38 2010/02/26 02:01:20 momjian Exp $
  *--------------------------------------------------------------------------
  */
 #ifndef GIN_H
@@ -576,8 +576,8 @@ typedef struct
 	GinState   *ginstate;
 	long		allocatedMemory;
 	uint32		length;
-	EntryAccumulator   *entryallocator;
-	ItemPointerData	   *tmpList;
+	EntryAccumulator *entryallocator;
+	ItemPointerData *tmpList;
 	RBTree	   *tree;
 	RBTreeIterator *iterator;
 } BuildAccumulator;
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 64eac4cd8963ca9e7e23dabb1273d72f99d98bd8..1f26b376f5c4d0b4afad0b0a7989746e84974721 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/heapam.h,v 1.147 2010/02/08 04:33:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/heapam.h,v 1.148 2010/02/26 02:01:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -127,7 +127,7 @@ extern void heap2_redo(XLogRecPtr lsn, XLogRecord *rptr);
 extern void heap2_desc(StringInfo buf, uint8 xl_info, char *rec);
 
 extern XLogRecPtr log_heap_cleanup_info(RelFileNode rnode,
-				TransactionId latestRemovedXid);
+					  TransactionId latestRemovedXid);
 extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
 			   OffsetNumber *redirected, int nredirected,
 			   OffsetNumber *nowdead, int ndead,
diff --git a/src/include/access/htup.h b/src/include/access/htup.h
index b1202fc4e7119dfb0ac9bb1a89e32bd882bb71cb..3be701bf6f685b60dc7f50ace39791dc39876c77 100644
--- a/src/include/access/htup.h
+++ b/src/include/access/htup.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/htup.h,v 1.112 2010/02/08 14:10:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/htup.h,v 1.113 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -175,12 +175,12 @@ typedef HeapTupleHeaderData *HeapTupleHeader;
 #define HEAP_XMAX_INVALID		0x0800	/* t_xmax invalid/aborted */
 #define HEAP_XMAX_IS_MULTI		0x1000	/* t_xmax is a MultiXactId */
 #define HEAP_UPDATED			0x2000	/* this is UPDATEd version of row */
-#define HEAP_MOVED_OFF			0x4000	/* moved to another place by
-										 * pre-9.0 VACUUM FULL; kept
-										 * for binary upgrade support */
-#define HEAP_MOVED_IN			0x8000	/* moved from another place by
-										 * pre-9.0 VACUUM FULL; kept
-										 * for binary upgrade support */
+#define HEAP_MOVED_OFF			0x4000	/* moved to another place by pre-9.0
+										 * VACUUM FULL; kept for binary
+										 * upgrade support */
+#define HEAP_MOVED_IN			0x8000	/* moved from another place by pre-9.0
+										 * VACUUM FULL; kept for binary
+										 * upgrade support */
 #define HEAP_MOVED (HEAP_MOVED_OFF | HEAP_MOVED_IN)
 
 #define HEAP_XACT_MASK			0xFFE0	/* visibility-related bits */
@@ -642,7 +642,7 @@ typedef struct xl_heap_update
 	xl_heaptid	target;			/* deleted tuple id */
 	ItemPointerData newtid;		/* new inserted tuple id */
 	bool		all_visible_cleared;	/* PD_ALL_VISIBLE was cleared */
-	bool		new_all_visible_cleared;	/* same for the page of newtid */
+	bool		new_all_visible_cleared;		/* same for the page of newtid */
 	/* NEW TUPLE xl_heap_header AND TUPLE DATA FOLLOWS AT END OF STRUCT */
 } xl_heap_update;
 
@@ -663,7 +663,7 @@ typedef struct xl_heap_clean
 {
 	RelFileNode node;
 	BlockNumber block;
-	TransactionId	latestRemovedXid;
+	TransactionId latestRemovedXid;
 	uint16		nredirected;
 	uint16		ndead;
 	/* OFFSET NUMBERS FOLLOW */
@@ -678,8 +678,8 @@ typedef struct xl_heap_clean
  */
 typedef struct xl_heap_cleanup_info
 {
-	RelFileNode 	node;
-	TransactionId	latestRemovedXid;
+	RelFileNode node;
+	TransactionId latestRemovedXid;
 } xl_heap_cleanup_info;
 
 #define SizeOfHeapCleanupInfo (sizeof(xl_heap_cleanup_info))
@@ -728,7 +728,7 @@ typedef struct xl_heap_freeze
 #define SizeOfHeapFreeze (offsetof(xl_heap_freeze, cutoff_xid) + sizeof(TransactionId))
 
 extern void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
-										TransactionId *latestRemovedXid);
+									   TransactionId *latestRemovedXid);
 
 /* HeapTupleHeader functions implemented in utils/time/combocid.c */
 extern CommandId HeapTupleHeaderGetCmin(HeapTupleHeader tup);
diff --git a/src/include/access/itup.h b/src/include/access/itup.h
index a2522337d7c6b08b0ea5696e2833e85d54ca375d..c2d3eac99580321a7edbed2562ce50b46284f1eb 100644
--- a/src/include/access/itup.h
+++ b/src/include/access/itup.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/itup.h,v 1.54 2010/01/10 04:26:36 rhaas Exp $
+ * $PostgreSQL: pgsql/src/include/access/itup.h,v 1.55 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -144,7 +144,7 @@ extern IndexTuple index_form_tuple(TupleDesc tupleDescriptor,
 extern Datum nocache_index_getattr(IndexTuple tup, int attnum,
 					  TupleDesc tupleDesc);
 extern void index_deform_tuple(IndexTuple tup, TupleDesc tupleDescriptor,
-				 Datum *values, bool *isnull);
+				   Datum *values, bool *isnull);
 extern IndexTuple CopyIndexTuple(IndexTuple source);
 
 #endif   /* ITUP_H */
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index f3898a41408f4c90fb6c5823104f5e2789fb7a59..8b7c33e61f10d1db7be4ee7b2af967082c36e703 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.129 2010/02/13 00:59:58 sriggs Exp $
+ * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.130 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -220,8 +220,10 @@ typedef struct BTMetaPageData
 #define XLOG_BTREE_NEWROOT		0xA0	/* new root page */
 #define XLOG_BTREE_DELETE_PAGE_HALF 0xB0		/* page deletion that makes
 												 * parent half-dead */
-#define XLOG_BTREE_VACUUM		0xC0	/* delete entries on a page during vacuum */
-#define XLOG_BTREE_REUSE_PAGE	0xD0	/* old page is about to be reused from FSM */
+#define XLOG_BTREE_VACUUM		0xC0	/* delete entries on a page during
+										 * vacuum */
+#define XLOG_BTREE_REUSE_PAGE	0xD0	/* old page is about to be reused from
+										 * FSM */
 
 /*
  * All that we need to find changed index tuple
@@ -314,8 +316,8 @@ typedef struct xl_btree_delete
 {
 	RelFileNode node;
 	BlockNumber block;
-	TransactionId	latestRemovedXid;
-	int			numItems;		 /* number of items in the offset array */
+	TransactionId latestRemovedXid;
+	int			numItems;		/* number of items in the offset array */
 
 	/* TARGET OFFSET NUMBERS FOLLOW AT THE END */
 } xl_btree_delete;
@@ -329,7 +331,7 @@ typedef struct xl_btree_reuse_page
 {
 	RelFileNode node;
 	BlockNumber block;
-	TransactionId	latestRemovedXid;
+	TransactionId latestRemovedXid;
 } xl_btree_reuse_page;
 
 #define SizeOfBtreeReusePage	(sizeof(xl_btree_reuse_page))
@@ -341,7 +343,7 @@ typedef struct xl_btree_reuse_page
  *
  * The correctness requirement for applying these changes during recovery is
  * that we must do one of these two things for every block in the index:
- * 		* lock the block for cleanup and apply any required changes
+ *		* lock the block for cleanup and apply any required changes
  *		* EnsureBlockUnpinned()
  * The purpose of this is to ensure that no index scans started before we
  * finish scanning the index are still running by the time we begin to remove
@@ -361,7 +363,7 @@ typedef struct xl_btree_vacuum
 	RelFileNode node;
 	BlockNumber block;
 	BlockNumber lastBlockVacuumed;
-	int			numItems;		 /* number of items in the offset array */
+	int			numItems;		/* number of items in the offset array */
 
 	/* TARGET OFFSET NUMBERS FOLLOW */
 } xl_btree_vacuum;
@@ -590,7 +592,7 @@ extern bool _bt_page_recyclable(Page page);
 extern void _bt_delitems(Relation rel, Buffer buf,
 			 OffsetNumber *itemnos, int nitems, bool isVacuum,
 			 BlockNumber lastBlockVacuumed);
-extern int _bt_pagedel(Relation rel, Buffer buf, BTStack stack);
+extern int	_bt_pagedel(Relation rel, Buffer buf, BTStack stack);
 
 /*
  * prototypes for functions in nbtsearch.c
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index cc8588b7b10161a79602a2e2f325a3bfb11da073..e6677552403396055f14501558ffb65363290c14 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/relscan.h,v 1.69 2010/01/02 16:58:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/relscan.h,v 1.70 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -68,7 +68,8 @@ typedef struct IndexScanDescData
 	/* signaling to index AM about killing index tuples */
 	bool		kill_prior_tuple;		/* last-returned tuple is dead */
 	bool		ignore_killed_tuples;	/* do not return killed entries */
-	bool		xactStartedInRecovery;	/* prevents killing/seeing killed tuples */
+	bool		xactStartedInRecovery;	/* prevents killing/seeing killed
+										 * tuples */
 
 	/* index access method's private state */
 	void	   *opaque;			/* access-method-specific info */
diff --git a/src/include/access/skey.h b/src/include/access/skey.h
index 1e8fa7a9752389034bbe7ce28d57fac99019d03d..8cf71377f2a6e1f919916e890ffed258861f8014 100644
--- a/src/include/access/skey.h
+++ b/src/include/access/skey.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/skey.h,v 1.39 2010/01/02 16:58:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/skey.h,v 1.40 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -114,13 +114,14 @@ typedef ScanKeyData *ScanKey;
  * bits should be defined here).  Bits 16-31 are reserved for use within
  * individual index access methods.
  */
-#define SK_ISNULL			0x0001	/* sk_argument is NULL */
-#define SK_UNARY			0x0002	/* unary operator (not supported!) */
-#define SK_ROW_HEADER		0x0004	/* row comparison header (see above) */
-#define SK_ROW_MEMBER		0x0008	/* row comparison member (see above) */
-#define SK_ROW_END			0x0010	/* last row comparison member */
-#define SK_SEARCHNULL		0x0020	/* scankey represents "col IS NULL" */
-#define SK_SEARCHNOTNULL	0x0040	/* scankey represents "col IS NOT NULL" */
+#define SK_ISNULL			0x0001		/* sk_argument is NULL */
+#define SK_UNARY			0x0002		/* unary operator (not supported!) */
+#define SK_ROW_HEADER		0x0004		/* row comparison header (see above) */
+#define SK_ROW_MEMBER		0x0008		/* row comparison member (see above) */
+#define SK_ROW_END			0x0010		/* last row comparison member */
+#define SK_SEARCHNULL		0x0020		/* scankey represents "col IS NULL" */
+#define SK_SEARCHNOTNULL	0x0040		/* scankey represents "col IS NOT
+										 * NULL" */
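The three NULL-related flags above interact: an ordinary operator key whose argument is NULL (SK_ISNULL alone) can never be satisfied, while SK_SEARCHNULL and SK_SEARCHNOTNULL mark genuine IS [NOT] NULL tests that index code can evaluate. A hedged sketch of that distinction; the helper name is hypothetical:

static bool
scankey_is_satisfiable(ScanKey key)
{
	if (key->sk_flags & SK_ISNULL)
		return (key->sk_flags & (SK_SEARCHNULL | SK_SEARCHNOTNULL)) != 0;
	return true;				/* non-NULL argument: leave it to the operator */
}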
 
 
 /*
diff --git a/src/include/access/slru.h b/src/include/access/slru.h
index 4cc40ba5f7087153773ea75cd1db8168212e7cbe..aff5578c8f266a5612b317e76e8971d3a6be492d 100644
--- a/src/include/access/slru.h
+++ b/src/include/access/slru.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/slru.h,v 1.26 2010/02/16 22:34:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/slru.h,v 1.27 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -31,7 +31,7 @@
  * segment and page numbers in SimpleLruTruncate (see PagePrecedes()).
  *
  * Note: slru.c currently assumes that segment file names will be four hex
- * digits.  This sets a lower bound on the segment size (64K transactions
+ * digits.	This sets a lower bound on the segment size (64K transactions
  * for 32-bit TransactionIds).
  */
 #define SLRU_PAGES_PER_SEGMENT	32
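The 64K figure in the comment above follows from simple arithmetic, assuming four-hex-digit segment names and 32-bit TransactionIds:

/*
 *   distinct segment names = 16^4        = 65536
 *   transaction ID space   = 2^32        = 4294967296
 *   minimum xids/segment   = 2^32 / 2^16 = 65536  (64K)
 */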
diff --git a/src/include/access/tupconvert.h b/src/include/access/tupconvert.h
index 65dc4fb5448fc3203e2b119ec04ff9f584e8e565..3f3fc280e3049f77d0725a4152287cce68cfd469 100644
--- a/src/include/access/tupconvert.h
+++ b/src/include/access/tupconvert.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/tupconvert.h,v 1.2 2010/01/02 16:58:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/tupconvert.h,v 1.3 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -30,12 +30,12 @@ typedef struct TupleConversionMap
 
 
 extern TupleConversionMap *convert_tuples_by_position(TupleDesc indesc,
-													  TupleDesc outdesc,
-													  const char *msg);
+						   TupleDesc outdesc,
+						   const char *msg);
 
 extern TupleConversionMap *convert_tuples_by_name(TupleDesc indesc,
-												  TupleDesc outdesc,
-												  const char *msg);
+					   TupleDesc outdesc,
+					   const char *msg);
 
 extern HeapTuple do_convert_tuple(HeapTuple tuple, TupleConversionMap *map);
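A brief usage sketch for the conversion API declared above; "indesc", "outdesc", and "tup" are placeholders supplied by the caller, and the error message is arbitrary:

	TupleConversionMap *map;

	map = convert_tuples_by_name(indesc, outdesc,
								 "could not convert row type");
	if (map != NULL)			/* NULL means no conversion is needed */
		tup = do_convert_tuple(tup, map);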
 
diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h
index a8ec7a9e797c2cb66d1858c7da5f0da93f6fb358..bb3be61365d8abdf4efb768ef9cf269ae90f41ed 100644
--- a/src/include/access/tuptoaster.h
+++ b/src/include/access/tuptoaster.h
@@ -6,7 +6,7 @@
  *
  * Copyright (c) 2000-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/include/access/tuptoaster.h,v 1.45 2010/01/02 16:58:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/tuptoaster.h,v 1.46 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -27,7 +27,7 @@
 /*
  * Find the maximum size of a tuple if there are to be N tuples per page.
  */
-#define MaximumBytesPerTuple(tuplesPerPage)	\
+#define MaximumBytesPerTuple(tuplesPerPage) \
 	MAXALIGN_DOWN((BLCKSZ - \
 				   MAXALIGN(SizeOfPageHeaderData + (tuplesPerPage) * sizeof(ItemIdData))) \
 				  / (tuplesPerPage))
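Plugging representative numbers into the macro above (an 8 kB block, a 24-byte page header, 4-byte line pointers, and MAXALIGN of 8), four tuples per page works out as:

/*
 *   MaximumBytesPerTuple(4)
 *     = MAXALIGN_DOWN((8192 - MAXALIGN(24 + 4 * 4)) / 4)
 *     = MAXALIGN_DOWN((8192 - 40) / 4)
 *     = MAXALIGN_DOWN(2038)
 *     = 2032 bytes
 */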
@@ -60,12 +60,12 @@
  * The code will also consider moving MAIN data out-of-line, but only as a
  * last resort if the previous steps haven't reached the target tuple size.
  * In this phase we use a different target size, currently equal to the
- * largest tuple that will fit on a heap page.  This is reasonable since
+ * largest tuple that will fit on a heap page.	This is reasonable since
  * the user has told us to keep the data in-line if at all possible.
  */
 #define TOAST_TUPLES_PER_PAGE_MAIN	1
 
-#define TOAST_TUPLE_TARGET_MAIN	MaximumBytesPerTuple(TOAST_TUPLES_PER_PAGE_MAIN)
+#define TOAST_TUPLE_TARGET_MAIN MaximumBytesPerTuple(TOAST_TUPLES_PER_PAGE_MAIN)
 
 /*
  * If an index value is larger than TOAST_INDEX_TARGET, we will try to
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
index fb9f22d5427667002c77b34615a812d11575ed00..12ec693f4431f805aa105086928e32d4bedf61ae 100644
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/xact.h,v 1.102 2010/02/13 16:15:47 sriggs Exp $
+ * $PostgreSQL: pgsql/src/include/access/xact.h,v 1.103 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -88,9 +88,9 @@ typedef void (*SubXactCallback) (SubXactEvent event, SubTransactionId mySubid,
 
 typedef struct xl_xact_assignment
 {
-	TransactionId	xtop;		/* assigned XID's top-level XID */
-	int				nsubxacts;	/* number of subtransaction XIDs */
-	TransactionId	xsub[1];	/* assigned subxids */
+	TransactionId xtop;			/* assigned XID's top-level XID */
+	int			nsubxacts;		/* number of subtransaction XIDs */
+	TransactionId xsub[1];		/* assigned subxids */
 } xl_xact_assignment;
 
 #define MinSizeOfXactAssignment offsetof(xl_xact_assignment, xsub)
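Since xsub[] is a variable-length array, the size of a concrete assignment record is the fixed prefix plus one TransactionId per subtransaction; a sketch of the computation, with nsubxacts supplied by the caller:

	Size		len = MinSizeOfXactAssignment +
					  nsubxacts * sizeof(TransactionId);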
@@ -136,6 +136,7 @@ typedef struct xl_xact_abort
 	RelFileNode xnodes[1];		/* VARIABLE LENGTH ARRAY */
 	/* ARRAY OF ABORTED SUBTRANSACTION XIDs FOLLOWS */
 } xl_xact_abort;
+
 /* Note the intentional lack of an invalidation message array c.f. commit */
 
 #define MinSizeOfXactAbort offsetof(xl_xact_abort, xnodes)
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 60b98dd66466545cc57936e42ee109d133d3f39c..581391125013050a416b76b5087024542c9a22d3 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/xlog.h,v 1.102 2010/02/08 04:33:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/xlog.h,v 1.103 2010/02/26 02:01:21 momjian Exp $
  */
 #ifndef XLOG_H
 #define XLOG_H
@@ -132,7 +132,7 @@ typedef struct XLogRecData
 	struct XLogRecData *next;	/* next struct in chain, or NULL */
 } XLogRecData;
 
-extern PGDLLIMPORT TimeLineID ThisTimeLineID;		/* current TLI */
+extern PGDLLIMPORT TimeLineID ThisTimeLineID;	/* current TLI */
 
 /*
  * Prior to 8.4, all activity during recovery was carried out by Startup
@@ -182,7 +182,7 @@ extern char *XLogArchiveCommand;
 extern int	XLogArchiveTimeout;
 extern bool log_checkpoints;
 extern bool XLogRequestRecoveryConnections;
-extern int MaxStandbyDelay;
+extern int	MaxStandbyDelay;
 
 #define XLogArchivingActive()	(XLogArchiveMode)
 #define XLogArchiveCommandSet() (XLogArchiveCommand[0] != '\0')
@@ -200,7 +200,7 @@ extern int	MaxWalSenders;
 #define XLogIsNeeded() (XLogArchivingActive() || (MaxWalSenders > 0))
 
 /* Do we need to WAL-log information required only for Hot Standby? */
-#define XLogStandbyInfoActive()	(XLogRequestRecoveryConnections && XLogIsNeeded())
+#define XLogStandbyInfoActive() (XLogRequestRecoveryConnections && XLogIsNeeded())
 
 #ifdef WAL_DEBUG
 extern bool XLOG_DEBUG;
@@ -214,8 +214,9 @@ extern bool XLOG_DEBUG;
 
 /* These directly affect the behavior of CreateCheckPoint and subsidiaries */
 #define CHECKPOINT_IS_SHUTDOWN	0x0001	/* Checkpoint is for shutdown */
-#define CHECKPOINT_END_OF_RECOVERY	0x0002	/* Like shutdown checkpoint, but
-											 * issued at end of WAL recovery */
+#define CHECKPOINT_END_OF_RECOVERY	0x0002		/* Like shutdown checkpoint,
+												 * but issued at end of WAL
+												 * recovery */
 #define CHECKPOINT_IMMEDIATE	0x0004	/* Do it without delays */
 #define CHECKPOINT_FORCE		0x0008	/* Force even if no activity */
 /* These are important to RequestCheckpoint */
@@ -250,8 +251,8 @@ extern XLogRecPtr XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata);
 extern void XLogFlush(XLogRecPtr RecPtr);
 extern void XLogBackgroundFlush(void);
 extern bool XLogNeedsFlush(XLogRecPtr RecPtr);
-extern int	XLogFileInit(uint32 log, uint32 seg,
-						 bool *use_existent, bool use_lock);
+extern int XLogFileInit(uint32 log, uint32 seg,
+			 bool *use_existent, bool use_lock);
 extern int	XLogFileOpen(uint32 log, uint32 seg);
 
 
diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h
index cfb7f0a4de668cfee137ff543b26a4b09988857f..50b73dbec2093667e2fdeb93d37c9013685c9759 100644
--- a/src/include/access/xlog_internal.h
+++ b/src/include/access/xlog_internal.h
@@ -11,7 +11,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/access/xlog_internal.h,v 1.28 2010/01/15 09:19:06 heikki Exp $
+ * $PostgreSQL: pgsql/src/include/access/xlog_internal.h,v 1.29 2010/02/26 02:01:21 momjian Exp $
  */
 #ifndef XLOG_INTERNAL_H
 #define XLOG_INTERNAL_H
@@ -152,15 +152,15 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader;
 	} while (0)
 
 /* Align a record pointer to next page */
-#define NextLogPage(recptr)	\
+#define NextLogPage(recptr) \
 	do {	\
 		if (recptr.xrecoff % XLOG_BLCKSZ != 0)	\
 			recptr.xrecoff +=	\
 				(XLOG_BLCKSZ - recptr.xrecoff % XLOG_BLCKSZ);	\
-		if (recptr.xrecoff >= XLogFileSize)	\
+		if (recptr.xrecoff >= XLogFileSize) \
 		{	\
 			(recptr.xlogid)++;	\
-			recptr.xrecoff = 0;	\
+			recptr.xrecoff = 0; \
 		}	\
 	} while (0)
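The effect of NextLogPage, traced with concrete numbers (assuming XLOG_BLCKSZ is 8192):

/*
 *   before: recptr.xrecoff = 12345
 *   after : recptr.xrecoff = 16384	(rounded up to the next page boundary)
 *
 * If the rounded offset reaches XLogFileSize, xlogid is incremented and
 * xrecoff wraps back to 0, i.e. the pointer moves to the next log file.
 */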
 
diff --git a/src/include/c.h b/src/include/c.h
index 7578f572d3cbb11730fd1e43f8ea1d4cb09fdead..f63dd20bace220cb40a41e270fa0e2b79bfef4a0 100644
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/c.h,v 1.239 2010/01/07 04:53:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/c.h,v 1.240 2010/02/26 02:01:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -270,7 +270,6 @@ typedef long int int64;
 #ifndef HAVE_UINT64
 typedef unsigned long int uint64;
 #endif
-
 #elif defined(HAVE_LONG_LONG_INT_64)
 /* We have working support for "long long int", use that */
 
@@ -280,7 +279,6 @@ typedef long long int int64;
 #ifndef HAVE_UINT64
 typedef unsigned long long int uint64;
 #endif
-
 #else
 /* neither HAVE_LONG_INT_64 nor HAVE_LONG_LONG_INT_64 */
 #error must have a working 64-bit integer datatype
diff --git a/src/include/catalog/catalog.h b/src/include/catalog/catalog.h
index b8401df77223a1fd4194a6565d4cb2e5319dfeae..ccbb5a1b28b3ed2568aa0586ebc103c9097f1dce 100644
--- a/src/include/catalog/catalog.h
+++ b/src/include/catalog/catalog.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/catalog.h,v 1.48 2010/02/07 20:48:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/catalog.h,v 1.49 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -45,6 +45,6 @@ extern bool IsSharedRelation(Oid relationId);
 extern Oid	GetNewOid(Relation relation);
 extern Oid GetNewOidWithIndex(Relation relation, Oid indexId,
 				   AttrNumber oidcolumn);
-extern Oid GetNewRelFileNode(Oid reltablespace, Relation pg_class);
+extern Oid	GetNewRelFileNode(Oid reltablespace, Relation pg_class);
 
 #endif   /* CATALOG_H */
diff --git a/src/include/catalog/heap.h b/src/include/catalog/heap.h
index d733dbb32e38067fca914254ef52957a8f5f026f..557c311bc22b4a25bbaddd666acbd25cc4a197c1 100644
--- a/src/include/catalog/heap.h
+++ b/src/include/catalog/heap.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/heap.h,v 1.97 2010/02/07 20:48:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/heap.h,v 1.98 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -112,9 +112,9 @@ extern Form_pg_attribute SystemAttributeByName(const char *attname,
 					  bool relhasoids);
 
 extern void CheckAttributeNamesTypes(TupleDesc tupdesc, char relkind,
-									 bool allow_system_table_mods);
+						 bool allow_system_table_mods);
 
 extern void CheckAttributeType(const char *attname, Oid atttypid,
-							   bool allow_system_table_mods);
+				   bool allow_system_table_mods);
 
 #endif   /* HEAP_H */
diff --git a/src/include/catalog/indexing.h b/src/include/catalog/indexing.h
index 0d29875e52e8fd397c4a12b72fd126e28281cb56..752a35ef0d29ba2249c6e259c3b8039e1c5aa1d5 100644
--- a/src/include/catalog/indexing.h
+++ b/src/include/catalog/indexing.h
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/indexing.h,v 1.116 2010/01/17 22:56:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/indexing.h,v 1.117 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -212,7 +212,7 @@ DECLARE_INDEX(pg_shdepend_reference_index, 1233, on pg_shdepend using btree(refc
 #define SharedDependReferenceIndexId	1233
 
 DECLARE_UNIQUE_INDEX(pg_statistic_relid_att_inh_index, 2696, on pg_statistic using btree(starelid oid_ops, staattnum int2_ops, stainherit bool_ops));
-#define StatisticRelidAttnumInhIndexId  2696
+#define StatisticRelidAttnumInhIndexId	2696
 
 DECLARE_UNIQUE_INDEX(pg_tablespace_oid_index, 2697, on pg_tablespace using btree(oid oid_ops));
 #define TablespaceOidIndexId  2697
@@ -274,7 +274,7 @@ DECLARE_UNIQUE_INDEX(pg_user_mapping_user_server_index, 175, on pg_user_mapping
 #define UserMappingUserServerIndexId	175
 
 DECLARE_UNIQUE_INDEX(pg_default_acl_role_nsp_obj_index, 827, on pg_default_acl using btree(defaclrole oid_ops, defaclnamespace oid_ops, defaclobjtype char_ops));
-#define DefaultAclRoleNspObjIndexId	827
+#define DefaultAclRoleNspObjIndexId 827
 DECLARE_UNIQUE_INDEX(pg_default_acl_oid_index, 828, on pg_default_acl using btree(oid oid_ops));
 #define DefaultAclOidIndexId	828
 
diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h
index 00d0dbc975e57647d7b4d69568348d86c4b44503..2296fa2708c093d0e93d2d29852eb94efc26c290 100644
--- a/src/include/catalog/pg_class.h
+++ b/src/include/catalog/pg_class.h
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/pg_class.h,v 1.121 2010/02/07 20:48:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_class.h,v 1.122 2010/02/26 02:01:21 momjian Exp $
  *
  * NOTES
  *	  the genbki.pl script reads this file and generates .bki
@@ -33,11 +33,14 @@ CATALOG(pg_class,1259) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83) BKI_SCHEMA_MACRO
 {
 	NameData	relname;		/* class name */
 	Oid			relnamespace;	/* OID of namespace containing this class */
-	Oid			reltype;		/* OID of entry in pg_type for table's implicit row type */
-	Oid			reloftype;		/* OID of entry in pg_type for underlying composite type */
+	Oid			reltype;		/* OID of entry in pg_type for table's
+								 * implicit row type */
+	Oid			reloftype;		/* OID of entry in pg_type for underlying
+								 * composite type */
 	Oid			relowner;		/* class owner */
 	Oid			relam;			/* index access method; 0 if not an index */
 	Oid			relfilenode;	/* identifier of physical storage file */
+
 	/* relfilenode == 0 means it is a "mapped" relation, see relmapper.c */
 	Oid			reltablespace;	/* identifier of table space for relation */
 	int4		relpages;		/* # of blocks (not always up-to-date) */
@@ -58,7 +61,7 @@ CATALOG(pg_class,1259) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83) BKI_SCHEMA_MACRO
 	int2		relchecks;		/* # of CHECK constraints for class */
 	bool		relhasoids;		/* T if we generate OIDs for rows of rel */
 	bool		relhaspkey;		/* has (or has had) PRIMARY KEY index */
-	bool		relhasexclusion; /* has (or has had) exclusion constraint */
+	bool		relhasexclusion;	/* has (or has had) exclusion constraint */
 	bool		relhasrules;	/* has (or has had) any rules */
 	bool		relhastriggers; /* has (or has had) any TRIGGERs */
 	bool		relhassubclass; /* has (or has had) derived classes */
diff --git a/src/include/catalog/pg_constraint.h b/src/include/catalog/pg_constraint.h
index 1aada159dbb2691a8d9ae30614dc9a0192403d1f..5f62f17f16e35a9a8b3eb863ae138456c5d225ec 100644
--- a/src/include/catalog/pg_constraint.h
+++ b/src/include/catalog/pg_constraint.h
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/pg_constraint.h,v 1.37 2010/01/17 22:56:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_constraint.h,v 1.38 2010/02/26 02:01:21 momjian Exp $
  *
  * NOTES
  *	  the genbki.pl script reads this file and generates .bki
@@ -65,10 +65,10 @@ CATALOG(pg_constraint,2606)
 	/*
 	 * conindid links to the index supporting the constraint, if any;
 	 * otherwise it's 0.  This is used for unique and primary-key constraints,
-	 * and less obviously for foreign-key constraints (where the index is
-	 * a unique index on the referenced relation's referenced columns).
-	 * Notice that the index is on conrelid in the first case but confrelid
-	 * in the second.
+	 * and less obviously for foreign-key constraints (where the index is a
+	 * unique index on the referenced relation's referenced columns). Notice
+	 * that the index is on conrelid in the first case but confrelid in the
+	 * second.
 	 */
 	Oid			conindid;		/* index supporting this constraint */
 
@@ -92,8 +92,8 @@ CATALOG(pg_constraint,2606)
 	 */
 
 	/*
-	 * Columns of conrelid that the constraint applies to, if known
-	 * (this is NULL for trigger constraints)
+	 * Columns of conrelid that the constraint applies to, if known (this is
+	 * NULL for trigger constraints)
 	 */
 	int2		conkey[1];
 
@@ -237,6 +237,6 @@ extern char *ChooseConstraintName(const char *name1, const char *name2,
 
 extern void AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
 						  Oid newNspId, bool isType);
-extern Oid GetConstraintByName(Oid relid, const char *conname);
+extern Oid	GetConstraintByName(Oid relid, const char *conname);
 
 #endif   /* PG_CONSTRAINT_H */
diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h
index 8d9d3f371763fdd94f66151811d8d5c7536cec7e..b2f4a5c5a46c1cf2878707c4db8bc684022d4512 100644
--- a/src/include/catalog/pg_control.h
+++ b/src/include/catalog/pg_control.h
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/pg_control.h,v 1.50 2010/02/17 04:19:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_control.h,v 1.51 2010/02/26 02:01:21 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -42,10 +42,10 @@ typedef struct CheckPoint
 	pg_time_t	time;			/* time stamp of checkpoint */
 
 	/* Important parameter settings at time of shutdown checkpoints */
-	int		MaxConnections;
-	int		max_prepared_xacts;
-	int		max_locks_per_xact;
-	bool	XLogStandbyInfoMode;
+	int			MaxConnections;
+	int			max_prepared_xacts;
+	int			max_locks_per_xact;
+	bool		XLogStandbyInfoMode;
 
 	/*
 	 * Oldest XID still running. This is only needed to initialize hot standby
@@ -53,7 +53,7 @@ typedef struct CheckPoint
 	 * online checkpoints and only when archiving is enabled. Otherwise it's
 	 * set to InvalidTransactionId.
 	 */
-	TransactionId   oldestActiveXid;
+	TransactionId oldestActiveXid;
 } CheckPoint;
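A sketch of how a checkpoint might fill oldestActiveXid per the comment above: only when standby information is being logged, otherwise InvalidTransactionId. GetOldestActiveTransactionId() is assumed to be available from the procarray code; this is an illustration, not the actual CreateCheckPoint logic:

	if (XLogStandbyInfoActive())
		checkPoint.oldestActiveXid = GetOldestActiveTransactionId();
	else
		checkPoint.oldestActiveXid = InvalidTransactionId;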
 
 /* XLOG info values for XLOG rmgr */
diff --git a/src/include/catalog/pg_db_role_setting.h b/src/include/catalog/pg_db_role_setting.h
index fd70f435a51d99a457ce454f83d82d25345b88cb..11b0ed679167767016f9215253e80f1581ed5fc2 100644
--- a/src/include/catalog/pg_db_role_setting.h
+++ b/src/include/catalog/pg_db_role_setting.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/pg_db_role_setting.h,v 1.3 2010/01/05 01:06:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_db_role_setting.h,v 1.4 2010/02/26 02:01:21 momjian Exp $
  *
  * NOTES
  *		the genbki.pl script reads this file and generates .bki
@@ -27,11 +27,11 @@
 #include "utils/relcache.h"
 
 /* ----------------
- *		pg_db_role_setting definition.  cpp turns this into
+ *		pg_db_role_setting definition.	cpp turns this into
  *		typedef struct FormData_pg_db_role_setting
  * ----------------
  */
-#define DbRoleSettingRelationId	2964
+#define DbRoleSettingRelationId 2964
 
 CATALOG(pg_db_role_setting,2964) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
 {
diff --git a/src/include/catalog/pg_default_acl.h b/src/include/catalog/pg_default_acl.h
index 76a90aed4c586a184cf8ffe1abcaa5de9e3f386c..8ea9ea48aff441610f149937d0a9fdd0cd8457b2 100644
--- a/src/include/catalog/pg_default_acl.h
+++ b/src/include/catalog/pg_default_acl.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/pg_default_acl.h,v 1.3 2010/01/05 01:06:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_default_acl.h,v 1.4 2010/02/26 02:01:21 momjian Exp $
  *
  * NOTES
  *	  the genbki.pl script reads this file and generates .bki
@@ -21,7 +21,7 @@
 #include "catalog/genbki.h"
 
 /* ----------------
- *		pg_default_acl definition.  cpp turns this into
+ *		pg_default_acl definition.	cpp turns this into
  *		typedef struct FormData_pg_default_acl
  * ----------------
  */
@@ -29,15 +29,15 @@
 
 CATALOG(pg_default_acl,826)
 {
-	Oid			defaclrole;			/* OID of role owning this ACL */
+	Oid			defaclrole;		/* OID of role owning this ACL */
 	Oid			defaclnamespace;	/* OID of namespace, or 0 for all */
-	char		defaclobjtype;		/* see DEFACLOBJ_xxx constants below */
+	char		defaclobjtype;	/* see DEFACLOBJ_xxx constants below */
 
 	/*
 	 * VARIABLE LENGTH FIELDS start here.
 	 */
 
-	aclitem		defaclacl[1];		/* permissions to add at CREATE time */
+	aclitem		defaclacl[1];	/* permissions to add at CREATE time */
 } FormData_pg_default_acl;
 
 /* ----------------
@@ -65,11 +65,11 @@ typedef FormData_pg_default_acl *Form_pg_default_acl;
 
 /*
  * Types of objects for which the user is allowed to specify default
- * permissions through pg_default_acl.  These codes are used in the
+ * permissions through pg_default_acl.	These codes are used in the
  * defaclobjtype column.
  */
 #define DEFACLOBJ_RELATION		'r'		/* table, view */
 #define DEFACLOBJ_SEQUENCE		'S'		/* sequence */
 #define DEFACLOBJ_FUNCTION		'f'		/* function */
 
-#endif /* PG_DEFAULT_ACL_H */
+#endif   /* PG_DEFAULT_ACL_H */
diff --git a/src/include/catalog/pg_enum.h b/src/include/catalog/pg_enum.h
index 985dcf0a4492ff60b023a9adc0adc68b742e57e6..fc05b4004abcf4d0ab5fce054068912107c9fce1 100644
--- a/src/include/catalog/pg_enum.h
+++ b/src/include/catalog/pg_enum.h
@@ -7,7 +7,7 @@
  *
  * Copyright (c) 2006-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/include/catalog/pg_enum.h,v 1.8 2010/01/05 01:06:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_enum.h,v 1.9 2010/02/26 02:01:21 momjian Exp $
  *
  * NOTES
  *	  the genbki.pl script reads this file and generates .bki
@@ -61,7 +61,7 @@ typedef FormData_pg_enum *Form_pg_enum;
  * prototypes for functions in pg_enum.c
  */
 extern void EnumValuesCreate(Oid enumTypeOid, List *vals,
-			Oid binary_upgrade_next_pg_enum_oid);
+				 Oid binary_upgrade_next_pg_enum_oid);
 extern void EnumValuesDelete(Oid enumTypeOid);
 
 #endif   /* PG_ENUM_H */
diff --git a/src/include/catalog/pg_largeobject.h b/src/include/catalog/pg_largeobject.h
index cb65803a118196d65f70d8aebf7b30cb9e8ebe8d..78b3119ee28b747a66c67bb6c4e4389d8606fe49 100644
--- a/src/include/catalog/pg_largeobject.h
+++ b/src/include/catalog/pg_largeobject.h
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/pg_largeobject.h,v 1.27 2010/01/05 01:06:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_largeobject.h,v 1.28 2010/02/26 02:01:21 momjian Exp $
  *
  * NOTES
  *	  the genbki.pl script reads this file and generates .bki
@@ -51,7 +51,7 @@ typedef FormData_pg_largeobject *Form_pg_largeobject;
 #define Anum_pg_largeobject_pageno		2
 #define Anum_pg_largeobject_data		3
 
-extern Oid  LargeObjectCreate(Oid loid);
+extern Oid	LargeObjectCreate(Oid loid);
 extern void LargeObjectDrop(Oid loid);
 extern void LargeObjectAlterOwner(Oid loid, Oid newOwnerId);
 extern bool LargeObjectExists(Oid loid);
diff --git a/src/include/catalog/pg_largeobject_metadata.h b/src/include/catalog/pg_largeobject_metadata.h
index bf74bb471d3d57c676c81f6a159be34a8217d90f..f13ff77e2f06052b769e399071b55f850202ad9b 100755
--- a/src/include/catalog/pg_largeobject_metadata.h
+++ b/src/include/catalog/pg_largeobject_metadata.h
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/pg_largeobject_metadata.h,v 1.3 2010/01/05 01:06:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_largeobject_metadata.h,v 1.4 2010/02/26 02:01:21 momjian Exp $
  *
  * NOTES
  *	  the genbki.pl script reads this file and generates .bki
@@ -22,7 +22,7 @@
 #include "catalog/genbki.h"
 
 /* ----------------
- *		pg_largeobject_metadata definition.	cpp turns this into
+ *		pg_largeobject_metadata definition. cpp turns this into
  *		typedef struct FormData_pg_largeobject_metadata
  * ----------------
  */
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 1c87a94a0de0a7b8f9562d122ed1daedd0ad5080..da6b0b2025192a61a3a8b7f12c3910d02e8dd0ba 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/catalog/pg_proc.h,v 1.569 2010/02/16 22:34:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_proc.h,v 1.570 2010/02/26 02:01:21 momjian Exp $
  *
  * NOTES
  *	  The script catalog/genbki.pl reads this file and generates .bki
@@ -396,7 +396,7 @@ DATA(insert OID = 191 (  box_right		   PGNSP PGUID 12 1 0 0 f f f t f i 2 0 16 "
 DESCR("is right of");
 DATA(insert OID = 192 (  box_contained	   PGNSP PGUID 12 1 0 0 f f f t f i 2 0 16 "603 603" _null_ _null_ _null_ _null_ box_contained _null_ _null_ _null_ ));
 DESCR("is contained by?");
-DATA(insert OID = 193 (  box_contain_pt	   PGNSP PGUID 12 1 0 0 f f f t f i 2 0 16 "603 600" _null_ _null_ _null_ _null_ box_contain_pt _null_ _null_ _null_ ));
+DATA(insert OID = 193 (  box_contain_pt    PGNSP PGUID 12 1 0 0 f f f t f i 2 0 16 "603 600" _null_ _null_ _null_ _null_ box_contain_pt _null_ _null_ _null_ ));
 DESCR("contains?");
 
 /* OIDS 200 - 299 */
@@ -2411,7 +2411,7 @@ DESCR("return portion of bitstring");
 
 DATA(insert OID = 3030 (  overlay		   PGNSP PGUID 12 1 0 0 f f f t f i 4 0 1560 "1560 1560 23 23" _null_ _null_ _null_ _null_	bitoverlay _null_ _null_ _null_ ));
 DESCR("substitute portion of bitstring");
-DATA(insert OID = 3031 (  overlay		   PGNSP PGUID 12 1 0 0 f f f t f i 3 0 1560 "1560 1560 23" _null_ _null_ _null_ _null_	bitoverlay_no_len _null_ _null_ _null_ ));
+DATA(insert OID = 3031 (  overlay		   PGNSP PGUID 12 1 0 0 f f f t f i 3 0 1560 "1560 1560 23" _null_ _null_ _null_ _null_ bitoverlay_no_len _null_ _null_ _null_ ));
 DESCR("substitute portion of bitstring");
 DATA(insert OID = 3032 (  get_bit		   PGNSP PGUID 12 1 0 0 f f f t f i 2 0 23 "1560 23" _null_ _null_ _null_ _null_ bitgetbit _null_ _null_ _null_ ));
 DESCR("get bit");
@@ -3097,9 +3097,9 @@ DATA(insert OID = 2274 (  pg_stat_reset					PGNSP PGUID 12 1 0 0 f f f f f v 0 0
 DESCR("statistics: reset collected statistics for current database");
 DATA(insert OID = 3775 (  pg_stat_reset_shared			PGNSP PGUID 12 1 0 0 f f f f f v 1 0 2278 "25" _null_ _null_ _null_ _null_	pg_stat_reset_shared _null_ _null_ _null_ ));
 DESCR("statistics: reset collected statistics shared across the cluster");
-DATA(insert OID = 3776 (  pg_stat_reset_single_table_counters	PGNSP PGUID 12 1 0 0 f f f f f v 1 0 2278 "26" _null_ _null_ _null_ _null_  pg_stat_reset_single_table_counters _null_ _null_ _null_ ));
+DATA(insert OID = 3776 (  pg_stat_reset_single_table_counters	PGNSP PGUID 12 1 0 0 f f f f f v 1 0 2278 "26" _null_ _null_ _null_ _null_	pg_stat_reset_single_table_counters _null_ _null_ _null_ ));
 DESCR("statistics: reset collected statistics for a single table or index in the current database");
-DATA(insert OID = 3777 (  pg_stat_reset_single_function_counters	PGNSP PGUID 12 1 0 0 f f f f f v 1 0 2278 "26" _null_ _null_ _null_ _null_  pg_stat_reset_single_function_counters _null_ _null_ _null_ ));
+DATA(insert OID = 3777 (  pg_stat_reset_single_function_counters	PGNSP PGUID 12 1 0 0 f f f f f v 1 0 2278 "26" _null_ _null_ _null_ _null_	pg_stat_reset_single_function_counters _null_ _null_ _null_ ));
 DESCR("statistics: reset collected statistics for a single function in the current database");
 
 DATA(insert OID = 1946 (  encode						PGNSP PGUID 12 1 0 0 f f f t f i 2 0 25 "17 25" _null_ _null_ _null_ _null_ binary_encode _null_ _null_ _null_ ));
@@ -3315,7 +3315,7 @@ DESCR("xlog filename and byte offset, given an xlog location");
 DATA(insert OID = 2851 ( pg_xlogfile_name			PGNSP PGUID 12 1 0 0 f f f t f i 1 0 25 "25" _null_ _null_ _null_ _null_ pg_xlogfile_name _null_ _null_ _null_ ));
 DESCR("xlog filename, given an xlog location");
 
-DATA(insert OID = 3810 (  pg_is_in_recovery 	PGNSP PGUID 12 1 0 0 f f f t f v 0 0 16 "" _null_ _null_ _null_ _null_ pg_is_in_recovery _null_ _null_ _null_ ));
+DATA(insert OID = 3810 (  pg_is_in_recovery		PGNSP PGUID 12 1 0 0 f f f t f v 0 0 16 "" _null_ _null_ _null_ _null_ pg_is_in_recovery _null_ _null_ _null_ ));
 DESCR("true if server is in recovery");
 
 DATA(insert OID = 3820 ( pg_last_xlog_receive_location	PGNSP PGUID 12 1 0 0 f f f t f v 0 0 25 "" _null_ _null_ _null_ _null_ pg_last_xlog_receive_location _null_ _null_ _null_ ));
@@ -3733,7 +3733,7 @@ DATA(insert OID = 2325 ( pg_relation_size		PGNSP PGUID 14 1 0 0 f f f t f v 1 0
 DESCR("disk space usage for the main fork of the specified table or index");
 DATA(insert OID = 2332 ( pg_relation_size		PGNSP PGUID 12 1 0 0 f f f t f v 2 0 20 "2205 25" _null_ _null_ _null_ _null_ pg_relation_size _null_ _null_ _null_ ));
 DESCR("disk space usage for the specified fork of a table or index");
-DATA(insert OID = 2286 ( pg_total_relation_size	PGNSP PGUID 12 1 0 0 f f f t f v 1 0 20 "2205" _null_ _null_ _null_ _null_ pg_total_relation_size _null_ _null_ _null_ ));
+DATA(insert OID = 2286 ( pg_total_relation_size PGNSP PGUID 12 1 0 0 f f f t f v 1 0 20 "2205" _null_ _null_ _null_ _null_ pg_total_relation_size _null_ _null_ _null_ ));
 DESCR("total disk space usage for the specified table and associated indexes");
 DATA(insert OID = 2288 ( pg_size_pretty			PGNSP PGUID 12 1 0 0 f f f t f v 1 0 25 "20" _null_ _null_ _null_ _null_ pg_size_pretty _null_ _null_ _null_ ));
 DESCR("convert a long int to a human readable text using size units");
@@ -4133,7 +4133,7 @@ DATA(insert OID = 2856 (  pg_timezone_names		PGNSP PGUID 12 1 1000 0 f f f t t s
 DESCR("get the available time zone names");
 DATA(insert OID = 2730 (  pg_get_triggerdef		PGNSP PGUID 12 1 0 0 f f f t f s 2 0 25 "26 16" _null_ _null_ _null_ _null_ pg_get_triggerdef_ext _null_ _null_ _null_ ));
 DESCR("trigger description with pretty-print option");
-DATA(insert OID = 3035 (  pg_listening_channels	PGNSP PGUID 12 1 10 0 f f f t t s 0 0 25 "" _null_ _null_ _null_ _null_ pg_listening_channels _null_ _null_ _null_ ));
+DATA(insert OID = 3035 (  pg_listening_channels PGNSP PGUID 12 1 10 0 f f f t t s 0 0 25 "" _null_ _null_ _null_ _null_ pg_listening_channels _null_ _null_ _null_ ));
 DESCR("get the channels that the current backend listens to");
 DATA(insert OID = 3036 (  pg_notify				PGNSP PGUID 12 1 0 0 f f f f f v 2 0 2278 "25 25" _null_ _null_ _null_ _null_ pg_notify _null_ _null_ _null_ ));
 DESCR("send a notification event");
@@ -4254,7 +4254,7 @@ DATA(insert OID = 2592 (  gist_circle_compress	PGNSP PGUID 12 1 0 0 f f f t f i
 DESCR("GiST support");
 DATA(insert OID = 1030 (  gist_point_compress	PGNSP PGUID 12 1 0 0 f f f t f i 1 0 2281 "2281" _null_ _null_ _null_ _null_ gist_point_compress _null_ _null_ _null_ ));
 DESCR("GiST support");
-DATA(insert OID = 2179 (  gist_point_consistent	PGNSP PGUID 12 1 0 0 f f f t f i 5 0 16 "2281 603 23 26 2281" _null_ _null_ _null_ _null_	gist_point_consistent _null_ _null_ _null_ ));
+DATA(insert OID = 2179 (  gist_point_consistent PGNSP PGUID 12 1 0 0 f f f t f i 5 0 16 "2281 603 23 26 2281" _null_ _null_ _null_ _null_	gist_point_consistent _null_ _null_ _null_ ));
 DESCR("GiST support");
 
 /* GIN */
diff --git a/src/include/commands/cluster.h b/src/include/commands/cluster.h
index 0fecd1986ac0840308327251d43317c2318e7eee..ed3853af24cf09bc55aa97539580c0f36cb38aae 100644
--- a/src/include/commands/cluster.h
+++ b/src/include/commands/cluster.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994-5, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/commands/cluster.h,v 1.40 2010/02/07 20:48:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/cluster.h,v 1.41 2010/02/26 02:01:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -19,15 +19,15 @@
 
 extern void cluster(ClusterStmt *stmt, bool isTopLevel);
 extern void cluster_rel(Oid tableOid, Oid indexOid, bool recheck,
-					bool verbose, int freeze_min_age, int freeze_table_age);
+			bool verbose, int freeze_min_age, int freeze_table_age);
 extern void check_index_is_clusterable(Relation OldHeap, Oid indexOid,
 						   bool recheck);
 extern void mark_index_clustered(Relation rel, Oid indexOid);
 
 extern Oid	make_new_heap(Oid OIDOldHeap, Oid NewTableSpace);
 extern void finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
-							 bool is_system_catalog,
-							 bool swap_toast_by_content,
-							 TransactionId frozenXid);
+				 bool is_system_catalog,
+				 bool swap_toast_by_content,
+				 TransactionId frozenXid);
 
 #endif   /* CLUSTER_H */
diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h
index 4bc733d49a293e58389051f29e9d9cedbf877b84..49d9f19966df67704e89116f0d3432dc5223489b 100644
--- a/src/include/commands/defrem.h
+++ b/src/include/commands/defrem.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/commands/defrem.h,v 1.100 2010/01/02 16:58:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/defrem.h,v 1.101 2010/02/26 02:01:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -46,8 +46,8 @@ extern char *makeObjectName(const char *name1, const char *name2,
 extern char *ChooseRelationName(const char *name1, const char *name2,
 				   const char *label, Oid namespaceid);
 extern char *ChooseIndexName(const char *tabname, Oid namespaceId,
-							 List *colnames, List *exclusionOpNames,
-							 bool primary, bool isconstraint);
+				List *colnames, List *exclusionOpNames,
+				bool primary, bool isconstraint);
 extern List *ChooseIndexColumnNames(List *indexElems);
 extern Oid	GetDefaultOpClass(Oid type_id, Oid am_id);
 
diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h
index 6f39f7a5692a1767d9e2cf019d0358acc72cf6c6..52d39937d09e37dfcf03fa73c8d588fd6b49f0fc 100644
--- a/src/include/commands/explain.h
+++ b/src/include/commands/explain.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994-5, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/commands/explain.h,v 1.46 2010/02/16 22:19:59 adunstan Exp $
+ * $PostgreSQL: pgsql/src/include/commands/explain.h,v 1.47 2010/02/26 02:01:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -36,14 +36,14 @@ typedef struct ExplainState
 	PlannedStmt *pstmt;			/* top of plan */
 	List	   *rtable;			/* range table */
 	int			indent;			/* current indentation level */
-	List	   *grouping_stack;	/* format-specific grouping state */
+	List	   *grouping_stack; /* format-specific grouping state */
 } ExplainState;
 
 /* Hook for plugins to get control in ExplainOneQuery() */
 typedef void (*ExplainOneQuery_hook_type) (Query *query,
-										   ExplainState *es,
-										   const char *queryString,
-										   ParamListInfo params);
+													   ExplainState *es,
+													 const char *queryString,
+													   ParamListInfo params);
 extern PGDLLIMPORT ExplainOneQuery_hook_type ExplainOneQuery_hook;
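A sketch of how a loadable module could chain onto the hook declared above; my_ExplainOneQuery and the surrounding module boilerplate are hypothetical, and a real implementation would plan and explain the query itself when no prior hook exists:

static ExplainOneQuery_hook_type prev_ExplainOneQuery_hook = NULL;

static void
my_ExplainOneQuery(Query *query, ExplainState *es,
				   const char *queryString, ParamListInfo params)
{
	/* inspect or adjust behaviour here, then defer to any prior hook */
	if (prev_ExplainOneQuery_hook)
		(*prev_ExplainOneQuery_hook) (query, es, queryString, params);
}

void
_PG_init(void)
{
	prev_ExplainOneQuery_hook = ExplainOneQuery_hook;
	ExplainOneQuery_hook = my_ExplainOneQuery;
}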
 
 /* Hook for plugins to get control in explain_get_index_name() */
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index e73d113c183c74aa3092b7a655776d10d4c1548a..b2424a0d087002440274d24fb28d930b301e29a3 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.167 2010/02/08 04:33:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.168 2010/02/26 02:01:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -172,12 +172,13 @@ extern TupleTableSlot *EvalPlanQual(EState *estate, EPQState *epqstate,
 extern HeapTuple EvalPlanQualFetch(EState *estate, Relation relation,
 				  int lockmode, ItemPointer tid, TransactionId priorXmax);
 extern void EvalPlanQualInit(EPQState *epqstate, EState *estate,
-							 Plan *subplan, int epqParam);
+				 Plan *subplan, int epqParam);
 extern void EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan);
 extern void EvalPlanQualAddRowMark(EPQState *epqstate, ExecRowMark *erm);
 extern void EvalPlanQualSetTuple(EPQState *epqstate, Index rti,
-								 HeapTuple tuple);
+					 HeapTuple tuple);
 extern HeapTuple EvalPlanQualGetTuple(EPQState *epqstate, Index rti);
+
 #define EvalPlanQualSetSlot(epqstate, slot)  ((epqstate)->origslot = (slot))
 extern void EvalPlanQualFetchRowMarks(EPQState *epqstate);
 extern TupleTableSlot *EvalPlanQualNext(EPQState *epqstate);
@@ -221,7 +222,7 @@ typedef TupleTableSlot *(*ExecScanAccessMtd) (ScanState *node);
 typedef bool (*ExecScanRecheckMtd) (ScanState *node, TupleTableSlot *slot);
 
 extern TupleTableSlot *ExecScan(ScanState *node, ExecScanAccessMtd accessMtd,
-								ExecScanRecheckMtd recheckMtd);
+		 ExecScanRecheckMtd recheckMtd);
 extern void ExecAssignScanProjectionInfo(ScanState *node);
 extern void ExecScanReScan(ScanState *node);
 
@@ -322,11 +323,11 @@ extern void ExecCloseIndices(ResultRelInfo *resultRelInfo);
 extern List *ExecInsertIndexTuples(TupleTableSlot *slot, ItemPointer tupleid,
 					  EState *estate);
 extern bool check_exclusion_constraint(Relation heap, Relation index,
-									   IndexInfo *indexInfo,
-									   ItemPointer tupleid,
-									   Datum *values, bool *isnull,
-									   EState *estate,
-									   bool newIndex, bool errorOK);
+						   IndexInfo *indexInfo,
+						   ItemPointer tupleid,
+						   Datum *values, bool *isnull,
+						   EState *estate,
+						   bool newIndex, bool errorOK);
 
 extern void RegisterExprContextCallback(ExprContext *econtext,
 							ExprContextCallbackFunction function,
diff --git a/src/include/executor/instrument.h b/src/include/executor/instrument.h
index 66cacb11ba9352dc8cc2bff36e1f319c61ae3496..a1f680daedfd65483772790ad2a8f0ca3a40599f 100644
--- a/src/include/executor/instrument.h
+++ b/src/include/executor/instrument.h
@@ -6,7 +6,7 @@
  *
  * Copyright (c) 2001-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/include/executor/instrument.h,v 1.23 2010/01/08 00:48:56 itagaki Exp $
+ * $PostgreSQL: pgsql/src/include/executor/instrument.h,v 1.24 2010/02/26 02:01:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -18,42 +18,42 @@
 
 typedef struct BufferUsage
 {
-	long	shared_blks_hit;		/* # of shared buffer hits */
-	long	shared_blks_read;		/* # of shared disk blocks read */
-	long	shared_blks_written;	/* # of shared disk blocks written */
-	long	local_blks_hit;			/* # of local buffer hits */
-	long	local_blks_read;		/* # of local disk blocks read */
-	long	local_blks_written;		/* # of local disk blocks written */
-	long	temp_blks_read;			/* # of temp blocks read */
-	long	temp_blks_written;		/* # of temp blocks written */
+	long		shared_blks_hit;	/* # of shared buffer hits */
+	long		shared_blks_read;		/* # of shared disk blocks read */
+	long		shared_blks_written;	/* # of shared disk blocks written */
+	long		local_blks_hit; /* # of local buffer hits */
+	long		local_blks_read;	/* # of local disk blocks read */
+	long		local_blks_written;		/* # of local disk blocks written */
+	long		temp_blks_read; /* # of temp blocks read */
+	long		temp_blks_written;		/* # of temp blocks written */
 } BufferUsage;
 
 typedef enum InstrumentOption
 {
-	INSTRUMENT_TIMER	= 1 << 0,		/* needs timer */
-	INSTRUMENT_BUFFERS	= 1 << 1,		/* needs buffer usage */
-	INSTRUMENT_ALL		= 0x7FFFFFFF
+	INSTRUMENT_TIMER = 1 << 0,	/* needs timer */
+	INSTRUMENT_BUFFERS = 1 << 1,	/* needs buffer usage */
+	INSTRUMENT_ALL = 0x7FFFFFFF
 } InstrumentOption;
 
 typedef struct Instrumentation
 {
 	/* Info about current plan cycle: */
 	bool		running;		/* TRUE if we've completed first tuple */
-	bool		needs_bufusage;	/* TRUE if we need buffer usage */
+	bool		needs_bufusage; /* TRUE if we need buffer usage */
 	instr_time	starttime;		/* Start time of current iteration of node */
 	instr_time	counter;		/* Accumulated runtime for this node */
 	double		firsttuple;		/* Time for first tuple of this cycle */
 	double		tuplecount;		/* Tuples emitted so far this cycle */
-	BufferUsage	bufusage_start;	/* Buffer usage at start */
+	BufferUsage bufusage_start; /* Buffer usage at start */
 	/* Accumulated statistics across all completed cycles: */
 	double		startup;		/* Total startup time (in seconds) */
 	double		total;			/* Total total time (in seconds) */
 	double		ntuples;		/* Total tuples produced */
 	double		nloops;			/* # of run cycles for this node */
-	BufferUsage	bufusage;		/* Total buffer usage */
+	BufferUsage bufusage;		/* Total buffer usage */
 } Instrumentation;
 
-extern PGDLLIMPORT BufferUsage		pgBufferUsage;
+extern PGDLLIMPORT BufferUsage pgBufferUsage;
 
 extern Instrumentation *InstrAlloc(int n, int instrument_options);
 extern void InstrStartNode(Instrumentation *instr);
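The global pgBufferUsage counters declared above make it easy to measure I/O across a stretch of code by differencing snapshots; a sketch only, not the in-tree accumulation routine:

	BufferUsage start = pgBufferUsage; /* snapshot before the measured work */

	/* ... run the work being measured ... */

	elog(DEBUG1, "shared hits %ld, shared reads %ld",
		 pgBufferUsage.shared_blks_hit - start.shared_blks_hit,
		 pgBufferUsage.shared_blks_read - start.shared_blks_read);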
diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h
index acb20c1c940f9ef70d94841f844d856296224782..5ee60c16b7afa65c2a76451e19aeef19621caad8 100644
--- a/src/include/executor/spi.h
+++ b/src/include/executor/spi.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/executor/spi.h,v 1.74 2010/01/02 16:58:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/executor/spi.h,v 1.75 2010/02/26 02:01:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -74,8 +74,8 @@ extern int	SPI_execute(const char *src, bool read_only, long tcount);
 extern int SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
 				 bool read_only, long tcount);
 extern int SPI_execute_plan_with_paramlist(SPIPlanPtr plan,
-										   ParamListInfo params,
-										   bool read_only, long tcount);
+								ParamListInfo params,
+								bool read_only, long tcount);
 extern int	SPI_exec(const char *src, long tcount);
 extern int SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls,
 		  long tcount);
@@ -92,9 +92,9 @@ extern SPIPlanPtr SPI_prepare(const char *src, int nargs, Oid *argtypes);
 extern SPIPlanPtr SPI_prepare_cursor(const char *src, int nargs, Oid *argtypes,
 				   int cursorOptions);
 extern SPIPlanPtr SPI_prepare_params(const char *src,
-									 ParserSetupHook parserSetup,
-									 void *parserSetupArg,
-									 int cursorOptions);
+				   ParserSetupHook parserSetup,
+				   void *parserSetupArg,
+				   int cursorOptions);
 extern SPIPlanPtr SPI_saveplan(SPIPlanPtr plan);
 extern int	SPI_freeplan(SPIPlanPtr plan);
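A short usage sketch of the plan lifecycle declared above, assuming the caller is already inside SPI_connect()/SPI_finish(); "query", "argtypes", "values", and "nulls" are placeholders:

	SPIPlanPtr	plan;
	int			ret;

	plan = SPI_prepare(query, 1, argtypes);
	if (plan == NULL)
		elog(ERROR, "SPI_prepare failed");

	ret = SPI_execute_plan(plan, values, nulls, true /* read_only */ , 0);
	if (ret < 0)
		elog(ERROR, "SPI_execute_plan failed");

	SPI_freeplan(plan);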
 
diff --git a/src/include/executor/spi_priv.h b/src/include/executor/spi_priv.h
index 520eff68ede41b60f681b914b05c2a8645b34b35..dc854521df4a643455b3fc01f8f6a22e2452dbd2 100644
--- a/src/include/executor/spi_priv.h
+++ b/src/include/executor/spi_priv.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/executor/spi_priv.h,v 1.34 2010/01/02 16:58:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/executor/spi_priv.h,v 1.35 2010/02/26 02:01:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -69,7 +69,7 @@ typedef struct _SPI_plan
 	int			nargs;			/* number of plan arguments */
 	Oid		   *argtypes;		/* Argument types (NULL if nargs is 0) */
 	ParserSetupHook parserSetup;	/* alternative parameter spec method */
-	void	   *parserSetupArg;	
+	void	   *parserSetupArg;
 } _SPI_plan;
 
 #endif   /* SPI_PRIV_H */
diff --git a/src/include/fmgr.h b/src/include/fmgr.h
index b5e7435828bc67ed04465a9f153a8043b52b3031..cf74f97cc0cecb0deabd6e7d6813b4c298dddebe 100644
--- a/src/include/fmgr.h
+++ b/src/include/fmgr.h
@@ -11,7 +11,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/fmgr.h,v 1.64 2010/02/08 20:39:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/fmgr.h,v 1.65 2010/02/26 02:01:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -538,11 +538,11 @@ extern void **find_rendezvous_variable(const char *varName);
  */
 
 /* AggCheckCallContext can return one of the following codes, or 0: */
-#define AGG_CONTEXT_AGGREGATE	1			/* regular aggregate */
-#define AGG_CONTEXT_WINDOW		2			/* window function */
+#define AGG_CONTEXT_AGGREGATE	1		/* regular aggregate */
+#define AGG_CONTEXT_WINDOW		2		/* window function */
 
-extern int	AggCheckCallContext(FunctionCallInfo fcinfo,
-								MemoryContext *aggcontext);
+extern int AggCheckCallContext(FunctionCallInfo fcinfo,
+					MemoryContext *aggcontext);
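A sketch of the documented calling pattern for AggCheckCallContext: an aggregate transition function switches into the per-aggregate memory context so its state survives between calls. Only the context handling is shown; building the transition state is left out:

	MemoryContext aggcontext;
	MemoryContext oldcontext;

	if (!AggCheckCallContext(fcinfo, &aggcontext))
		elog(ERROR, "transition function called in non-aggregate context");

	oldcontext = MemoryContextSwitchTo(aggcontext);
	/* ... allocate or enlarge the transition state here ... */
	MemoryContextSwitchTo(oldcontext);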
 
 
 /*
diff --git a/src/include/funcapi.h b/src/include/funcapi.h
index 5d736c80791f88198bdd2491f38ed169d94afbeb..8190f8abc39cf60576baf4b01fa9725b2b52b0b3 100644
--- a/src/include/funcapi.h
+++ b/src/include/funcapi.h
@@ -9,7 +9,7 @@
  *
  * Copyright (c) 2002-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/include/funcapi.h,v 1.31 2010/01/02 16:58:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/funcapi.h,v 1.32 2010/02/26 02:01:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -174,7 +174,7 @@ extern int get_func_arg_info(HeapTuple procTup,
 				  char **p_argmodes);
 
 extern int get_func_input_arg_names(Datum proargnames, Datum proargmodes,
-									char ***arg_names);
+						 char ***arg_names);
 
 extern char *get_func_result_name(Oid functionId);
 
diff --git a/src/include/libpq/be-fsstubs.h b/src/include/libpq/be-fsstubs.h
index f0ec03ecfc6d74ab0f5d0d5ae3e7aa08ab64c5cc..d3dde65896d486423311c48a45f442c2ab664305 100644
--- a/src/include/libpq/be-fsstubs.h
+++ b/src/include/libpq/be-fsstubs.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/libpq/be-fsstubs.h,v 1.34 2010/01/02 16:58:04 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/be-fsstubs.h,v 1.35 2010/02/26 02:01:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -40,7 +40,7 @@ extern Datum lo_truncate(PG_FUNCTION_ARGS);
 /*
  * compatibility option for access control
  */
-extern bool	lo_compat_privileges;
+extern bool lo_compat_privileges;
 
 /*
  * These are not fmgr-callable, but are available to C code.
diff --git a/src/include/libpq/ip.h b/src/include/libpq/ip.h
index dc403379ed68acbec692d4de1b75970b820f0537..b6ab7827ed7547f5d800e1ee8dfe972e27f2254e 100644
--- a/src/include/libpq/ip.h
+++ b/src/include/libpq/ip.h
@@ -8,7 +8,7 @@
  *
  * Copyright (c) 2003-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/include/libpq/ip.h,v 1.23 2010/01/02 16:58:04 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/ip.h,v 1.24 2010/02/26 02:01:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -25,9 +25,9 @@
 #define IS_AF_UNIX(fam) (0)
 #endif
 
-typedef void (*PgIfAddrCallback) (struct sockaddr *addr,
-								  struct sockaddr *netmask,
-								  void *cb_data);
+typedef void (*PgIfAddrCallback) (struct sockaddr * addr,
+											  struct sockaddr * netmask,
+											  void *cb_data);
 
 extern int pg_getaddrinfo_all(const char *hostname, const char *servname,
 				   const struct addrinfo * hintp,
@@ -51,6 +51,6 @@ extern void pg_promote_v4_to_v6_addr(struct sockaddr_storage * addr);
 extern void pg_promote_v4_to_v6_mask(struct sockaddr_storage * addr);
 #endif
 
-extern int pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data);
+extern int	pg_foreach_ifaddr(PgIfAddrCallback callback, void *cb_data);
 
 #endif   /* IP_H */
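A sketch of driving the iterator declared above; report_addr is a hypothetical callback, and this example merely counts interfaces:

static void
report_addr(struct sockaddr *addr, struct sockaddr *netmask, void *cb_data)
{
	int		   *count = (int *) cb_data;

	(*count)++;					/* a real callback would inspect addr/netmask */
}

static int
count_local_ifaddrs(void)
{
	int			count = 0;

	(void) pg_foreach_ifaddr(report_addr, &count);
	return count;
}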
diff --git a/src/include/libpq/libpq.h b/src/include/libpq/libpq.h
index c8fa2778824c63dc579778c7b053bab7fd8232da..978d9a9acaa2b5b21aa8fc292e6097f503e2141e 100644
--- a/src/include/libpq/libpq.h
+++ b/src/include/libpq/libpq.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/libpq/libpq.h,v 1.74 2010/01/15 09:19:08 heikki Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/libpq.h,v 1.75 2010/02/26 02:01:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -45,7 +45,7 @@ typedef struct
  * prototypes for functions in pqcomm.c
  */
 extern int StreamServerPort(int family, char *hostName,
-		 unsigned short portNumber, char *unixSocketName, pgsocket ListenSocket[],
+	unsigned short portNumber, char *unixSocketName, pgsocket ListenSocket[],
 				 int MaxListen);
 extern int	StreamConnection(pgsocket server_fd, Port *port);
 extern void StreamClose(pgsocket sock);
diff --git a/src/include/libpq/pqsignal.h b/src/include/libpq/pqsignal.h
index db996ebe69f451efc1fefde2f318f01284f4daef..f6b5d5fb042296e9b7952ab963ab63fe8f27c9ad 100644
--- a/src/include/libpq/pqsignal.h
+++ b/src/include/libpq/pqsignal.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/libpq/pqsignal.h,v 1.40 2010/01/20 18:54:27 heikki Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/pqsignal.h,v 1.41 2010/02/26 02:01:24 momjian Exp $
  *
  * NOTES
  *	  This shouldn't be in libpq, but the monitor and some other
@@ -26,7 +26,7 @@ extern sigset_t UnBlockSig,
 			StartupBlockSig;
 
 #define PG_SETMASK(mask)	sigprocmask(SIG_SETMASK, mask, NULL)
-#else /* not HAVE_SIGPROCMASK */
+#else							/* not HAVE_SIGPROCMASK */
 extern int	UnBlockSig,
 			BlockSig,
 			StartupBlockSig;
@@ -40,7 +40,7 @@ int			pqsigsetmask(int mask);
 
 #define sigaddset(set, signum)	(*(set) |= (sigmask(signum)))
 #define sigdelset(set, signum)	(*(set) &= ~(sigmask(signum)))
-#endif /* not HAVE_SIGPROCMASK */
+#endif   /* not HAVE_SIGPROCMASK */
 
 typedef void (*pqsigfunc) (int);
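The usual bracket around signal-sensitive code, using the masks declared in this header; a sketch only:

	PG_SETMASK(&BlockSig);
	/* ... touch state that signal handlers may also modify ... */
	PG_SETMASK(&UnBlockSig);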
 
diff --git a/src/include/mb/pg_wchar.h b/src/include/mb/pg_wchar.h
index 408bf3db2db58d4c1fc103f73e113b4544833295..817f9aaaaa1f1e3334ab3550f75fda7a889a34f7 100644
--- a/src/include/mb/pg_wchar.h
+++ b/src/include/mb/pg_wchar.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/mb/pg_wchar.h,v 1.93 2010/01/02 16:58:04 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/mb/pg_wchar.h,v 1.94 2010/02/26 02:01:25 momjian Exp $
  *
  *	NOTES
  *		This is used both by the backend and by libpq, but should not be
@@ -258,7 +258,7 @@ typedef struct pg_enc2name
 	char	   *name;
 	pg_enc		encoding;
 #ifdef WIN32
-	unsigned	codepage;	/* codepage for WIN32 */
+	unsigned	codepage;		/* codepage for WIN32 */
 #endif
 } pg_enc2name;
 
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index d7a80b11d29cf869a51038c8f5c21e1cbf2f732c..5d17acd6cddf679a365dc594c2daa8903d043630 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -13,7 +13,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.219 2010/02/20 21:24:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.220 2010/02/26 02:01:20 momjian Exp $
  *
  * NOTES
  *	  some of the information in this file should be moved to other files.
@@ -241,8 +241,8 @@ extern void PreventCommandIfReadOnly(const char *cmdname);
 extern void PreventCommandDuringRecovery(const char *cmdname);
 
 /* in utils/misc/guc.c */
-extern int trace_recovery_messages;
-int trace_recovery(int trace_level);
+extern int	trace_recovery_messages;
+int			trace_recovery(int trace_level);
 
 /*****************************************************************************
  *	  pdir.h --																 *
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 4d9dfc4c82c9896d5be2d9c328573eefdbac4a07..6455eeaa603a64a06c0677010b298824ff1ce60e 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.218 2010/02/12 17:33:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.219 2010/02/26 02:01:25 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -61,7 +61,7 @@ typedef struct IndexInfo
 	List	   *ii_ExpressionsState;	/* list of ExprState */
 	List	   *ii_Predicate;	/* list of Expr */
 	List	   *ii_PredicateState;		/* list of ExprState */
-	Oid		   *ii_ExclusionOps;		/* array with one entry per column */
+	Oid		   *ii_ExclusionOps;	/* array with one entry per column */
 	Oid		   *ii_ExclusionProcs;		/* array with one entry per column */
 	uint16	   *ii_ExclusionStrats;		/* array with one entry per column */
 	bool		ii_Unique;
@@ -353,8 +353,8 @@ typedef struct EState
 
 	/* Stuff used for firing triggers: */
 	List	   *es_trig_target_relations;		/* trigger-only ResultRelInfos */
-	TupleTableSlot *es_trig_tuple_slot;		/* for trigger output tuples */
-	TupleTableSlot *es_trig_oldtup_slot;	/* for trigger old tuples */
+	TupleTableSlot *es_trig_tuple_slot; /* for trigger output tuples */
+	TupleTableSlot *es_trig_oldtup_slot;		/* for trigger old tuples */
 
 	/* Parameter info: */
 	ParamListInfo es_param_list_info;	/* values of external params */
@@ -387,16 +387,16 @@ typedef struct EState
 
 	/*
 	 * These fields are for re-evaluating plan quals when an updated tuple is
-	 * substituted in READ COMMITTED mode.  es_epqTuple[] contains tuples
-	 * that scan plan nodes should return instead of whatever they'd normally
+	 * substituted in READ COMMITTED mode.	es_epqTuple[] contains tuples that
+	 * scan plan nodes should return instead of whatever they'd normally
 	 * return, or NULL if nothing to return; es_epqTupleSet[] is true if a
 	 * particular array entry is valid; and es_epqScanDone[] is state to
 	 * remember if the tuple has been returned already.  Arrays are of size
 	 * list_length(es_range_table) and are indexed by scan node scanrelid - 1.
 	 */
-	HeapTuple  *es_epqTuple;		/* array of EPQ substitute tuples */
-	bool	   *es_epqTupleSet;		/* true if EPQ tuple is provided */
-	bool	   *es_epqScanDone;		/* true if EPQ tuple has been fetched */
+	HeapTuple  *es_epqTuple;	/* array of EPQ substitute tuples */
+	bool	   *es_epqTupleSet; /* true if EPQ tuple is provided */
+	bool	   *es_epqScanDone; /* true if EPQ tuple has been fetched */
 } EState;
 
 
@@ -409,7 +409,7 @@ typedef struct EState
  * parent RTEs, which can be ignored at runtime).  See PlanRowMark for details
  * about most of the fields.
  *
- * es_rowMarks is a list of these structs.  Each LockRows node has its own
+ * es_rowMarks is a list of these structs.	Each LockRows node has its own
  * list, which is the subset of locks that it is supposed to enforce; note
  * that the per-node lists point to the same structs that are in the global
  * list.
@@ -419,7 +419,7 @@ typedef struct ExecRowMark
 	Relation	relation;		/* opened and suitably locked relation */
 	Index		rti;			/* its range table index */
 	Index		prti;			/* parent range table index, if child */
-	RowMarkType	markType;		/* see enum in nodes/plannodes.h */
+	RowMarkType markType;		/* see enum in nodes/plannodes.h */
 	bool		noWait;			/* NOWAIT option */
 	AttrNumber	ctidAttNo;		/* resno of ctid junk attribute, if any */
 	AttrNumber	toidAttNo;		/* resno of tableoid junk attribute, if any */
@@ -1024,13 +1024,13 @@ typedef struct ResultState
  */
 typedef struct ModifyTableState
 {
-	PlanState		ps;				/* its first field is NodeTag */
-	CmdType			operation;
-	PlanState	  **mt_plans;		/* subplans (one per target rel) */
-	int				mt_nplans;		/* number of plans in the array */
-	int				mt_whichplan;	/* which one is being executed (0..n-1) */
-	EPQState		mt_epqstate;	/* for evaluating EvalPlanQual rechecks */
-	bool			fireBSTriggers;	/* do we need to fire stmt triggers? */
+	PlanState	ps;				/* its first field is NodeTag */
+	CmdType		operation;
+	PlanState **mt_plans;		/* subplans (one per target rel) */
+	int			mt_nplans;		/* number of plans in the array */
+	int			mt_whichplan;	/* which one is being executed (0..n-1) */
+	EPQState	mt_epqstate;	/* for evaluating EvalPlanQual rechecks */
+	bool		fireBSTriggers; /* do we need to fire stmt triggers? */
 } ModifyTableState;
 
 /* ----------------
@@ -1600,15 +1600,16 @@ typedef struct WindowAggState
 	int64		frameheadpos;	/* current frame head position */
 	int64		frametailpos;	/* current frame tail position */
 	/* use struct pointer to avoid including windowapi.h here */
-	struct WindowObjectData *agg_winobj;	/* winobj for aggregate fetches */
+	struct WindowObjectData *agg_winobj;		/* winobj for aggregate
+												 * fetches */
 	int64		aggregatedbase; /* start row for current aggregates */
 	int64		aggregatedupto; /* rows before this one are aggregated */
 
 	int			frameOptions;	/* frame_clause options, see WindowDef */
 	ExprState  *startOffset;	/* expression for starting bound offset */
 	ExprState  *endOffset;		/* expression for ending bound offset */
-	Datum		startOffsetValue;	/* result of startOffset evaluation */
-	Datum		endOffsetValue;		/* result of endOffset evaluation */
+	Datum		startOffsetValue;		/* result of startOffset evaluation */
+	Datum		endOffsetValue; /* result of endOffset evaluation */
 
 	MemoryContext partcontext;	/* context for partition-lifespan data */
 	MemoryContext aggcontext;	/* context for each aggregate data */
@@ -1619,12 +1620,12 @@ typedef struct WindowAggState
 	bool		partition_spooled;		/* true if all tuples in current
 										 * partition have been spooled into
 										 * tuplestore */
-	bool		more_partitions;	/* true if there's more partitions after
-									 * this one */
-	bool		framehead_valid;	/* true if frameheadpos is known up to date
-									 * for current row */
-	bool		frametail_valid;	/* true if frametailpos is known up to date
-									 * for current row */
+	bool		more_partitions;/* true if there's more partitions after this
+								 * one */
+	bool		framehead_valid;/* true if frameheadpos is known up to date
+								 * for current row */
+	bool		frametail_valid;/* true if frametailpos is known up to date
+								 * for current row */
 
 	TupleTableSlot *first_part_slot;	/* first tuple of current or next
 										 * partition */
diff --git a/src/include/nodes/params.h b/src/include/nodes/params.h
index 12ef269e610608a7fea7b7a49c523423de321e4e..1219fe49e953c0dfecbc5d148f15407f6a70daff 100644
--- a/src/include/nodes/params.h
+++ b/src/include/nodes/params.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/nodes/params.h,v 1.41 2010/01/15 22:36:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/params.h,v 1.42 2010/02/26 02:01:25 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -29,13 +29,13 @@ struct ParseState;
  *	  Although parameter numbers are normally consecutive, we allow
  *	  ptype == InvalidOid to signal an unused array entry.
  *
- *	  pflags is a flags field.  Currently the only used bit is:
+ *	  pflags is a flags field.	Currently the only used bit is:
  *	  PARAM_FLAG_CONST signals the planner that it may treat this parameter
  *	  as a constant (i.e., generate a plan that works only for this value
  *	  of the parameter).
  *
  *	  There are two hook functions that can be associated with a ParamListInfo
- *	  array to support dynamic parameter handling.  First, if paramFetch
+ *	  array to support dynamic parameter handling.	First, if paramFetch
  *	  isn't null and the executor requires a value for an invalid parameter
  *	  (one with ptype == InvalidOid), the paramFetch hook is called to give
  *	  it a chance to fill in the parameter value.  Second, a parserSetup
@@ -68,7 +68,7 @@ typedef struct ParamListInfoData
 {
 	ParamFetchHook paramFetch;	/* parameter fetch hook */
 	void	   *paramFetchArg;
-	ParserSetupHook parserSetup; /* parser setup hook */
+	ParserSetupHook parserSetup;	/* parser setup hook */
 	void	   *parserSetupArg;
 	int			numParams;		/* number of ParamExternDatas following */
 	ParamExternData params[1];	/* VARIABLE LENGTH ARRAY */
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 5a5c040c2518ed31ca9cfbce20d731721961d346..5325f7e924b20d0fae39d56b8c8aedbba6d025a9 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -13,7 +13,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.431 2010/02/23 22:51:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.432 2010/02/26 02:01:25 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -276,7 +276,7 @@ typedef struct FuncCall
 	NodeTag		type;
 	List	   *funcname;		/* qualified name of function */
 	List	   *args;			/* the arguments (list of exprs) */
-	List       *agg_order;      /* ORDER BY (list of SortBy) */
+	List	   *agg_order;		/* ORDER BY (list of SortBy) */
 	bool		agg_star;		/* argument was really '*' */
 	bool		agg_distinct;	/* arguments were labeled DISTINCT */
 	bool		func_variadic;	/* last argument was labeled VARIADIC */
@@ -459,7 +459,7 @@ typedef struct RangeFunction
  * in either "raw" form (an untransformed parse tree) or "cooked" form
  * (a post-parse-analysis, executable expression tree), depending on
  * how this ColumnDef node was created (by parsing, or by inheritance
- * from an existing relation).  We should never have both in the same node!
+ * from an existing relation).	We should never have both in the same node!
  *
  * The constraints list may contain a CONSTR_DEFAULT item in a raw
  * parsetree produced by gram.y, but transformCreateStmt will remove
@@ -493,12 +493,12 @@ typedef struct InhRelation
 
 typedef enum CreateStmtLikeOption
 {
-	CREATE_TABLE_LIKE_DEFAULTS		= 1 << 0,
-	CREATE_TABLE_LIKE_CONSTRAINTS	= 1 << 1,
-	CREATE_TABLE_LIKE_INDEXES		= 1 << 2,
-	CREATE_TABLE_LIKE_STORAGE		= 1 << 3,
-	CREATE_TABLE_LIKE_COMMENTS		= 1 << 4,
-	CREATE_TABLE_LIKE_ALL			= 0x7FFFFFFF
+	CREATE_TABLE_LIKE_DEFAULTS = 1 << 0,
+	CREATE_TABLE_LIKE_CONSTRAINTS = 1 << 1,
+	CREATE_TABLE_LIKE_INDEXES = 1 << 2,
+	CREATE_TABLE_LIKE_STORAGE = 1 << 3,
+	CREATE_TABLE_LIKE_COMMENTS = 1 << 4,
+	CREATE_TABLE_LIKE_ALL = 0x7FFFFFFF
 } CreateStmtLikeOption;
 
 /*
@@ -1917,7 +1917,7 @@ typedef struct IndexStmt
 	List	   *indexParams;	/* a list of IndexElem */
 	List	   *options;		/* options from WITH clause */
 	Node	   *whereClause;	/* qualification (partial-index predicate) */
-	List	   *excludeOpNames;	/* exclusion operator names, or NIL if none */
+	List	   *excludeOpNames; /* exclusion operator names, or NIL if none */
 	bool		unique;			/* is index unique? */
 	bool		primary;		/* is index on primary key? */
 	bool		isconstraint;	/* is it from a CONSTRAINT clause? */
@@ -1998,7 +1998,7 @@ typedef struct InlineCodeBlock
 	NodeTag		type;
 	char	   *source_text;	/* source text of anonymous code block */
 	Oid			langOid;		/* OID of selected language */
-	bool        langIsTrusted;  /* trusted property of the language */
+	bool		langIsTrusted;	/* trusted property of the language */
 } InlineCodeBlock;
 
 /* ----------------------
@@ -2257,11 +2257,11 @@ typedef struct ClusterStmt
  */
 typedef enum VacuumOption
 {
-	VACOPT_VACUUM		= 1 << 0,	/* do VACUUM */
-	VACOPT_ANALYZE		= 1 << 1,	/* do ANALYZE */
-	VACOPT_VERBOSE		= 1 << 2,	/* print progress info */
-	VACOPT_FREEZE		= 1 << 3,	/* FREEZE option */
-	VACOPT_FULL			= 1 << 4	/* FULL (non-concurrent) vacuum */
+	VACOPT_VACUUM = 1 << 0,		/* do VACUUM */
+	VACOPT_ANALYZE = 1 << 1,	/* do ANALYZE */
+	VACOPT_VERBOSE = 1 << 2,	/* print progress info */
+	VACOPT_FREEZE = 1 << 3,		/* FREEZE option */
+	VACOPT_FULL = 1 << 4		/* FULL (non-concurrent) vacuum */
 } VacuumOption;
 
 typedef struct VacuumStmt
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index b6640cfab3385db4a60002ce466e3df56ef1085e..79876100d5ae6a40e5d10a49c9fb01dd73223f54 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.116 2010/02/12 17:33:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.117 2010/02/26 02:01:25 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -163,12 +163,12 @@ typedef struct Result
 typedef struct ModifyTable
 {
 	Plan		plan;
-	CmdType		operation;			/* INSERT, UPDATE, or DELETE */
+	CmdType		operation;		/* INSERT, UPDATE, or DELETE */
 	List	   *resultRelations;	/* integer list of RT indexes */
-	List	   *plans;				/* plan(s) producing source data */
-	List	   *returningLists;		/* per-target-table RETURNING tlists */
-	List	   *rowMarks;			/* PlanRowMarks (non-locking only) */
-	int			epqParam;			/* ID of Param for EvalPlanQual re-eval */
+	List	   *plans;			/* plan(s) producing source data */
+	List	   *returningLists; /* per-target-table RETURNING tlists */
+	List	   *rowMarks;		/* PlanRowMarks (non-locking only) */
+	int			epqParam;		/* ID of Param for EvalPlanQual re-eval */
 } ModifyTable;
 
 /* ----------------
@@ -345,7 +345,7 @@ typedef struct TidScan
  *
  * Note: subrtable is used just to carry the subquery rangetable from
  * createplan.c to setrefs.c; it should always be NIL by the time the
- * executor sees the plan.  Similarly for subrowmark.
+ * executor sees the plan.	Similarly for subrowmark.
  * ----------------
  */
 typedef struct SubqueryScan
@@ -678,7 +678,7 @@ typedef enum RowMarkType
  *	   plan-time representation of FOR UPDATE/SHARE clauses
  *
  * When doing UPDATE, DELETE, or SELECT FOR UPDATE/SHARE, we create a separate
- * PlanRowMark node for each non-target relation in the query.  Relations that
+ * PlanRowMark node for each non-target relation in the query.	Relations that
  * are not specified as FOR UPDATE/SHARE are marked ROW_MARK_REFERENCE (if
  * real tables) or ROW_MARK_COPY (if not).
  *
@@ -690,7 +690,7 @@ typedef enum RowMarkType
  * prti == parent's RT index, and can therefore be recognized as children by
  * the fact that prti != rti.
  *
- * The AttrNumbers are filled in during preprocess_targetlist.  We use
+ * The AttrNumbers are filled in during preprocess_targetlist.	We use
  * different subsets of them for plain relations, inheritance children,
  * and non-table relations.
  */
@@ -699,7 +699,7 @@ typedef struct PlanRowMark
 	NodeTag		type;
 	Index		rti;			/* range table index of markable relation */
 	Index		prti;			/* range table index of parent relation */
-	RowMarkType	markType;		/* see enum above */
+	RowMarkType markType;		/* see enum above */
 	bool		noWait;			/* NOWAIT option */
 	bool		isParent;		/* true if this is a "dummy" parent entry */
 	AttrNumber	ctidAttNo;		/* resno of ctid junk attribute, if any */
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 9cc8edd3c653c799d6f9e57a385398b13c094b1b..3fc3d3483bcca1bf37df6ad81ec9e4c639a2ee8f 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -10,7 +10,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/nodes/primnodes.h,v 1.155 2010/02/17 04:19:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/primnodes.h,v 1.156 2010/02/26 02:01:25 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -211,14 +211,14 @@ typedef struct Param
  * Aggref
  *
  * The aggregate's args list is a targetlist, ie, a list of TargetEntry nodes
- * (before Postgres 9.0 it was just bare expressions).  The non-resjunk TLEs
+ * (before Postgres 9.0 it was just bare expressions).	The non-resjunk TLEs
  * represent the aggregate's regular arguments (if any) and resjunk TLEs can
  * be added at the end to represent ORDER BY expressions that are not also
  * arguments.  As in a top-level Query, the TLEs can be marked with
  * ressortgroupref indexes to let them be referenced by SortGroupClause
  * entries in the aggorder and/or aggdistinct lists.  This represents ORDER BY
  * and DISTINCT operations to be applied to the aggregate input rows before
- * they are passed to the transition function.  The grammar only allows a
+ * they are passed to the transition function.	The grammar only allows a
  * simple "DISTINCT" specifier for the arguments, but we use the full
  * query-level representation to allow more code sharing.
  */
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index fd93dfcce347791936916317efac6016dcfed4e9..888005282aae1cff1086fa915fec57f634caa2a6 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.183 2010/01/05 21:54:00 rhaas Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.184 2010/02/26 02:01:25 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -145,12 +145,12 @@ typedef struct PlannerInfo
 	/*
 	 * When doing a dynamic-programming-style join search, join_rel_level[k]
 	 * is a list of all join-relation RelOptInfos of level k, and
-	 * join_cur_level is the current level.  New join-relation RelOptInfos
-	 * are automatically added to the join_rel_level[join_cur_level] list.
+	 * join_cur_level is the current level.  New join-relation RelOptInfos are
+	 * automatically added to the join_rel_level[join_cur_level] list.
 	 * join_rel_level is NULL if not in use.
 	 */
-	List	  **join_rel_level;	/* lists of join-relation RelOptInfos */
-	int			join_cur_level;	/* index of list being extended */
+	List	  **join_rel_level; /* lists of join-relation RelOptInfos */
+	int			join_cur_level; /* index of list being extended */
 
 	List	   *resultRelations;	/* integer list of RT indexes, or NIL */
 
@@ -876,8 +876,8 @@ typedef struct MergePath
 {
 	JoinPath	jpath;
 	List	   *path_mergeclauses;		/* join clauses to be used for merge */
-	List	   *outersortkeys;			/* keys for explicit sort, if any */
-	List	   *innersortkeys;			/* keys for explicit sort, if any */
+	List	   *outersortkeys;	/* keys for explicit sort, if any */
+	List	   *innersortkeys;	/* keys for explicit sort, if any */
 	bool		materialize_inner;		/* add Materialize to inner? */
 } MergePath;
 
diff --git a/src/include/optimizer/clauses.h b/src/include/optimizer/clauses.h
index 124c6a749cfd6b535992f6c50db5d933b8099b12..566300ab47523d734536b373b2099468c84eeb07 100644
--- a/src/include/optimizer/clauses.h
+++ b/src/include/optimizer/clauses.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/optimizer/clauses.h,v 1.100 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/clauses.h,v 1.101 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -23,7 +23,7 @@
 typedef struct
 {
 	int			numAggs;		/* total number of aggregate calls */
-	int			numOrderedAggs; 	/* number that use DISTINCT or ORDER BY */
+	int			numOrderedAggs; /* number that use DISTINCT or ORDER BY */
 	Size		transitionSpace;	/* for pass-by-ref transition data */
 } AggClauseCounts;
 
diff --git a/src/include/optimizer/geqo.h b/src/include/optimizer/geqo.h
index a12b0efc1ee8343e65262962ca9f426361cd77d8..128818d5a651aea950c957a9ed356a1b2b5bf083 100644
--- a/src/include/optimizer/geqo.h
+++ b/src/include/optimizer/geqo.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/optimizer/geqo.h,v 1.46 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/geqo.h,v 1.47 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -72,7 +72,7 @@ extern double Geqo_seed;		/* 0 .. 1 */
  */
 typedef struct
 {
-	List	   *initial_rels;			/* the base relations we are joining */
+	List	   *initial_rels;	/* the base relations we are joining */
 	unsigned short random_state[3];		/* state for erand48() */
 } GeqoPrivateData;
 
diff --git a/src/include/optimizer/geqo_recombination.h b/src/include/optimizer/geqo_recombination.h
index 733ae1e24f17495e18c963c7f97459e0f3a64df1..9b36db6902df6a9745c4fd392499544cebeb5616 100644
--- a/src/include/optimizer/geqo_recombination.h
+++ b/src/include/optimizer/geqo_recombination.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/optimizer/geqo_recombination.h,v 1.22 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/geqo_recombination.h,v 1.23 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -43,10 +43,10 @@ extern Edge *alloc_edge_table(PlannerInfo *root, int num_gene);
 extern void free_edge_table(PlannerInfo *root, Edge *edge_table);
 
 extern float gimme_edge_table(PlannerInfo *root, Gene *tour1, Gene *tour2,
-							  int num_gene, Edge *edge_table);
+				 int num_gene, Edge *edge_table);
 
-extern int	gimme_tour(PlannerInfo *root, Edge *edge_table, Gene *new_gene,
-					   int num_gene);
+extern int gimme_tour(PlannerInfo *root, Edge *edge_table, Gene *new_gene,
+		   int num_gene);
 
 
 /* partially matched crossover [PMX] */
@@ -55,8 +55,8 @@ extern int	gimme_tour(PlannerInfo *root, Edge *edge_table, Gene *new_gene,
 #define MOM 0					/* indicator for gene from mom */
 
 extern void pmx(PlannerInfo *root,
-				Gene *tour1, Gene *tour2,
-				Gene *offspring, int num_gene);
+	Gene *tour1, Gene *tour2,
+	Gene *offspring, int num_gene);
 
 
 typedef struct City
@@ -71,19 +71,19 @@ extern City *alloc_city_table(PlannerInfo *root, int num_gene);
 extern void free_city_table(PlannerInfo *root, City *city_table);
 
 /* cycle crossover [CX] */
-extern int	cx(PlannerInfo *root, Gene *tour1, Gene *tour2,
-			   Gene *offspring, int num_gene, City *city_table);
+extern int cx(PlannerInfo *root, Gene *tour1, Gene *tour2,
+   Gene *offspring, int num_gene, City *city_table);
 
 /* position crossover [PX] */
 extern void px(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
-			   int num_gene, City *city_table);
+   int num_gene, City *city_table);
 
 /* order crossover [OX1] according to Davis */
 extern void ox1(PlannerInfo *root, Gene *mom, Gene *dad, Gene *offspring,
-				int num_gene, City *city_table);
+	int num_gene, City *city_table);
 
 /* order crossover [OX2] according to Syswerda */
 extern void ox2(PlannerInfo *root, Gene *mom, Gene *dad, Gene *offspring,
-				int num_gene, City *city_table);
+	int num_gene, City *city_table);
 
 #endif   /* GEQO_RECOMBINATION_H */
diff --git a/src/include/optimizer/geqo_selection.h b/src/include/optimizer/geqo_selection.h
index 0daa2e419c550e4c20f4ab318d7f5abf468fa7d9..711107dfd1c4676dc7fc86b85d02ddd04ed26bc5 100644
--- a/src/include/optimizer/geqo_selection.h
+++ b/src/include/optimizer/geqo_selection.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/optimizer/geqo_selection.h,v 1.23 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/geqo_selection.h,v 1.24 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -27,7 +27,7 @@
 
 
 extern void geqo_selection(PlannerInfo *root,
-						   Chromosome *momma, Chromosome *daddy,
-						   Pool *pool, double bias);
+			   Chromosome *momma, Chromosome *daddy,
+			   Pool *pool, double bias);
 
 #endif   /* GEQO_SELECTION_H */
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index ac36f9aaaed643e37785eb5ae60e8eb2d67e445f..2255f147bd7d0af02bae1c76f8b1962bb5253a6d 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/optimizer/pathnode.h,v 1.82 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/pathnode.h,v 1.83 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -52,7 +52,7 @@ extern MaterialPath *create_material_path(RelOptInfo *rel, Path *subpath);
 extern UniquePath *create_unique_path(PlannerInfo *root, RelOptInfo *rel,
 				   Path *subpath, SpecialJoinInfo *sjinfo);
 extern NoOpPath *create_noop_path(PlannerInfo *root, RelOptInfo *rel,
-								  Path *subpath);
+				 Path *subpath);
 extern Path *create_subqueryscan_path(RelOptInfo *rel, List *pathkeys);
 extern Path *create_functionscan_path(PlannerInfo *root, RelOptInfo *rel);
 extern Path *create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel);
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index ed00f86d278c5af9fff9f1be1e2260f5570da626..62742f5779225ff3db04d14357e5a6011f183e74 100644
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/optimizer/planmain.h,v 1.125 2010/02/12 17:33:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/planmain.h,v 1.126 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -79,8 +79,8 @@ extern SetOp *make_setop(SetOpCmd cmd, SetOpStrategy strategy, Plan *lefttree,
 extern Result *make_result(PlannerInfo *root, List *tlist,
 			Node *resconstantqual, Plan *subplan);
 extern ModifyTable *make_modifytable(CmdType operation, List *resultRelations,
-									 List *subplans, List *returningLists,
-									 List *rowMarks, int epqParam);
+				 List *subplans, List *returningLists,
+				 List *rowMarks, int epqParam);
 extern bool is_projection_capable_plan(Plan *plan);
 
 /*
diff --git a/src/include/parser/analyze.h b/src/include/parser/analyze.h
index 5f8f27fc0cdc98809f5b6fa0a0abe7196c7ed43c..86a031223700d27afdc7a0e041eefee00e7c05da 100644
--- a/src/include/parser/analyze.h
+++ b/src/include/parser/analyze.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/parser/analyze.h,v 1.44 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/analyze.h,v 1.45 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -23,8 +23,8 @@ extern Query *parse_analyze_varparams(Node *parseTree, const char *sourceText,
 						Oid **paramTypes, int *numParams);
 
 extern Query *parse_sub_analyze(Node *parseTree, ParseState *parentParseState,
-								CommonTableExpr *parentCTE,
-								bool locked_from_parent);
+				  CommonTableExpr *parentCTE,
+				  bool locked_from_parent);
 extern Query *transformStmt(ParseState *pstate, Node *parseTree);
 
 extern bool analyze_requires_snapshot(Node *parseTree);
diff --git a/src/include/parser/gramparse.h b/src/include/parser/gramparse.h
index 55061d5674b35fb6a52e667fdabd4903a1ee3778..6ca0a4fbc60ae4333727df5d5e4a20f62ab4fb00 100644
--- a/src/include/parser/gramparse.h
+++ b/src/include/parser/gramparse.h
@@ -11,7 +11,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/parser/gramparse.h,v 1.51 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/gramparse.h,v 1.52 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -29,7 +29,7 @@
 #include "parser/gram.h"
 
 /*
- * The YY_EXTRA data that a flex scanner allows us to pass around.  Private
+ * The YY_EXTRA data that a flex scanner allows us to pass around.	Private
  * state needed for raw parsing/lexing goes here.
  */
 typedef struct base_yy_extra_type
@@ -42,10 +42,10 @@ typedef struct base_yy_extra_type
 	/*
 	 * State variables for base_yylex().
 	 */
-	bool		have_lookahead;		/* is lookahead info valid? */
+	bool		have_lookahead; /* is lookahead info valid? */
 	int			lookahead_token;	/* one-token lookahead */
-	core_YYSTYPE lookahead_yylval;	/* yylval for lookahead token */
-	YYLTYPE		lookahead_yylloc;	/* yylloc for lookahead token */
+	core_YYSTYPE lookahead_yylval;		/* yylval for lookahead token */
+	YYLTYPE		lookahead_yylloc;		/* yylloc for lookahead token */
 
 	/*
 	 * State variables that belong to the grammar.
@@ -63,8 +63,8 @@ typedef struct base_yy_extra_type
 
 
 /* from parser.c */
-extern int	base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp,
-					   core_yyscan_t yyscanner);
+extern int base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp,
+		   core_yyscan_t yyscanner);
 
 /* from gram.y */
 extern void parser_init(base_yy_extra_type *yyext);
diff --git a/src/include/parser/keywords.h b/src/include/parser/keywords.h
index 94eed4003bdbab95185add58b18df6fddbecb1f7..a139027850cf34e436309f181220fd599417fbc8 100644
--- a/src/include/parser/keywords.h
+++ b/src/include/parser/keywords.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/parser/keywords.h,v 1.28 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/keywords.h,v 1.29 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -29,10 +29,10 @@ typedef struct ScanKeyword
 } ScanKeyword;
 
 extern const ScanKeyword ScanKeywords[];
-extern const int	NumScanKeywords;
+extern const int NumScanKeywords;
 
 extern const ScanKeyword *ScanKeywordLookup(const char *text,
-											const ScanKeyword *keywords,
-											int num_keywords);
+				  const ScanKeyword *keywords,
+				  int num_keywords);
 
 #endif   /* KEYWORDS_H */
diff --git a/src/include/parser/parse_agg.h b/src/include/parser/parse_agg.h
index dd884e5d435d48ef36df452e1c7058ca6f4aecd4..8da37ef3077c3e36fdf52da671e01d830b8283c6 100644
--- a/src/include/parser/parse_agg.h
+++ b/src/include/parser/parse_agg.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/parser/parse_agg.h,v 1.41 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_agg.h,v 1.42 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -16,7 +16,7 @@
 #include "parser/parse_node.h"
 
 extern void transformAggregateCall(ParseState *pstate, Aggref *agg,
-								   bool agg_distinct);
+					   bool agg_distinct);
 extern void transformWindowFuncCall(ParseState *pstate, WindowFunc *wfunc,
 						WindowDef *windef);
 
diff --git a/src/include/parser/parse_cte.h b/src/include/parser/parse_cte.h
index 022e4e3264fdcd9ecaab16820ae4b6a1c08d9cc3..07efce78dcd11f1124e3713cf3bff762c6544480 100644
--- a/src/include/parser/parse_cte.h
+++ b/src/include/parser/parse_cte.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/parser/parse_cte.h,v 1.4 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_cte.h,v 1.5 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -19,6 +19,6 @@
 extern List *transformWithClause(ParseState *pstate, WithClause *withClause);
 
 extern void analyzeCTETargetList(ParseState *pstate, CommonTableExpr *cte,
-								 List *tlist);
+					 List *tlist);
 
 #endif   /* PARSE_CTE_H */
diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h
index 2b56cde112ace170d79faabe59efa4b9b751e322..f21628a9b325c93e0df9b83847389f8e3b75e059 100644
--- a/src/include/parser/parse_node.h
+++ b/src/include/parser/parse_node.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/parser/parse_node.h,v 1.67 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_node.h,v 1.68 2010/02/26 02:01:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -23,12 +23,12 @@
  */
 typedef struct ParseState ParseState;
 
-typedef Node * (*PreParseColumnRefHook) (ParseState *pstate, ColumnRef *cref);
-typedef Node * (*PostParseColumnRefHook) (ParseState *pstate, ColumnRef *cref, Node *var);
-typedef Node * (*ParseParamRefHook) (ParseState *pstate, ParamRef *pref);
-typedef Node * (*CoerceParamHook) (ParseState *pstate, Param *param,
-								   Oid targetTypeId, int32 targetTypeMod,
-								   int location);
+typedef Node *(*PreParseColumnRefHook) (ParseState *pstate, ColumnRef *cref);
+typedef Node *(*PostParseColumnRefHook) (ParseState *pstate, ColumnRef *cref, Node *var);
+typedef Node *(*ParseParamRefHook) (ParseState *pstate, ParamRef *pref);
+typedef Node *(*CoerceParamHook) (ParseState *pstate, Param *param,
+									   Oid targetTypeId, int32 targetTypeMod,
+											  int location);
 
 
 /*
@@ -117,7 +117,7 @@ struct ParseState
 	PostParseColumnRefHook p_post_columnref_hook;
 	ParseParamRefHook p_paramref_hook;
 	CoerceParamHook p_coerce_param_hook;
-	void	   *p_ref_hook_state;	/* common passthrough link for above */
+	void	   *p_ref_hook_state;		/* common passthrough link for above */
 };
 
 /* Support for parser_errposition_callback function */
diff --git a/src/include/parser/parse_param.h b/src/include/parser/parse_param.h
index fc0223ac0a6d77f8de5a97b9a87b9641e7cdd9b8..d8244f4c03d95b0d30297b964bd65c2fd724e0c3 100644
--- a/src/include/parser/parse_param.h
+++ b/src/include/parser/parse_param.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/parser/parse_param.h,v 1.2 2010/01/02 16:58:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_param.h,v 1.3 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -16,9 +16,9 @@
 #include "parser/parse_node.h"
 
 extern void parse_fixed_parameters(ParseState *pstate,
-								   Oid *paramTypes, int numParams);
+					   Oid *paramTypes, int numParams);
 extern void parse_variable_parameters(ParseState *pstate,
-									  Oid **paramTypes, int *numParams);
+						  Oid **paramTypes, int *numParams);
 extern void check_variable_parameters(ParseState *pstate, Query *query);
 
 #endif   /* PARSE_PARAM_H */
diff --git a/src/include/parser/scanner.h b/src/include/parser/scanner.h
index 60a6fc1be10a924405f35c4f0135f38c1265e73d..b076b67f8ecf54a16ac4eb3eebaeee4fa02d1a96 100644
--- a/src/include/parser/scanner.h
+++ b/src/include/parser/scanner.h
@@ -4,14 +4,14 @@
  *		API for the core scanner (flex machine)
  *
  * The core scanner is also used by PL/pgsql, so we provide a public API
- * for it.  However, the rest of the backend is only expected to use the
+ * for it.	However, the rest of the backend is only expected to use the
  * higher-level API provided by parser.h.
  *
  *
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/parser/scanner.h,v 1.2 2010/01/02 16:58:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/scanner.h,v 1.3 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -58,7 +58,7 @@ typedef union core_YYSTYPE
 
 /*
  * The YY_EXTRA data that a flex scanner allows us to pass around.
- * Private state needed by the core scanner goes here.  Note that the actual
+ * Private state needed by the core scanner goes here.	Note that the actual
  * yy_extra struct may be larger and have this as its first component, thus
  * allowing the calling parser to keep some fields of its own in YY_EXTRA.
  */
@@ -78,12 +78,11 @@ typedef struct core_yy_extra_type
 	int			num_keywords;
 
 	/*
-	 * literalbuf is used to accumulate literal values when multiple rules
-	 * are needed to parse a single literal.  Call startlit() to reset buffer
-	 * to empty, addlit() to add text.  NOTE: the string in literalbuf is
-	 * NOT necessarily null-terminated, but there always IS room to add a
-	 * trailing null at offset literallen.  We store a null only when we
-	 * need it.
+	 * literalbuf is used to accumulate literal values when multiple rules are
+	 * needed to parse a single literal.  Call startlit() to reset buffer to
+	 * empty, addlit() to add text.  NOTE: the string in literalbuf is NOT
+	 * necessarily null-terminated, but there always IS room to add a trailing
+	 * null at offset literallen.  We store a null only when we need it.
 	 */
 	char	   *literalbuf;		/* palloc'd expandable buffer */
 	int			literallen;		/* actual current string length */
@@ -108,12 +107,12 @@ typedef void *core_yyscan_t;
 
 /* Entry points in parser/scan.l */
 extern core_yyscan_t scanner_init(const char *str,
-								  core_yy_extra_type *yyext,
-								  const ScanKeyword *keywords,
-								  int num_keywords);
+			 core_yy_extra_type *yyext,
+			 const ScanKeyword *keywords,
+			 int num_keywords);
 extern void scanner_finish(core_yyscan_t yyscanner);
-extern int	core_yylex(core_YYSTYPE *lvalp, YYLTYPE *llocp,
-					   core_yyscan_t yyscanner);
+extern int core_yylex(core_YYSTYPE *lvalp, YYLTYPE *llocp,
+		   core_yyscan_t yyscanner);
 extern int	scanner_errposition(int location, core_yyscan_t yyscanner);
 extern void scanner_yyerror(const char *message, core_yyscan_t yyscanner);
 
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 6545ddf8588a996f93f8565a5b345a036c135494..34577fa55b8e8c6850329ca14466c8383ebcf889 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -5,7 +5,7 @@
  *
  *	Copyright (c) 2001-2010, PostgreSQL Global Development Group
  *
- *	$PostgreSQL: pgsql/src/include/pgstat.h,v 1.88 2010/01/28 14:25:41 mha Exp $
+ *	$PostgreSQL: pgsql/src/include/pgstat.h,v 1.89 2010/02/26 02:01:20 momjian Exp $
  * ----------
  */
 #ifndef PGSTAT_H
@@ -98,7 +98,7 @@ typedef struct PgStat_TableCounts
 /* Possible targets for resetting cluster-wide shared values */
 typedef enum PgStat_Shared_Reset_Target
 {
-    RESET_BGWRITER
+	RESET_BGWRITER
 } PgStat_Shared_Reset_Target;
 
 /* Possible object types for resetting single counters */
@@ -275,7 +275,7 @@ typedef struct PgStat_MsgResetcounter
 } PgStat_MsgResetcounter;
 
 /* ----------
- * PgStat_MsgResetsharedcounter	Sent by the backend to tell the collector
+ * PgStat_MsgResetsharedcounter Sent by the backend to tell the collector
  *								to reset a shared counter
  * ----------
  */
@@ -286,7 +286,7 @@ typedef struct PgStat_MsgResetsharedcounter
 } PgStat_MsgResetsharedcounter;
 
 /* ----------
- * PgStat_MsgResetsinglecounter	Sent by the backend to tell the collector
+ * PgStat_MsgResetsinglecounter Sent by the backend to tell the collector
  *								to reset a single counter
  * ----------
  */
@@ -606,7 +606,7 @@ typedef struct PgBackendStatus
 	bool		st_waiting;
 
 	/* application name; MUST be null-terminated */
-	char       *st_appname;
+	char	   *st_appname;
 
 	/* current command string; MUST be null-terminated */
 	char	   *st_activity;
diff --git a/src/include/port.h b/src/include/port.h
index 9fa5cce555a58a7c3548b6d8a01deda0c8039256..b9197cdb9c604c35059d1e50c1391e990bbafc1b 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/port.h,v 1.130 2010/01/31 17:35:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/port.h,v 1.131 2010/02/26 02:01:20 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -20,9 +20,11 @@
 /* socket has a different definition on WIN32 */
 #ifndef WIN32
 typedef int pgsocket;
+
 #define PGINVALID_SOCKET (-1)
 #else
 typedef SOCKET pgsocket;
+
 #define PGINVALID_SOCKET INVALID_SOCKET
 #endif
 
diff --git a/src/include/postmaster/autovacuum.h b/src/include/postmaster/autovacuum.h
index b5807af2031c8a3bfd2c8d6664b4f628ae764c44..f67a2757515e287fc2c1fffdd68be8ad4e642f30 100644
--- a/src/include/postmaster/autovacuum.h
+++ b/src/include/postmaster/autovacuum.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/postmaster/autovacuum.h,v 1.17 2010/01/02 16:58:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/postmaster/autovacuum.h,v 1.18 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -37,6 +37,7 @@ extern int	Log_autovacuum_min_duration;
 extern bool AutoVacuumingActive(void);
 extern bool IsAutoVacuumLauncherProcess(void);
 extern bool IsAutoVacuumWorkerProcess(void);
+
 #define IsAnyAutoVacuumProcess() \
 	(IsAutoVacuumLauncherProcess() || IsAutoVacuumWorkerProcess())
 
diff --git a/src/include/replication/walreceiver.h b/src/include/replication/walreceiver.h
index 56af60560e7548853e160f1b3463b4f25a9a1ef2..4300b80b2781ddc78c7553c7bbc6ac632bc12bed 100644
--- a/src/include/replication/walreceiver.h
+++ b/src/include/replication/walreceiver.h
@@ -5,7 +5,7 @@
  *
  * Portions Copyright (c) 2010-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/include/replication/walreceiver.h,v 1.7 2010/02/19 10:51:04 heikki Exp $
+ * $PostgreSQL: pgsql/src/include/replication/walreceiver.h,v 1.8 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -29,37 +29,37 @@ extern bool am_walreceiver;
  */
 typedef enum
 {
-	WALRCV_STOPPED,		/* stopped and mustn't start up again */
-	WALRCV_STARTING,	/* launched, but the process hasn't initialized yet */
-	WALRCV_RUNNING,		/* walreceiver is running */
-	WALRCV_STOPPING		/* requested to stop, but still running */
+	WALRCV_STOPPED,				/* stopped and mustn't start up again */
+	WALRCV_STARTING,			/* launched, but the process hasn't
+								 * initialized yet */
+	WALRCV_RUNNING,				/* walreceiver is running */
+	WALRCV_STOPPING				/* requested to stop, but still running */
 } WalRcvState;
 
 /* Shared memory area for management of walreceiver process */
 typedef struct
 {
 	/*
-	 * connection string; is used for walreceiver to connect with
-	 * the primary.
+	 * connection string; is used for walreceiver to connect with the primary.
 	 */
-	char	conninfo[MAXCONNINFO];
+	char		conninfo[MAXCONNINFO];
 
 	/*
 	 * PID of currently active walreceiver process, and the current state.
 	 */
-	pid_t	pid;
+	pid_t		pid;
 	WalRcvState walRcvState;
-	pg_time_t startTime;
+	pg_time_t	startTime;
 
 	/*
 	 * receivedUpto-1 is the last byte position that has been already
-	 * received. When startup process starts the walreceiver, it sets this
-	 * to the point where it wants the streaming to begin. After that,
+	 * received. When startup process starts the walreceiver, it sets this to
+	 * the point where it wants the streaming to begin. After that,
 	 * walreceiver updates this whenever it flushes the received WAL.
 	 */
 	XLogRecPtr	receivedUpto;
 
-	slock_t	mutex;		/* locks shared variables shown above */
+	slock_t		mutex;			/* locks shared variables shown above */
 } WalRcvData;
 
 extern WalRcvData *WalRcv;
@@ -69,7 +69,7 @@ typedef bool (*walrcv_connect_type) (char *conninfo, XLogRecPtr startpoint);
 extern PGDLLIMPORT walrcv_connect_type walrcv_connect;
 
 typedef bool (*walrcv_receive_type) (int timeout, unsigned char *type,
-									 char **buffer, int *len);
+												 char **buffer, int *len);
 extern PGDLLIMPORT walrcv_receive_type walrcv_receive;
 
 typedef void (*walrcv_disconnect_type) (void);
diff --git a/src/include/replication/walsender.h b/src/include/replication/walsender.h
index c9bfd12e8bc9d2e3430066fd8bee36bfd3eeadd7..abb8312ecf78eb07c5e3302eb0d0ae5149830523 100644
--- a/src/include/replication/walsender.h
+++ b/src/include/replication/walsender.h
@@ -5,7 +5,7 @@
  *
  * Portions Copyright (c) 2010-2010, PostgreSQL Global Development Group
  *
- * $PostgreSQL: pgsql/src/include/replication/walsender.h,v 1.1 2010/01/15 09:19:09 heikki Exp $
+ * $PostgreSQL: pgsql/src/include/replication/walsender.h,v 1.2 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -20,30 +20,30 @@
  */
 typedef struct WalSnd
 {
-	pid_t	pid;		/* this walsender's process id, or 0 */
-	XLogRecPtr sentPtr;	/* WAL has been sent up to this point */
+	pid_t		pid;			/* this walsender's process id, or 0 */
+	XLogRecPtr	sentPtr;		/* WAL has been sent up to this point */
 
-	slock_t	mutex;		/* locks shared variables shown above */
+	slock_t		mutex;			/* locks shared variables shown above */
 } WalSnd;
 
 /* There is one WalSndCtl struct for the whole database cluster */
 typedef struct
 {
-	WalSnd	walsnds[1];		/* VARIABLE LENGTH ARRAY */
+	WalSnd		walsnds[1];		/* VARIABLE LENGTH ARRAY */
 } WalSndCtlData;
 
 extern WalSndCtlData *WalSndCtl;
 
 /* global state */
-extern bool	am_walsender;
+extern bool am_walsender;
 
 /* user-settable parameters */
 extern int	WalSndDelay;
 
-extern int WalSenderMain(void);
+extern int	WalSenderMain(void);
 extern void WalSndSignals(void);
 extern Size WalSndShmemSize(void);
 extern void WalSndShmemInit(void);
 extern XLogRecPtr GetOldestWALSendPointer(void);
 
-#endif	/* _WALSENDER_H */
+#endif   /* _WALSENDER_H */
diff --git a/src/include/rewrite/rewriteManip.h b/src/include/rewrite/rewriteManip.h
index 3bd0a02869b1a33dfb6c51778e495e15d46552a4..0e48129d8289254f9b99b148d31e804d556ab0e9 100644
--- a/src/include/rewrite/rewriteManip.h
+++ b/src/include/rewrite/rewriteManip.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/rewrite/rewriteManip.h,v 1.52 2010/01/02 16:58:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/rewrite/rewriteManip.h,v 1.53 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -19,16 +19,16 @@
 
 typedef struct replace_rte_variables_context replace_rte_variables_context;
 
-typedef Node * (*replace_rte_variables_callback) (Var *var,
-									replace_rte_variables_context *context);
+typedef Node *(*replace_rte_variables_callback) (Var *var,
+									 replace_rte_variables_context *context);
 
 struct replace_rte_variables_context
 {
 	replace_rte_variables_callback callback;	/* callback function */
-	void	   *callback_arg;		/* context data for callback function */
-	int			target_varno;		/* RTE index to search for */
-	int			sublevels_up;		/* (current) nesting depth */
-	bool		inserted_sublink;	/* have we inserted a SubLink? */
+	void	   *callback_arg;	/* context data for callback function */
+	int			target_varno;	/* RTE index to search for */
+	int			sublevels_up;	/* (current) nesting depth */
+	bool		inserted_sublink;		/* have we inserted a SubLink? */
 };
 
 
diff --git a/src/include/storage/fd.h b/src/include/storage/fd.h
index 9dd240e34cf2a5b2dffca32de049ec5795b96987..5798ee385651938ca2a7c49867931c12436aa3cf 100644
--- a/src/include/storage/fd.h
+++ b/src/include/storage/fd.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/storage/fd.h,v 1.67 2010/02/15 00:50:57 stark Exp $
+ * $PostgreSQL: pgsql/src/include/storage/fd.h,v 1.68 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -98,7 +98,7 @@ extern int	pg_fsync(int fd);
 extern int	pg_fsync_no_writethrough(int fd);
 extern int	pg_fsync_writethrough(int fd);
 extern int	pg_fdatasync(int fd);
-extern int  pg_flush_data(int fd, off_t offset, off_t amount);
+extern int	pg_flush_data(int fd, off_t offset, off_t amount);
 
 /* Filename components for OpenTemporaryFile */
 #define PG_TEMP_FILES_DIR "pgsql_tmp"
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index 052dc16b0d0caf3a4fd17393af953bfee62cff17..8d3a6012b829a3e6b065d5f49532e63e2b618512 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/storage/lock.h,v 1.118 2010/01/02 16:58:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/lock.h,v 1.119 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -478,10 +478,10 @@ extern LockAcquireResult LockAcquire(const LOCKTAG *locktag,
 			bool sessionLock,
 			bool dontWait);
 extern LockAcquireResult LockAcquireExtended(const LOCKTAG *locktag,
-			LOCKMODE lockmode,
-			bool sessionLock,
-			bool dontWait,
-			bool report_memory_error);
+					LOCKMODE lockmode,
+					bool sessionLock,
+					bool dontWait,
+					bool report_memory_error);
 extern bool LockRelease(const LOCKTAG *locktag,
 			LOCKMODE lockmode, bool sessionLock);
 extern void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks);
@@ -504,9 +504,9 @@ extern void ReportLockTableError(bool report);
 
 typedef struct xl_standby_lock
 {
-	TransactionId	xid;	/* xid of holder of AccessExclusiveLock */
-	Oid		dbOid;
-	Oid		relOid;
+	TransactionId xid;			/* xid of holder of AccessExclusiveLock */
+	Oid			dbOid;
+	Oid			relOid;
 } xl_standby_lock;
 
 extern xl_standby_lock *GetRunningTransactionLocks(int *nlocks);
@@ -519,7 +519,7 @@ extern void lock_twophase_postcommit(TransactionId xid, uint16 info,
 extern void lock_twophase_postabort(TransactionId xid, uint16 info,
 						void *recdata, uint32 len);
 extern void lock_twophase_standby_recover(TransactionId xid, uint16 info,
-					  void *recdata, uint32 len);
+							  void *recdata, uint32 len);
 
 extern DeadLockState DeadLockCheck(PGPROC *proc);
 extern PGPROC *GetBlockingAutoVacuumPgproc(void);
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index 2ace9585009c06e1a469fc127d2ae91bb279f271..0322007da902142a8a0da253efa7b5720906115b 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/storage/lwlock.h,v 1.45 2010/02/16 22:34:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/lwlock.h,v 1.46 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -68,8 +68,8 @@ typedef enum LWLockId
 	AutovacuumScheduleLock,
 	SyncScanLock,
 	RelationMappingLock,
- 	AsyncCtlLock,
- 	AsyncQueueLock,
+	AsyncCtlLock,
+	AsyncQueueLock,
 	/* Individual lock IDs end here */
 	FirstBufMappingLock,
 	FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS,
diff --git a/src/include/storage/pmsignal.h b/src/include/storage/pmsignal.h
index c49c2f5fd2bbeb36576f6c3f3c749b7c9ed35077..b1eed1bd56f4febf4a61a1557b22792f0a636ef7 100644
--- a/src/include/storage/pmsignal.h
+++ b/src/include/storage/pmsignal.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/storage/pmsignal.h,v 1.29 2010/01/27 15:27:51 heikki Exp $
+ * $PostgreSQL: pgsql/src/include/storage/pmsignal.h,v 1.30 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -29,7 +29,7 @@ typedef enum
 	PMSIGNAL_ROTATE_LOGFILE,	/* send SIGUSR1 to syslogger to rotate logfile */
 	PMSIGNAL_START_AUTOVAC_LAUNCHER,	/* start an autovacuum launcher */
 	PMSIGNAL_START_AUTOVAC_WORKER,		/* start an autovacuum worker */
-	PMSIGNAL_START_WALRECEIVER,			/* start a walreceiver */
+	PMSIGNAL_START_WALRECEIVER, /* start a walreceiver */
 
 	NUM_PMSIGNALS				/* Must be last value of enum! */
 } PMSignalReason;
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index b1fc78d3edeed6d7423e57cb72d56faf9c1814de..1e91e8b766db0ac4eda429396279374f6bd99170 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.120 2010/02/13 01:32:20 sriggs Exp $
+ * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.121 2010/02/26 02:01:27 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -180,7 +180,7 @@ extern void InitAuxiliaryProcess(void);
 
 extern void PublishStartupProcessInformation(void);
 extern void SetStartupBufferPinWaitBufId(int bufid);
-extern int GetStartupBufferPinWaitBufId(void);
+extern int	GetStartupBufferPinWaitBufId(void);
 
 extern bool HaveNFreeProcs(int n);
 extern void ProcReleaseLocks(bool isCommit);
diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h
index 0b5316e691f18814dbbf5518135b8f8c7b3ef2b1..aad98982d13d88b29b2afe32a64db866e2d084e4 100644
--- a/src/include/storage/procsignal.h
+++ b/src/include/storage/procsignal.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/storage/procsignal.h,v 1.5 2010/02/13 01:32:20 sriggs Exp $
+ * $PostgreSQL: pgsql/src/include/storage/procsignal.h,v 1.6 2010/02/26 02:01:28 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -50,8 +50,8 @@ extern Size ProcSignalShmemSize(void);
 extern void ProcSignalShmemInit(void);
 
 extern void ProcSignalInit(int pss_idx);
-extern int  SendProcSignal(pid_t pid, ProcSignalReason reason,
-						   BackendId backendId);
+extern int SendProcSignal(pid_t pid, ProcSignalReason reason,
+			   BackendId backendId);
 
 extern void procsignal_sigusr1_handler(SIGNAL_ARGS);
 
diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h
index 70148c3f27a50b7c12ba2eb9dd562b2b61dcc25a..864a28fde8f34f74c3d6c54ab0c34f5f9bad2b51 100644
--- a/src/include/storage/sinval.h
+++ b/src/include/storage/sinval.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/storage/sinval.h,v 1.58 2010/02/13 16:15:48 sriggs Exp $
+ * $PostgreSQL: pgsql/src/include/storage/sinval.h,v 1.59 2010/02/26 02:01:28 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -132,9 +132,9 @@ extern void EnableCatchupInterrupt(void);
 extern bool DisableCatchupInterrupt(void);
 
 extern int xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
-										bool *RelcacheInitFileInval);
+									 bool *RelcacheInitFileInval);
 extern void ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
-										int nmsgs, bool RelcacheInitFileInval,
-										Oid dbid, Oid tsid);
+									 int nmsgs, bool RelcacheInitFileInval,
+									 Oid dbid, Oid tsid);
 
 #endif   /* SINVAL_H */
diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h
index d7e267729d30cc6645f595f662f30d924aa87c4b..c037190b4ba9587ee094f93d0a040593beb4e03c 100644
--- a/src/include/storage/smgr.h
+++ b/src/include/storage/smgr.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/storage/smgr.h,v 1.70 2010/02/09 21:43:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/smgr.h,v 1.71 2010/02/26 02:01:28 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -46,12 +46,12 @@ typedef struct SMgrRelationData
 	/*
 	 * These next three fields are not actually used or manipulated by smgr,
 	 * except that they are reset to InvalidBlockNumber upon a cache flush
-	 * event (in particular, upon truncation of the relation).  Higher levels
+	 * event (in particular, upon truncation of the relation).	Higher levels
 	 * store cached state here so that it will be reset when truncation
 	 * happens.  In all three cases, InvalidBlockNumber means "unknown".
 	 */
-	BlockNumber smgr_targblock;		/* current insertion target block */
-	BlockNumber smgr_fsm_nblocks;	/* last known size of fsm fork */
+	BlockNumber smgr_targblock; /* current insertion target block */
+	BlockNumber smgr_fsm_nblocks;		/* last known size of fsm fork */
 	BlockNumber smgr_vm_nblocks;	/* last known size of vm fork */
 
 	/* additional public fields may someday exist here */
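
The smgr.h hunk above re-wraps the comment explaining that smgr_targblock, smgr_fsm_nblocks and smgr_vm_nblocks are only cached hints that revert to InvalidBlockNumber after a cache-flush event. A minimal sketch of the read-or-recompute pattern that comment implies; the wrapper function below is hypothetical, only smgrnblocks() and the struct fields come from the tree.

#include "postgres.h"
#include "storage/smgr.h"

/* Hypothetical helper: refresh the cached FSM-fork size when the hint has
 * been invalidated, otherwise reuse the cached value. */
static BlockNumber
cached_fsm_nblocks(SMgrRelation reln)
{
	if (reln->smgr_fsm_nblocks == InvalidBlockNumber)
		reln->smgr_fsm_nblocks = smgrnblocks(reln, FSM_FORKNUM);
	return reln->smgr_fsm_nblocks;
}
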
diff --git a/src/include/storage/standby.h b/src/include/storage/standby.h
index 081fa51ba00b799757a46f7404d21151bfd90a8a..fd2dfacd3511558da75064dd18d987dfd539deaa 100644
--- a/src/include/storage/standby.h
+++ b/src/include/storage/standby.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/storage/standby.h,v 1.8 2010/02/13 01:32:20 sriggs Exp $
+ * $PostgreSQL: pgsql/src/include/storage/standby.h,v 1.9 2010/02/26 02:01:28 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -25,7 +25,7 @@ extern void InitRecoveryTransactionEnvironment(void);
 extern void ShutdownRecoveryTransactionEnvironment(void);
 
 extern void ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid,
-												RelFileNode node);
+									RelFileNode node);
 extern void ResolveRecoveryConflictWithRemovedTransactionId(void);
 extern void ResolveRecoveryConflictWithTablespace(Oid tsid);
 extern void ResolveRecoveryConflictWithDatabase(Oid dbid);
@@ -43,7 +43,7 @@ extern void CheckRecoveryConflictDeadlock(LWLockId partitionLock);
  */
 extern void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid);
 extern void StandbyReleaseLockTree(TransactionId xid,
-								   int nsubxids, TransactionId *subxids);
+					   int nsubxids, TransactionId *subxids);
 extern void StandbyReleaseAllLocks(void);
 extern void StandbyReleaseOldLocks(TransactionId removeXid);
 
@@ -55,8 +55,8 @@ extern void StandbyReleaseOldLocks(TransactionId removeXid);
 
 typedef struct xl_standby_locks
 {
-	int				nlocks;		/* number of entries in locks array */
-	xl_standby_lock	locks[1];	/* VARIABLE LENGTH ARRAY */
+	int			nlocks;			/* number of entries in locks array */
+	xl_standby_lock locks[1];	/* VARIABLE LENGTH ARRAY */
 } xl_standby_locks;
 
 /*
@@ -64,12 +64,12 @@ typedef struct xl_standby_locks
  */
 typedef struct xl_running_xacts
 {
-	int				xcnt;				/* # of xact ids in xids[] */
-	bool			subxid_overflow;	/* snapshot overflowed, subxids missing */
-	TransactionId	nextXid;			/* copy of ShmemVariableCache->nextXid */
-	TransactionId	oldestRunningXid;	/* *not* oldestXmin */
+	int			xcnt;			/* # of xact ids in xids[] */
+	bool		subxid_overflow;	/* snapshot overflowed, subxids missing */
+	TransactionId nextXid;		/* copy of ShmemVariableCache->nextXid */
+	TransactionId oldestRunningXid;		/* *not* oldestXmin */
 
-	TransactionId	xids[1];		/* VARIABLE LENGTH ARRAY */
+	TransactionId xids[1];		/* VARIABLE LENGTH ARRAY */
 } xl_running_xacts;
 
 #define MinSizeOfXactRunningXacts offsetof(xl_running_xacts, xids)
@@ -93,12 +93,12 @@ extern void standby_desc(StringInfo buf, uint8 xl_info, char *rec);
 
 typedef struct RunningTransactionsData
 {
-	int				xcnt;				/* # of xact ids in xids[] */
-	bool			subxid_overflow;	/* snapshot overflowed, subxids missing */
-	TransactionId 	nextXid;			/* copy of ShmemVariableCache->nextXid */
-	TransactionId	oldestRunningXid;	/* *not* oldestXmin */
+	int			xcnt;			/* # of xact ids in xids[] */
+	bool		subxid_overflow;	/* snapshot overflowed, subxids missing */
+	TransactionId nextXid;		/* copy of ShmemVariableCache->nextXid */
+	TransactionId oldestRunningXid;		/* *not* oldestXmin */
 
-	TransactionId  *xids;				/* array of (sub)xids still running */
+	TransactionId *xids;		/* array of (sub)xids still running */
 } RunningTransactionsData;
 
 typedef RunningTransactionsData *RunningTransactions;
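
The standby.h hunks above reformat xl_standby_locks, xl_running_xacts and RunningTransactionsData, all of which end in a variable-length array and are sized through offsetof-based macros such as MinSizeOfXactRunningXacts. A minimal sketch of the sizing arithmetic those macros encode; the helper name is invented for this note.

#include "postgres.h"
#include "storage/standby.h"

/* Hypothetical helper: total record size for an xl_running_xacts record
 * carrying xcnt TransactionIds in its flexible xids[] array. */
static Size
running_xacts_record_size(int xcnt)
{
	return MinSizeOfXactRunningXacts + xcnt * sizeof(TransactionId);
}
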
diff --git a/src/include/tcop/tcopprot.h b/src/include/tcop/tcopprot.h
index b46160160ea7961e1c7e02cb3d8f224c3950fab5..216980ab9e7120af3820ef9c0a161a5d95db591f 100644
--- a/src/include/tcop/tcopprot.h
+++ b/src/include/tcop/tcopprot.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/tcop/tcopprot.h,v 1.103 2010/01/16 10:05:59 sriggs Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/tcopprot.h,v 1.104 2010/02/26 02:01:28 momjian Exp $
  *
  * OLD COMMENTS
  *	  This file was created so that other c files could get the two
@@ -51,9 +51,9 @@ extern List *pg_parse_query(const char *query_string);
 extern List *pg_analyze_and_rewrite(Node *parsetree, const char *query_string,
 					   Oid *paramTypes, int numParams);
 extern List *pg_analyze_and_rewrite_params(Node *parsetree,
-										   const char *query_string,
-										   ParserSetupHook parserSetup,
-										   void *parserSetupArg);
+							  const char *query_string,
+							  ParserSetupHook parserSetup,
+							  void *parserSetupArg);
 extern PlannedStmt *pg_plan_query(Query *querytree, int cursorOptions,
 			  ParamListInfo boundParams);
 extern List *pg_plan_queries(List *querytrees, int cursorOptions,
@@ -65,11 +65,12 @@ extern void die(SIGNAL_ARGS);
 extern void quickdie(SIGNAL_ARGS);
 extern void StatementCancelHandler(SIGNAL_ARGS);
 extern void FloatExceptionHandler(SIGNAL_ARGS);
-extern void RecoveryConflictInterrupt(ProcSignalReason reason); /* called from SIGUSR1 handler */
+extern void RecoveryConflictInterrupt(ProcSignalReason reason); /* called from SIGUSR1
+																 * handler */
 extern void prepare_for_client_read(void);
 extern void client_read_ended(void);
 extern const char *process_postgres_switches(int argc, char *argv[],
-											 GucContext ctx);
+						  GucContext ctx);
 extern int	PostgresMain(int argc, char *argv[], const char *username);
 extern long get_stack_depth_rlimit(void);
 extern void ResetUsage(void);
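
The tcopprot.h hunks above only re-wrap prototypes, but they spell out the parse/analyze/plan entry points that the simple-query path strings together. A rough sketch of that pipeline under the signatures shown here, with an illustrative query string and no error handling or memory-context bookkeeping.

#include "postgres.h"
#include "nodes/pg_list.h"
#include "tcop/tcopprot.h"

/* Sketch only: raw parse, then analyze/rewrite, then plan each parsetree. */
static void
plan_simple_query(const char *query)
{
	List	   *raw_list = pg_parse_query(query);
	ListCell   *lc;

	foreach(lc, raw_list)
	{
		Node	   *parsetree = (Node *) lfirst(lc);
		List	   *querytrees = pg_analyze_and_rewrite(parsetree, query,
														NULL, 0);
		List	   *plans = pg_plan_queries(querytrees, 0, NULL);

		(void) plans;			/* a real caller hands these to the executor */
	}
}
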
diff --git a/src/include/tcop/utility.h b/src/include/tcop/utility.h
index 5aae6fc34aeb8a46eb19d12f92d9a9f34f81c2e2..4970410b6d3bfacd156f2ddf4e28727eb68de77e 100644
--- a/src/include/tcop/utility.h
+++ b/src/include/tcop/utility.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/tcop/utility.h,v 1.39 2010/01/02 16:58:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/utility.h,v 1.40 2010/02/26 02:01:28 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -19,16 +19,16 @@
 
 /* Hook for plugins to get control in ProcessUtility() */
 typedef void (*ProcessUtility_hook_type) (Node *parsetree,
-			   const char *queryString, ParamListInfo params, bool isTopLevel,
-			   DestReceiver *dest, char *completionTag);
+			  const char *queryString, ParamListInfo params, bool isTopLevel,
+									DestReceiver *dest, char *completionTag);
 extern PGDLLIMPORT ProcessUtility_hook_type ProcessUtility_hook;
 
 extern void ProcessUtility(Node *parsetree, const char *queryString,
 			   ParamListInfo params, bool isTopLevel,
 			   DestReceiver *dest, char *completionTag);
 extern void standard_ProcessUtility(Node *parsetree, const char *queryString,
-			   ParamListInfo params, bool isTopLevel,
-			   DestReceiver *dest, char *completionTag);
+						ParamListInfo params, bool isTopLevel,
+						DestReceiver *dest, char *completionTag);
 
 extern bool UtilityReturnsTuples(Node *parsetree);
 
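The utility.h hunk above re-wraps ProcessUtility_hook_type and standard_ProcessUtility, the hook point that loadable modules can intercept. A minimal pass-through hook against the six-argument signature shown here; the module function names and the prev_ProcessUtility variable are illustrative, not part of the patch.

#include "postgres.h"
#include "fmgr.h"
#include "tcop/utility.h"

PG_MODULE_MAGIC;

void		_PG_init(void);

static ProcessUtility_hook_type prev_ProcessUtility = NULL;

/* Hypothetical hook: observe the utility statement, then delegate. */
static void
my_ProcessUtility(Node *parsetree, const char *queryString,
				  ParamListInfo params, bool isTopLevel,
				  DestReceiver *dest, char *completionTag)
{
	if (prev_ProcessUtility)
		prev_ProcessUtility(parsetree, queryString, params, isTopLevel,
							dest, completionTag);
	else
		standard_ProcessUtility(parsetree, queryString, params, isTopLevel,
								dest, completionTag);
}

void
_PG_init(void)
{
	prev_ProcessUtility = ProcessUtility_hook;
	ProcessUtility_hook = my_ProcessUtility;
}
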
diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h
index 885a651bbee7aaae4df6de2319f900d8e03a33e0..16a2202fdd1ed90ddedf6b2e436cd0eb50d5b793 100644
--- a/src/include/utils/acl.h
+++ b/src/include/utils/acl.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/acl.h,v 1.112 2010/01/02 16:58:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/acl.h,v 1.113 2010/02/26 02:01:28 momjian Exp $
  *
  * NOTES
  *	  An ACL array is simply an array of AclItems, representing the union
@@ -202,7 +202,7 @@ typedef enum AclObjectKind
  */
 extern Acl *acldefault(GrantObjectType objtype, Oid ownerId);
 extern Acl *get_user_default_acl(GrantObjectType objtype, Oid ownerId,
-								 Oid nsp_oid);
+					 Oid nsp_oid);
 
 extern Acl *aclupdate(const Acl *old_acl, const AclItem *mod_aip,
 		  int modechg, Oid ownerId, DropBehavior behavior);
@@ -263,7 +263,7 @@ extern AclMode pg_proc_aclmask(Oid proc_oid, Oid roleid,
 extern AclMode pg_language_aclmask(Oid lang_oid, Oid roleid,
 					AclMode mask, AclMaskHow how);
 extern AclMode pg_largeobject_aclmask_snapshot(Oid lobj_oid, Oid roleid,
-					AclMode mask, AclMaskHow how, Snapshot snapshot);
+							AclMode mask, AclMaskHow how, Snapshot snapshot);
 extern AclMode pg_namespace_aclmask(Oid nsp_oid, Oid roleid,
 					 AclMode mask, AclMaskHow how);
 extern AclMode pg_tablespace_aclmask(Oid spc_oid, Oid roleid,
@@ -282,7 +282,7 @@ extern AclResult pg_database_aclcheck(Oid db_oid, Oid roleid, AclMode mode);
 extern AclResult pg_proc_aclcheck(Oid proc_oid, Oid roleid, AclMode mode);
 extern AclResult pg_language_aclcheck(Oid lang_oid, Oid roleid, AclMode mode);
 extern AclResult pg_largeobject_aclcheck_snapshot(Oid lang_oid, Oid roleid,
-												  AclMode mode, Snapshot snapshot);
+								 AclMode mode, Snapshot snapshot);
 extern AclResult pg_namespace_aclcheck(Oid nsp_oid, Oid roleid, AclMode mode);
 extern AclResult pg_tablespace_aclcheck(Oid spc_oid, Oid roleid, AclMode mode);
 extern AclResult pg_foreign_data_wrapper_aclcheck(Oid fdw_oid, Oid roleid, AclMode mode);
diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h
index a6a4284b44ab70484945e34bc23c0f0ac7542b5b..e7edc5717f1fd12080af98a17431a66636d9fac1 100644
--- a/src/include/utils/builtins.h
+++ b/src/include/utils/builtins.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.347 2010/02/07 20:48:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.348 2010/02/26 02:01:28 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -740,7 +740,7 @@ extern Datum xidrecv(PG_FUNCTION_ARGS);
 extern Datum xidsend(PG_FUNCTION_ARGS);
 extern Datum xideq(PG_FUNCTION_ARGS);
 extern Datum xid_age(PG_FUNCTION_ARGS);
-extern int xidComparator(const void *arg1, const void *arg2);
+extern int	xidComparator(const void *arg1, const void *arg2);
 extern Datum cidin(PG_FUNCTION_ARGS);
 extern Datum cidout(PG_FUNCTION_ARGS);
 extern Datum cidrecv(PG_FUNCTION_ARGS);
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index 6f3397f731f9ff0ee683049eab065890c3f6f763..eead399ad7165a1fd73ff2db057eae9cbed7a3a7 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -13,7 +13,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.72 2010/02/14 18:42:18 rhaas Exp $
+ * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.73 2010/02/26 02:01:29 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -47,8 +47,9 @@ typedef struct catcache
 	int			cc_nbuckets;	/* # of hash buckets in this cache */
 	int			cc_nkeys;		/* # of keys (1..CATCACHE_MAXKEYS) */
 	int			cc_key[CATCACHE_MAXKEYS];		/* AttrNumber of each key */
-	PGFunction	cc_hashfunc[CATCACHE_MAXKEYS]; /* hash function for each key */
-	ScanKeyData cc_skey[CATCACHE_MAXKEYS];		/* precomputed key info for heap scans */
+	PGFunction	cc_hashfunc[CATCACHE_MAXKEYS];	/* hash function for each key */
+	ScanKeyData cc_skey[CATCACHE_MAXKEYS];		/* precomputed key info for
+												 * heap scans */
 	bool		cc_isname[CATCACHE_MAXKEYS];	/* flag "name" key columns */
 	Dllist		cc_lists;		/* list of CatCList structs */
 #ifdef CATCACHE_STATS
diff --git a/src/include/utils/datetime.h b/src/include/utils/datetime.h
index 1eae9a556f675f02cb4fcfdf43d103d8da5ff1fa..c0129e30583550a4bdda6d907fc9389e366dbe40 100644
--- a/src/include/utils/datetime.h
+++ b/src/include/utils/datetime.h
@@ -9,7 +9,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/datetime.h,v 1.78 2010/01/02 16:58:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/datetime.h,v 1.79 2010/02/26 02:01:29 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -262,7 +262,7 @@ extern const int day_tab[2][13];
   || (((m) == JULIAN_MINMONTH) && ((d) >= JULIAN_MINDAY))))) \
  && ((y) < JULIAN_MAXYEAR))
 
-#define JULIAN_MAX (2147483494)	/* == date2j(JULIAN_MAXYEAR, 1 ,1) */
+#define JULIAN_MAX (2147483494) /* == date2j(JULIAN_MAXYEAR, 1 ,1) */
 
 /* Julian-date equivalents of Day 0 in Unix and Postgres reckoning */
 #define UNIX_EPOCH_JDATE		2440588 /* == date2j(1970, 1, 1) */
diff --git a/src/include/utils/pg_crc.h b/src/include/utils/pg_crc.h
index f094a3d0c7c1de7226abc601625a25028f7890bd..6bab70ff48ed540f14d1b5b084e7d8343be90114 100644
--- a/src/include/utils/pg_crc.h
+++ b/src/include/utils/pg_crc.h
@@ -17,7 +17,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/pg_crc.h,v 1.23 2010/01/07 04:53:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/pg_crc.h,v 1.24 2010/02/26 02:01:29 momjian Exp $
  */
 #ifndef PG_CRC_H
 #define PG_CRC_H
@@ -113,7 +113,6 @@ do { \
 /* Constant table for CRC calculation */
 extern CRCDLLIMPORT const uint32 pg_crc64_table0[];
 extern CRCDLLIMPORT const uint32 pg_crc64_table1[];
-
 #else							/* use int64 implementation */
 
 typedef struct pg_crc64
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index 5e729c37a7506ce0b74fd4645d0c54d9ae83ef2e..c2123181d42d133b715699a371d4bbc0c95d38fe 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/plancache.h,v 1.17 2010/01/02 16:58:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/plancache.h,v 1.18 2010/02/26 02:01:29 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -52,7 +52,7 @@ typedef struct CachedPlanSource
 	Oid		   *param_types;	/* array of parameter type OIDs, or NULL */
 	int			num_params;		/* length of param_types array */
 	ParserSetupHook parserSetup;	/* alternative parameter spec method */
-	void	   *parserSetupArg;	
+	void	   *parserSetupArg;
 	int			cursor_options; /* cursor options used for planning */
 	bool		fully_planned;	/* do we cache planner or rewriter output? */
 	bool		fixed_result;	/* disallow change in result tupdesc? */
@@ -109,8 +109,8 @@ extern CachedPlanSource *FastCreateCachedPlan(Node *raw_parse_tree,
 					 bool fixed_result,
 					 MemoryContext context);
 extern void CachedPlanSetParserHook(CachedPlanSource *plansource,
-									ParserSetupHook parserSetup,
-									void *parserSetupArg);
+						ParserSetupHook parserSetup,
+						void *parserSetupArg);
 extern void DropCachedPlan(CachedPlanSource *plansource);
 extern CachedPlan *RevalidateCachedPlan(CachedPlanSource *plansource,
 					 bool useResOwner);
diff --git a/src/include/utils/rbtree.h b/src/include/utils/rbtree.h
index 535a23780b3d3b37f1071a4070f2258fdd7a823c..bf6daa5aafdcdf7bd67f408189cd4155ce1b529f 100644
--- a/src/include/utils/rbtree.h
+++ b/src/include/utils/rbtree.h
@@ -1,12 +1,12 @@
 /*-------------------------------------------------------------------------
  *
  * rbtree.h
- *    interface for PostgreSQL generic Red-Black binary tree package
+ *	  interface for PostgreSQL generic Red-Black binary tree package
  *
  * Copyright (c) 1996-2009, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- * 		$PostgreSQL: pgsql/src/include/utils/rbtree.h,v 1.1 2010/02/11 14:29:50 teodor Exp $
+ *		$PostgreSQL: pgsql/src/include/utils/rbtree.h,v 1.2 2010/02/26 02:01:29 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -18,13 +18,13 @@ typedef struct RBTree RBTree;
 typedef struct RBTreeIterator RBTreeIterator;
 
 typedef int (*rb_comparator) (const void *a, const void *b, void *arg);
-typedef void* (*rb_appendator) (void *current, void *new, void *arg);
+typedef void *(*rb_appendator) (void *current, void *new, void *arg);
 typedef void (*rb_freefunc) (void *a);
 
 extern RBTree *rb_create(rb_comparator comparator,
-							rb_appendator appendator,
-							rb_freefunc freefunc,
-							void *arg);
+		  rb_appendator appendator,
+		  rb_freefunc freefunc,
+		  void *arg);
 
 extern void *rb_find(RBTree *rb, void *data);
 extern void *rb_insert(RBTree *rb, void *data);
@@ -39,7 +39,7 @@ typedef enum RBOrderControl
 	InvertedWalk
 } RBOrderControl;
 
-extern RBTreeIterator* rb_begin_iterate(RBTree *rb, RBOrderControl ctrl);
+extern RBTreeIterator *rb_begin_iterate(RBTree *rb, RBOrderControl ctrl);
 extern void *rb_iterate(RBTreeIterator *iterator);
 extern void rb_free_iterator(RBTreeIterator *iterator);
 
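The rbtree.h changes above are purely notational (pointer-star placement and wrapping), but they cover the whole public face of the generic red-black tree: rb_create, rb_find, rb_insert, the iterator calls, and the rb_comparator/rb_appendator callback types. A minimal usage sketch under assumed semantics; the element struct and callbacks below are invented, and only the rb_* functions and the InvertedWalk ordering are taken from the header.

#include "postgres.h"
#include "utils/rbtree.h"

/* Hypothetical element type and callbacks for the generic tree. */
typedef struct IntElem
{
	int			key;
} IntElem;

static int
intelem_cmp(const void *a, const void *b, void *arg)
{
	return ((const IntElem *) a)->key - ((const IntElem *) b)->key;
}

static void *
intelem_append(void *current, void *new, void *arg)
{
	return current;				/* assumed policy: keep the existing element
								 * when a duplicate key is inserted */
}

static void
intelem_free(void *a)
{
	/* elements are caller-owned in this sketch, so nothing to free */
}

static void
intelem_demo(void)
{
	RBTree	   *rb = rb_create(intelem_cmp, intelem_append, intelem_free, NULL);
	IntElem		e = {42};
	RBTreeIterator *it;
	IntElem    *elem;

	rb_insert(rb, &e);
	elem = (IntElem *) rb_find(rb, &e);

	it = rb_begin_iterate(rb, InvertedWalk);
	while ((elem = (IntElem *) rb_iterate(it)) != NULL)
		;						/* visit each stored element */
	rb_free_iterator(it);
}
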
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index 405ff18e0ba204fedf952dc368dac63290d033a8..a0a9b301c4a8ff044b1c968cea6ae40228312297 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/rel.h,v 1.123 2010/02/09 21:43:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/rel.h,v 1.124 2010/02/26 02:01:29 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -369,7 +369,7 @@ typedef struct StdRdOptions
  * RelationGetTargetBlock
  *		Fetch relation's current insertion target block.
  *
- * Returns InvalidBlockNumber if there is no current target block.  Note
+ * Returns InvalidBlockNumber if there is no current target block.	Note
  * that the target block status is discarded on any smgr-level invalidation.
  */
 #define RelationGetTargetBlock(relation) \
diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h
index 74d6af01baba4ecfc6c1ae62edb3188ba27096ef..4db4ba5db28be72e55b556a14d7145aa30ddd022 100644
--- a/src/include/utils/relcache.h
+++ b/src/include/utils/relcache.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/relcache.h,v 1.68 2010/02/07 20:48:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/relcache.h,v 1.69 2010/02/26 02:01:29 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -44,9 +44,9 @@ extern List *RelationGetIndexExpressions(Relation relation);
 extern List *RelationGetIndexPredicate(Relation relation);
 extern Bitmapset *RelationGetIndexAttrBitmap(Relation relation);
 extern void RelationGetExclusionInfo(Relation indexRelation,
-									 Oid **operators,
-									 Oid **procs,
-									 uint16 **strategies);
+						 Oid **operators,
+						 Oid **procs,
+						 uint16 **strategies);
 
 extern void RelationSetIndexList(Relation relation,
 					 List *indexIds, Oid oidIndex);
@@ -75,7 +75,7 @@ extern Relation RelationBuildLocalRelation(const char *relname,
  * Routine to manage assignment of new relfilenode to a relation
  */
 extern void RelationSetNewRelfilenode(Relation relation,
-									  TransactionId freezeXid);
+						  TransactionId freezeXid);
 
 /*
  * Routines for flushing/rebuilding relcache entries in various scenarios
@@ -101,6 +101,7 @@ extern void RelationCacheInitFileRemove(void);
 
 /* should be used only by relcache.c and catcache.c */
 extern bool criticalRelcachesBuilt;
+
 /* should be used only by relcache.c and postinit.c */
 extern bool criticalSharedRelcachesBuilt;
 
diff --git a/src/include/utils/relmapper.h b/src/include/utils/relmapper.h
index 6bd1f6ba4037fe748dcdf36dee50ce9c9fbd662b..af291f3fb4b6563a43e6b7b3f2fa61d91496267d 100644
--- a/src/include/utils/relmapper.h
+++ b/src/include/utils/relmapper.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/relmapper.h,v 1.1 2010/02/07 20:48:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/relmapper.h,v 1.2 2010/02/26 02:01:29 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -37,7 +37,7 @@ typedef struct xl_relmap_update
 extern Oid	RelationMapOidToFilenode(Oid relationId, bool shared);
 
 extern void RelationMapUpdateMap(Oid relationId, Oid fileNode, bool shared,
-								 bool immediate);
+					 bool immediate);
 
 extern void RelationMapRemoveMapping(Oid relationId);
 
diff --git a/src/include/utils/spccache.h b/src/include/utils/spccache.h
index 73b9f7370d95ae7fd1a9e04596bd41c3ce979853..9b620efa2b29c341a6f366ff804a5bb8cef3d969 100644
--- a/src/include/utils/spccache.h
+++ b/src/include/utils/spccache.h
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/spccache.h,v 1.1 2010/01/05 21:54:00 rhaas Exp $
+ * $PostgreSQL: pgsql/src/include/utils/spccache.h,v 1.2 2010/02/26 02:01:29 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -14,6 +14,6 @@
 #define SPCCACHE_H
 
 void get_tablespace_page_costs(Oid spcid, float8 *spc_random_page_cost,
-					     float8 *spc_seq_page_cost);
+						  float8 *spc_seq_page_cost);
 
 #endif   /* SPCCACHE_H */
diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h
index 10dad13082ffb85ce449aad5b7453a63494f7ceb..08d9f384a997558f16c38103db7f39b967ba3e67 100644
--- a/src/include/utils/tuplesort.h
+++ b/src/include/utils/tuplesort.h
@@ -13,7 +13,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/include/utils/tuplesort.h,v 1.35 2010/01/02 16:58:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/tuplesort.h,v 1.36 2010/02/26 02:01:29 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -85,9 +85,9 @@ extern bool tuplesort_getdatum(Tuplesortstate *state, bool forward,
 extern void tuplesort_end(Tuplesortstate *state);
 
 extern void tuplesort_get_stats(Tuplesortstate *state,
-								const char **sortMethod,
-								const char **spaceType,
-								long *spaceUsed);
+					const char **sortMethod,
+					const char **spaceType,
+					long *spaceUsed);
 
 extern int	tuplesort_merge_order(long allowedMem);
 
diff --git a/src/interfaces/ecpg/compatlib/informix.c b/src/interfaces/ecpg/compatlib/informix.c
index 24906ab487f76d79873c0451a7c8b253e73c83c9..92e2a5269a70251840e38a471cc11bf2b28c3c30 100644
--- a/src/interfaces/ecpg/compatlib/informix.c
+++ b/src/interfaces/ecpg/compatlib/informix.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/compatlib/informix.c,v 1.63 2010/01/26 09:07:31 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/compatlib/informix.c,v 1.64 2010/02/26 02:01:29 momjian Exp $ */
 
 #define POSTGRES_ECPG_INTERNAL
 #include "postgres_fe.h"
@@ -767,7 +767,7 @@ rfmtlong(long lng_val, char *fmt, char *outbuf)
 	size_t		fmt_len = strlen(fmt);
 	size_t		temp_len;
 	int			i,
-				j, /* position in temp */
+				j,				/* position in temp */
 				k,
 				dotpos;
 	int			leftalign = 0,
diff --git a/src/interfaces/ecpg/ecpglib/data.c b/src/interfaces/ecpg/ecpglib/data.c
index 2027ae8ef49c0baea6ded6a11742e020dbdcfab6..f5190049e12e04375ba1885064aad291d8336c50 100644
--- a/src/interfaces/ecpg/ecpglib/data.c
+++ b/src/interfaces/ecpg/ecpglib/data.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/data.c,v 1.49 2010/02/04 09:41:34 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/data.c,v 1.50 2010/02/26 02:01:29 momjian Exp $ */
 
 #define POSTGRES_ECPG_INTERNAL
 #include "postgres_fe.h"
@@ -26,7 +26,7 @@ array_delimiter(enum ARRAY_TYPE isarray, char c)
 
 	if (isarray == ECPG_ARRAY_VECTOR && c == ' ')
 		return true;
-	
+
 	return false;
 }
 
@@ -39,7 +39,7 @@ array_boundary(enum ARRAY_TYPE isarray, char c)
 
 	if (isarray == ECPG_ARRAY_VECTOR && c == '\0')
 		return true;
-	
+
 	return false;
 }
 
@@ -86,7 +86,7 @@ static double
 get_float8_nan(void)
 {
 #ifdef NAN
-	return (double) NAN;  
+	return (double) NAN;
 #else
 	return (double) (0.0 / 0.0);
 #endif
@@ -142,11 +142,11 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 	ecpg_log("ecpg_get_data on line %d: RESULT: %s offset: %ld; array: %s\n", lineno, pval ? (binary ? "BINARY" : pval) : "EMPTY", log_offset, ECPG_IS_ARRAY(isarray) ? "yes" : "no");
 
 	/* pval is a pointer to the value */
-	if (!pval) 
+	if (!pval)
 	{
 		/*
-		 * This should never happen because we already checked that we
-		 * found at least one tuple, but let's play it safe.
+		 * This should never happen because we already checked that we found
+		 * at least one tuple, but let's play it safe.
 		 */
 		ecpg_raise(lineno, ECPG_NOT_FOUND, ECPG_SQLSTATE_NO_DATA, NULL);
 		return (false);
@@ -365,7 +365,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 				case ECPGt_unsigned_long_long:
 					*((unsigned long long int *) (var + offset * act_tuple)) = strtoull(pval, &scan_length, 10);
 					if ((isarray && *scan_length != ',' && *scan_length != '}')
-						|| (!isarray && !(INFORMIX_MODE(compat) && *scan_length == '.') && *scan_length != '\0' && *scan_length != ' '))	/* Garbage left */
+						|| (!isarray && !(INFORMIX_MODE(compat) && *scan_length == '.') && *scan_length != '\0' && *scan_length != ' '))		/* Garbage left */
 					{
 						ecpg_raise(lineno, ECPG_UINT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
 						return (false);
@@ -449,14 +449,16 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 				case ECPGt_unsigned_char:
 				case ECPGt_string:
 					{
-						char	*str = (char *) (var + offset * act_tuple);
+						char	   *str = (char *) (var + offset * act_tuple);
+
 						if (varcharsize == 0 || varcharsize > size)
 						{
 							strncpy(str, pval, size + 1);
 							/* do the rtrim() */
 							if (type == ECPGt_string)
 							{
-								char	*last = str + size;
+								char	   *last = str + size;
+
 								while (last > str && (*last == ' ' || *last == '\0'))
 								{
 									*last = '\0';
@@ -564,8 +566,8 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 						if (INFORMIX_MODE(compat))
 						{
 							/*
-							 * Informix wants its own NULL value here
-							 * instead of an error
+							 * Informix wants its own NULL value here instead
+							 * of an error
 							 */
 							nres = PGTYPESnumeric_new();
 							if (nres)
@@ -573,14 +575,14 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 							else
 							{
 								ecpg_raise(lineno, ECPG_OUT_OF_MEMORY,
-								 ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY, NULL);
+									 ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY, NULL);
 								return (false);
 							}
 						}
 						else
 						{
 							ecpg_raise(lineno, ECPG_NUMERIC_FORMAT,
-								  ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+									   ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
 							return (false);
 						}
 					}
@@ -593,7 +595,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 						{
 							free(nres);
 							ecpg_raise(lineno, ECPG_NUMERIC_FORMAT,
-								  ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+									   ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
 							return (false);
 						}
 					}
@@ -622,8 +624,8 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 						if (INFORMIX_MODE(compat))
 						{
 							/*
-							 * Informix wants its own NULL value here
-							 * instead of an error
+							 * Informix wants its own NULL value here instead
+							 * of an error
 							 */
 							ires = (interval *) ecpg_alloc(sizeof(interval), lineno);
 							if (!ires)
@@ -634,7 +636,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 						else
 						{
 							ecpg_raise(lineno, ECPG_INTERVAL_FORMAT,
-								  ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+									   ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
 							return (false);
 						}
 					}
@@ -647,7 +649,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 						{
 							free(ires);
 							ecpg_raise(lineno, ECPG_INTERVAL_FORMAT,
-								  ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+									   ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
 							return (false);
 						}
 					}
@@ -672,15 +674,15 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 						if (INFORMIX_MODE(compat))
 						{
 							/*
-							 * Informix wants its own NULL value here
-							 * instead of an error
+							 * Informix wants its own NULL value here instead
+							 * of an error
 							 */
 							ECPGset_noind_null(ECPGt_date, &ddres);
 						}
 						else
 						{
 							ecpg_raise(lineno, ECPG_DATE_FORMAT,
-								  ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+									   ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
 							return (false);
 						}
 					}
@@ -692,7 +694,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 						if (garbage_left(isarray, scan_length, compat))
 						{
 							ecpg_raise(lineno, ECPG_DATE_FORMAT,
-								  ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+									   ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
 							return (false);
 						}
 					}
@@ -716,15 +718,15 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 						if (INFORMIX_MODE(compat))
 						{
 							/*
-							 * Informix wants its own NULL value here
-							 * instead of an error
+							 * Informix wants its own NULL value here instead
+							 * of an error
 							 */
 							ECPGset_noind_null(ECPGt_timestamp, &tres);
 						}
 						else
 						{
 							ecpg_raise(lineno, ECPG_TIMESTAMP_FORMAT,
-								  ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+									   ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
 							return (false);
 						}
 					}
@@ -736,7 +738,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 						if (garbage_left(isarray, scan_length, compat))
 						{
 							ecpg_raise(lineno, ECPG_TIMESTAMP_FORMAT,
-								  ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+									   ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
 							return (false);
 						}
 					}
@@ -760,7 +762,11 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 				++act_tuple;
 
 				/* set pval to the next entry */
-				/* *pval != '\0' should not be needed, but is used as a safety guard */
+
+				/*
+				 * *pval != '\0' should not be needed, but is used as a safety
+				 * guard
+				 */
 				for (; *pval != '\0' && (string || (!array_delimiter(isarray, *pval) && !array_boundary(isarray, *pval))); ++pval)
 					if (*pval == '"')
 						string = string ? false : true;
diff --git a/src/interfaces/ecpg/ecpglib/descriptor.c b/src/interfaces/ecpg/ecpglib/descriptor.c
index acc26574b75f0365a71eec2f0460b3d387a739f3..21dbe0ce08e804d28fdc174e69314ccf57ffa9e3 100644
--- a/src/interfaces/ecpg/ecpglib/descriptor.c
+++ b/src/interfaces/ecpg/ecpglib/descriptor.c
@@ -1,6 +1,6 @@
 /* dynamic SQL support routines
  *
- * $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/descriptor.c,v 1.35 2010/01/15 13:19:12 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/descriptor.c,v 1.36 2010/02/26 02:01:30 momjian Exp $
  */
 
 #define POSTGRES_ECPG_INTERNAL
@@ -382,6 +382,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...)
 			case ECPGd_ret_octet:
 
 				RETURN_IF_NO_DATA;
+
 				/*
 				 * this is like ECPGstore_result
 				 */
@@ -485,6 +486,7 @@ ECPGget_desc(int lineno, const char *desc_name, int index,...)
 	sqlca->sqlerrd[2] = ntuples;
 	return (true);
 }
+
 #undef RETURN_IF_NO_DATA
 
 bool
@@ -729,7 +731,7 @@ ecpg_find_desc(int line, const char *name)
 }
 
 bool
-ECPGdescribe(int line, int compat, bool input, const char *connection_name, const char *stmt_name, ...)
+ECPGdescribe(int line, int compat, bool input, const char *connection_name, const char *stmt_name,...)
 {
 	bool		ret = false;
 	struct connection *con;
@@ -748,7 +750,7 @@ ECPGdescribe(int line, int compat, bool input, const char *connection_name, cons
 	if (!con)
 	{
 		ecpg_raise(line, ECPG_NO_CONN, ECPG_SQLSTATE_CONNECTION_DOES_NOT_EXIST,
-				connection_name ? connection_name : ecpg_gettext("NULL"));
+				   connection_name ? connection_name : ecpg_gettext("NULL"));
 		return ret;
 	}
 	prep = ecpg_find_prepared_statement(stmt_name, con, NULL);
@@ -762,8 +764,10 @@ ECPGdescribe(int line, int compat, bool input, const char *connection_name, cons
 
 	for (;;)
 	{
-		enum ECPGttype	type, dummy_type;
-		void		*ptr, *dummy_ptr;
+		enum ECPGttype type,
+					dummy_type;
+		void	   *ptr,
+				   *dummy_ptr;
 		long		dummy;
 
 		/* variable type */
@@ -772,7 +776,7 @@ ECPGdescribe(int line, int compat, bool input, const char *connection_name, cons
 		if (type == ECPGt_EORT)
 			break;
 
-		/* rest of variable parameters*/
+		/* rest of variable parameters */
 		ptr = va_arg(args, void *);
 		dummy = va_arg(args, long);
 		dummy = va_arg(args, long);
@@ -788,84 +792,84 @@ ECPGdescribe(int line, int compat, bool input, const char *connection_name, cons
 		switch (type)
 		{
 			case ECPGt_descriptor:
-			{
-				char	*name = ptr;
-				struct descriptor *desc = ecpg_find_desc(line, name);
-
-				if (desc == NULL)
-					break;
-
-				res = PQdescribePrepared(con->connection, stmt_name);
-				if (!ecpg_check_PQresult(res, line, con->connection, compat))
-					break;
-
-				if (desc->result != NULL)
-					PQclear(desc->result);
-
-				desc->result = res;
-				ret = true;
-				break;
-			}
-			case ECPGt_sqlda:
-			{
-				if (INFORMIX_MODE(compat))
 				{
-					struct sqlda_compat **_sqlda = ptr;
-					struct sqlda_compat *sqlda;
+					char	   *name = ptr;
+					struct descriptor *desc = ecpg_find_desc(line, name);
+
+					if (desc == NULL)
+						break;
 
 					res = PQdescribePrepared(con->connection, stmt_name);
 					if (!ecpg_check_PQresult(res, line, con->connection, compat))
 						break;
 
-					sqlda = ecpg_build_compat_sqlda(line, res, -1, compat);
-					if (sqlda)
+					if (desc->result != NULL)
+						PQclear(desc->result);
+
+					desc->result = res;
+					ret = true;
+					break;
+				}
+			case ECPGt_sqlda:
+				{
+					if (INFORMIX_MODE(compat))
 					{
-						struct sqlda_compat *sqlda_old = *_sqlda;
-						struct sqlda_compat *sqlda_old1;
+						struct sqlda_compat **_sqlda = ptr;
+						struct sqlda_compat *sqlda;
+
+						res = PQdescribePrepared(con->connection, stmt_name);
+						if (!ecpg_check_PQresult(res, line, con->connection, compat))
+							break;
 
-						while (sqlda_old)
+						sqlda = ecpg_build_compat_sqlda(line, res, -1, compat);
+						if (sqlda)
 						{
-							sqlda_old1 = sqlda_old->desc_next;
-							free(sqlda_old);
-							sqlda_old = sqlda_old1;
+							struct sqlda_compat *sqlda_old = *_sqlda;
+							struct sqlda_compat *sqlda_old1;
+
+							while (sqlda_old)
+							{
+								sqlda_old1 = sqlda_old->desc_next;
+								free(sqlda_old);
+								sqlda_old = sqlda_old1;
+							}
+
+							*_sqlda = sqlda;
+							ret = true;
 						}
 
-						*_sqlda = sqlda;
-						ret = true;
+						PQclear(res);
 					}
-
-					PQclear(res);
-				}
-				else
-				{
-					struct sqlda_struct **_sqlda = ptr;
-					struct sqlda_struct *sqlda;
-
-					res = PQdescribePrepared(con->connection, stmt_name);
-					if (!ecpg_check_PQresult(res, line, con->connection, compat))
-						break;
-
-					sqlda = ecpg_build_native_sqlda(line, res, -1, compat);
-					if (sqlda)
+					else
 					{
-						struct sqlda_struct *sqlda_old = *_sqlda;
-						struct sqlda_struct *sqlda_old1;
+						struct sqlda_struct **_sqlda = ptr;
+						struct sqlda_struct *sqlda;
+
+						res = PQdescribePrepared(con->connection, stmt_name);
+						if (!ecpg_check_PQresult(res, line, con->connection, compat))
+							break;
 
-						while (sqlda_old)
+						sqlda = ecpg_build_native_sqlda(line, res, -1, compat);
+						if (sqlda)
 						{
-							sqlda_old1 = sqlda_old->desc_next;
-							free(sqlda_old);
-							sqlda_old = sqlda_old1;
+							struct sqlda_struct *sqlda_old = *_sqlda;
+							struct sqlda_struct *sqlda_old1;
+
+							while (sqlda_old)
+							{
+								sqlda_old1 = sqlda_old->desc_next;
+								free(sqlda_old);
+								sqlda_old = sqlda_old1;
+							}
+
+							*_sqlda = sqlda;
+							ret = true;
 						}
 
-						*_sqlda = sqlda;
-						ret = true;
+						PQclear(res);
 					}
-
-					PQclear(res);
+					break;
 				}
-				break;
-			}
 			default:
 				/* nothing else may come */
 				;
diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c
index f65ede58368cbe25ac84b209ec807baeb8edb291..8ae102b44936c16c97d353bd69d2982c066e530c 100644
--- a/src/interfaces/ecpg/ecpglib/execute.c
+++ b/src/interfaces/ecpg/ecpglib/execute.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.94 2010/02/16 18:41:23 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.95 2010/02/26 02:01:30 momjian Exp $ */
 
 /*
  * The aim is to get a simpler inteface to the database routines.
@@ -505,8 +505,8 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
 	char	   *newcopy = NULL;
 
 	/*
-	 * arrays are not possible unless the attribute is an array too
-	 * FIXME: we do not know if the attribute is an array here
+	 * arrays are not possible unless the attribute is an array too FIXME: we
+	 * do not know if the attribute is an array here
 	 */
 #if 0
 	if (var->arrsize > 1 &&...)
@@ -1213,9 +1213,9 @@ ecpg_execute(struct statement * stmt)
 		{
 			if (INFORMIX_MODE(stmt->compat))
 			{
-				struct sqlda_compat	   *sqlda = *(struct sqlda_compat **)var->pointer;
-				struct variable	desc_inlist;
-				int		i;
+				struct sqlda_compat *sqlda = *(struct sqlda_compat **) var->pointer;
+				struct variable desc_inlist;
+				int			i;
 
 				if (sqlda == NULL)
 					return false;
@@ -1268,9 +1268,9 @@ ecpg_execute(struct statement * stmt)
 			}
 			else
 			{
-				struct sqlda_struct	   *sqlda = *(struct sqlda_struct **)var->pointer;
-				struct variable	desc_inlist;
-				int		i;
+				struct sqlda_struct *sqlda = *(struct sqlda_struct **) var->pointer;
+				struct variable desc_inlist;
+				int			i;
 
 				if (sqlda == NULL)
 					return false;
@@ -1508,12 +1508,15 @@ ecpg_execute(struct statement * stmt)
 			{
 				if (INFORMIX_MODE(stmt->compat))
 				{
-					struct sqlda_compat  **_sqlda = (struct sqlda_compat **)var->pointer;
-					struct sqlda_compat   *sqlda = *_sqlda;
-					struct sqlda_compat   *sqlda_new;
-					int		i;
+					struct sqlda_compat **_sqlda = (struct sqlda_compat **) var->pointer;
+					struct sqlda_compat *sqlda = *_sqlda;
+					struct sqlda_compat *sqlda_new;
+					int			i;
 
-					/* If we are passed in a previously existing sqlda (chain) then free it. */
+					/*
+					 * If we are passed in a previously existing sqlda (chain)
+					 * then free it.
+					 */
 					while (sqlda)
 					{
 						sqlda_new = sqlda->desc_next;
@@ -1523,7 +1526,10 @@ ecpg_execute(struct statement * stmt)
 					*_sqlda = sqlda = sqlda_new = NULL;
 					for (i = ntuples - 1; i >= 0; i--)
 					{
-						/* Build a new sqlda structure. Note that only fetching 1 record is supported */
+						/*
+						 * Build a new sqlda structure. Note that only
+						 * fetching 1 record is supported
+						 */
 						sqlda_new = ecpg_build_compat_sqlda(stmt->lineno, results, i, stmt->compat);
 
 						if (!sqlda_new)
@@ -1549,7 +1555,7 @@ ecpg_execute(struct statement * stmt)
 
 							ecpg_set_compat_sqlda(stmt->lineno, _sqlda, results, i, stmt->compat);
 							ecpg_log("ecpg_execute on line %d: putting result (1 tuple %d fields) into sqlda descriptor\n",
-									stmt->lineno, PQnfields(results));
+									 stmt->lineno, PQnfields(results));
 
 							sqlda_new->desc_next = sqlda;
 							sqlda = sqlda_new;
@@ -1558,12 +1564,15 @@ ecpg_execute(struct statement * stmt)
 				}
 				else
 				{
-					struct sqlda_struct  **_sqlda = (struct sqlda_struct **)var->pointer;
-					struct sqlda_struct   *sqlda = *_sqlda;
-					struct sqlda_struct   *sqlda_new;
-					int		i;
+					struct sqlda_struct **_sqlda = (struct sqlda_struct **) var->pointer;
+					struct sqlda_struct *sqlda = *_sqlda;
+					struct sqlda_struct *sqlda_new;
+					int			i;
 
-					/* If we are passed in a previously existing sqlda (chain) then free it. */
+					/*
+					 * If we are passed in a previously existing sqlda (chain)
+					 * then free it.
+					 */
 					while (sqlda)
 					{
 						sqlda_new = sqlda->desc_next;
@@ -1573,7 +1582,10 @@ ecpg_execute(struct statement * stmt)
 					*_sqlda = sqlda = sqlda_new = NULL;
 					for (i = ntuples - 1; i >= 0; i--)
 					{
-						/* Build a new sqlda structure. Note that only fetching 1 record is supported */
+						/*
+						 * Build a new sqlda structure. Note that only
+						 * fetching 1 record is supported
+						 */
 						sqlda_new = ecpg_build_native_sqlda(stmt->lineno, results, i, stmt->compat);
 
 						if (!sqlda_new)
@@ -1599,7 +1611,7 @@ ecpg_execute(struct statement * stmt)
 
 							ecpg_set_native_sqlda(stmt->lineno, _sqlda, results, i, stmt->compat);
 							ecpg_log("ecpg_execute on line %d: putting result (1 tuple %d fields) into sqlda descriptor\n",
-									stmt->lineno, PQnfields(results));
+									 stmt->lineno, PQnfields(results));
 
 							sqlda_new->desc_next = sqlda;
 							sqlda = sqlda_new;
diff --git a/src/interfaces/ecpg/ecpglib/extern.h b/src/interfaces/ecpg/ecpglib/extern.h
index e2e61484c489edff48a2515fb312322f506df4e5..7a5259f793b70e979b2eef2a07d54410ab9e08b8 100644
--- a/src/interfaces/ecpg/ecpglib/extern.h
+++ b/src/interfaces/ecpg/ecpglib/extern.h
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/extern.h,v 1.38 2010/02/04 09:41:34 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/extern.h,v 1.39 2010/02/26 02:01:30 momjian Exp $ */
 
 #ifndef _ECPG_LIB_EXTERN_H
 #define _ECPG_LIB_EXTERN_H
@@ -151,7 +151,7 @@ struct descriptor *ecpggetdescp(int, char *);
 struct descriptor *ecpg_find_desc(int line, const char *name);
 
 struct prepared_statement *ecpg_find_prepared_statement(const char *,
-				  struct connection *, struct prepared_statement **);
+						  struct connection *, struct prepared_statement **);
 
 bool ecpg_store_result(const PGresult *results, int act_field,
 				  const struct statement * stmt, struct variable * var);
diff --git a/src/interfaces/ecpg/ecpglib/misc.c b/src/interfaces/ecpg/ecpglib/misc.c
index 11d69ac04c44d895625ae5de5f15d703db85da42..9fade9ea3c2034c6400a8bf605a23660c897199a 100644
--- a/src/interfaces/ecpg/ecpglib/misc.c
+++ b/src/interfaces/ecpg/ecpglib/misc.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.55 2010/02/02 16:09:11 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.56 2010/02/26 02:01:30 momjian Exp $ */
 
 #define POSTGRES_ECPG_INTERNAL
 #include "postgres_fe.h"
@@ -176,7 +176,8 @@ ECPGtransactionStatus(const char *connection_name)
 	const struct connection *con;
 
 	con = ecpg_get_connection(connection_name);
-	if (con == NULL) {
+	if (con == NULL)
+	{
 		/* transaction status is unknown */
 		return PQTRANS_UNKNOWN;
 	}
@@ -503,15 +504,14 @@ ecpg_gettext(const char *msgid)
 
 	return dgettext(PG_TEXTDOMAIN("ecpg"), msgid);
 }
-
 #endif   /* ENABLE_NLS */
 
 static struct var_list
 {
-	int		number;
+	int			number;
 	void	   *pointer;
 	struct var_list *next;
-} *ivlist = NULL;
+}	*ivlist = NULL;
 
 void
 ECPGset_var(int number, void *pointer, int lineno)
@@ -533,6 +533,7 @@ ECPGset_var(int number, void *pointer, int lineno)
 	if (!ptr)
 	{
 		struct sqlca_t *sqlca = ECPGget_sqlca();
+
 		sqlca->sqlcode = ECPG_OUT_OF_MEMORY;
 		strncpy(sqlca->sqlstate, "YE001", sizeof("YE001"));
 		snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc), "out of memory on line %d", lineno);
@@ -555,5 +556,5 @@ ECPGget_var(int number)
 	struct var_list *ptr;
 
 	for (ptr = ivlist; ptr != NULL && ptr->number != number; ptr = ptr->next);
-		return (ptr) ? ptr->pointer : NULL;
+	return (ptr) ? ptr->pointer : NULL;
 }
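
The misc.c hunks above mostly adjust brace and declaration layout, but they also touch ECPGset_var/ECPGget_var, which stash host-variable pointers in the ivlist list keyed by a slot number. A tiny sketch of that pairing, assuming the declarations from ecpglib.h; the slot number and variable are invented, since real ecpg-generated code chooses them itself.

#include <ecpglib.h>

static int	amount;

/* Hypothetical use of the slot registry reformatted above. */
static void
remember_and_fetch(void)
{
	int		   *p;

	ECPGset_var(1, &amount, __LINE__);	/* register &amount under slot 1 */
	p = (int *) ECPGget_var(1);			/* later, retrieve the same pointer */
	(void) p;
}
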
diff --git a/src/interfaces/ecpg/ecpglib/prepare.c b/src/interfaces/ecpg/ecpglib/prepare.c
index 621da1a9cfc7ee3220a4ee72384e4625930d0b7b..5c13af285bd195eee5469cb4be3c7d1e0fae6732 100644
--- a/src/interfaces/ecpg/ecpglib/prepare.c
+++ b/src/interfaces/ecpg/ecpglib/prepare.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/prepare.c,v 1.36 2010/01/22 14:19:27 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/prepare.c,v 1.37 2010/02/26 02:01:30 momjian Exp $ */
 
 #define POSTGRES_ECPG_INTERNAL
 #include "postgres_fe.h"
@@ -100,7 +100,7 @@ replace_variables(char **text, int lineno)
 }
 
 static bool
-prepare_common(int lineno, struct connection *con, const bool questionmarks, const char *name, const char *variable)
+prepare_common(int lineno, struct connection * con, const bool questionmarks, const char *name, const char *variable)
 {
 	struct statement *stmt;
 	struct prepared_statement *this;
@@ -414,7 +414,7 @@ ecpg_freeStmtCacheEntry(int lineno, int compat, int entNo)		/* entry # to free *
  */
 static int
 AddStmtToCache(int lineno,		/* line # of statement		*/
-			   const char *stmtID,	/* statement ID				*/
+			   const char *stmtID,		/* statement ID				*/
 			   const char *connection,	/* connection				*/
 			   int compat,		/* compatibility level */
 			   const char *ecpgQuery)	/* query					*/
@@ -491,7 +491,7 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha
 	}
 	else
 	{
-		char	stmtID[STMTID_SIZE];
+		char		stmtID[STMTID_SIZE];
 
 		ecpg_log("ecpg_auto_prepare on line %d: statement not in cache; inserting\n", lineno);
 
diff --git a/src/interfaces/ecpg/ecpglib/sqlda.c b/src/interfaces/ecpg/ecpglib/sqlda.c
index 656bd4bb7b5cff17617c64eb77baedca897deb9a..e06f25e4874e7d9606758f7e9344e93044c5a60f 100644
--- a/src/interfaces/ecpg/ecpglib/sqlda.c
+++ b/src/interfaces/ecpg/ecpglib/sqlda.c
@@ -23,7 +23,7 @@
 /*
  * Compute the next variable's offset with
  * the current variable's size and alignment.
- * 
+ *
  *
  * Returns:
  * - the current variable's offset in *current
@@ -44,9 +44,9 @@ ecpg_sqlda_align_add_size(long offset, int alignment, int size, long *current, l
 static long
 sqlda_compat_empty_size(const PGresult *res)
 {
-	long	offset;
-	int	i;
-	int	sqld = PQnfields(res);
+	long		offset;
+	int			i;
+	int			sqld = PQnfields(res);
 
 	/* Initial size to store main structure and field structures */
 	offset = sizeof(struct sqlda_compat) + sqld * sizeof(struct sqlvar_compat);
@@ -64,14 +64,15 @@ sqlda_compat_empty_size(const PGresult *res)
 static long
 sqlda_common_total_size(const PGresult *res, int row, enum COMPAT_MODE compat, long offset)
 {
-	int	sqld = PQnfields(res);
-	int	i;
-	long	next_offset;
+	int			sqld = PQnfields(res);
+	int			i;
+	long		next_offset;
 
 	/* Add space for the field values */
 	for (i = 0; i < sqld; i++)
 	{
 		enum ECPGttype type = sqlda_dynamic_type(PQftype(res, i), compat);
+
 		switch (type)
 		{
 			case ECPGt_short:
@@ -103,16 +104,17 @@ sqlda_common_total_size(const PGresult *res, int row, enum COMPAT_MODE compat, l
 				ecpg_sqlda_align_add_size(offset, sizeof(int), sizeof(decimal), &offset, &next_offset);
 				break;
 			case ECPGt_numeric:
+
 				/*
-				 * Let's align both the numeric struct and the digits array to int
-				 * Unfortunately we need to do double work here to compute the size
-				 * of the space needed for the numeric structure.
+				 * Let's align both the numeric struct and the digits array to
+				 * int Unfortunately we need to do double work here to compute
+				 * the size of the space needed for the numeric structure.
 				 */
 				ecpg_sqlda_align_add_size(offset, sizeof(int), sizeof(numeric), &offset, &next_offset);
 				if (!PQgetisnull(res, row, i))
 				{
 					char	   *val = PQgetvalue(res, row, i);
-					numeric	   *num;
+					numeric    *num;
 
 					num = PGTYPESnumeric_from_asc(val, NULL);
 					if (!num)
@@ -134,11 +136,12 @@ sqlda_common_total_size(const PGresult *res, int row, enum COMPAT_MODE compat, l
 			case ECPGt_unsigned_char:
 			case ECPGt_string:
 			default:
-			{
-				long	datalen = strlen(PQgetvalue(res, row, i)) + 1;
-				ecpg_sqlda_align_add_size(offset, sizeof(int), datalen, &offset, &next_offset);
-				break;
-			}
+				{
+					long		datalen = strlen(PQgetvalue(res, row, i)) + 1;
+
+					ecpg_sqlda_align_add_size(offset, sizeof(int), datalen, &offset, &next_offset);
+					break;
+				}
 		}
 		offset = next_offset;
 	}
@@ -149,7 +152,7 @@ sqlda_common_total_size(const PGresult *res, int row, enum COMPAT_MODE compat, l
 static long
 sqlda_compat_total_size(const PGresult *res, int row, enum COMPAT_MODE compat)
 {
-	long	offset;
+	long		offset;
 
 	offset = sqlda_compat_empty_size(res);
 
@@ -163,8 +166,8 @@ sqlda_compat_total_size(const PGresult *res, int row, enum COMPAT_MODE compat)
 static long
 sqlda_native_empty_size(const PGresult *res)
 {
-	long	offset;
-	int	sqld = PQnfields(res);
+	long		offset;
+	int			sqld = PQnfields(res);
 
 	/* Initial size to store main structure and field structures */
 	offset = sizeof(struct sqlda_struct) + (sqld - 1) * sizeof(struct sqlvar_struct);
@@ -178,7 +181,7 @@ sqlda_native_empty_size(const PGresult *res)
 static long
 sqlda_native_total_size(const PGresult *res, int row, enum COMPAT_MODE compat)
 {
-	long	offset;
+	long		offset;
 
 	offset = sqlda_native_empty_size(res);
 
@@ -201,22 +204,22 @@ ecpg_build_compat_sqlda(int line, PGresult *res, int row, enum COMPAT_MODE compa
 	struct sqlvar_compat *sqlvar;
 	char	   *fname;
 	long		size;
-	int		sqld;
-	int		i;
+	int			sqld;
+	int			i;
 
 	size = sqlda_compat_total_size(res, row, compat);
-	sqlda = (struct sqlda_compat *)ecpg_alloc(size, line);
+	sqlda = (struct sqlda_compat *) ecpg_alloc(size, line);
 	if (!sqlda)
 		return NULL;
 
 	memset(sqlda, 0, size);
-	sqlvar = (struct sqlvar_compat *)(sqlda + 1);
+	sqlvar = (struct sqlvar_compat *) (sqlda + 1);
 	sqld = PQnfields(res);
-	fname = (char *)(sqlvar + sqld);
+	fname = (char *) (sqlvar + sqld);
 
 	sqlda->sqld = sqld;
 	ecpg_log("ecpg_build_compat_sqlda on line %d sqld = %d\n", line, sqld);
-	sqlda->desc_occ = size; /* cheat here, keep the full allocated size */
+	sqlda->desc_occ = size;		/* cheat here, keep the full allocated size */
 	sqlda->sqlvar = sqlvar;
 
 	for (i = 0; i < sqlda->sqld; i++)
@@ -225,7 +228,7 @@ ecpg_build_compat_sqlda(int line, PGresult *res, int row, enum COMPAT_MODE compa
 		strcpy(fname, PQfname(res, i));
 		sqlda->sqlvar[i].sqlname = fname;
 		fname += strlen(sqlda->sqlvar[i].sqlname) + 1;
-		sqlda->sqlvar[i].sqlformat = (char *)(long)PQfformat(res, i);
+		sqlda->sqlvar[i].sqlformat = (char *) (long) PQfformat(res, i);
 		sqlda->sqlvar[i].sqlxid = PQftype(res, i);
 		sqlda->sqlvar[i].sqltypelen = PQfsize(res, i);
 	}
@@ -236,15 +239,16 @@ ecpg_build_compat_sqlda(int line, PGresult *res, int row, enum COMPAT_MODE compa
 /*
  * Sets values from PGresult.
  */
-static int2	value_is_null = -1;
-static int2	value_is_not_null = 0;
+static int2 value_is_null = -1;
+static int2 value_is_not_null = 0;
 
 void
-ecpg_set_compat_sqlda(int lineno, struct sqlda_compat **_sqlda, const PGresult *res, int row, enum COMPAT_MODE compat)
+ecpg_set_compat_sqlda(int lineno, struct sqlda_compat ** _sqlda, const PGresult *res, int row, enum COMPAT_MODE compat)
 {
 	struct sqlda_compat *sqlda = (*_sqlda);
-	int		i;
-	long		offset, next_offset;
+	int			i;
+	long		offset,
+				next_offset;
 
 	if (row < 0)
 		return;
@@ -257,106 +261,106 @@ ecpg_set_compat_sqlda(int lineno, struct sqlda_compat **_sqlda, const PGresult *
 	 */
 	for (i = 0; i < sqlda->sqld; i++)
 	{
-		int	isnull;
-		int	datalen;
-		bool	set_data = true;
+		int			isnull;
+		int			datalen;
+		bool		set_data = true;
 
 		switch (sqlda->sqlvar[i].sqltype)
 		{
 			case ECPGt_short:
 			case ECPGt_unsigned_short:
 				ecpg_sqlda_align_add_size(offset, sizeof(short), sizeof(short), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(short);
 				break;
 			case ECPGt_int:
 			case ECPGt_unsigned_int:
 				ecpg_sqlda_align_add_size(offset, sizeof(int), sizeof(int), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(int);
 				break;
 			case ECPGt_long:
 			case ECPGt_unsigned_long:
 				ecpg_sqlda_align_add_size(offset, sizeof(long), sizeof(long), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(long);
 				break;
 			case ECPGt_long_long:
 			case ECPGt_unsigned_long_long:
 				ecpg_sqlda_align_add_size(offset, sizeof(long long), sizeof(long long), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(long long);
 				break;
 			case ECPGt_bool:
 				ecpg_sqlda_align_add_size(offset, sizeof(bool), sizeof(bool), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(bool);
 				break;
 			case ECPGt_float:
 				ecpg_sqlda_align_add_size(offset, sizeof(float), sizeof(float), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(float);
 				break;
 			case ECPGt_double:
 				ecpg_sqlda_align_add_size(offset, sizeof(double), sizeof(double), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(double);
 				break;
 			case ECPGt_decimal:
 				ecpg_sqlda_align_add_size(offset, sizeof(int), sizeof(decimal), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(decimal);
 				break;
 			case ECPGt_numeric:
-			{
-				numeric	   *num;
-				char	   *val;
+				{
+					numeric    *num;
+					char	   *val;
 
-				set_data = false;
+					set_data = false;
 
-				ecpg_sqlda_align_add_size(offset, sizeof(int), sizeof(numeric), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
-				sqlda->sqlvar[i].sqllen = sizeof(numeric);
+					ecpg_sqlda_align_add_size(offset, sizeof(int), sizeof(numeric), &offset, &next_offset);
+					sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
+					sqlda->sqlvar[i].sqllen = sizeof(numeric);
 
-				if (PQgetisnull(res, row, i))
-				{
-					ECPGset_noind_null(ECPGt_numeric, sqlda->sqlvar[i].sqldata);
-					break;
-				}
+					if (PQgetisnull(res, row, i))
+					{
+						ECPGset_noind_null(ECPGt_numeric, sqlda->sqlvar[i].sqldata);
+						break;
+					}
 
-				val = PQgetvalue(res, row, i);
-				num = PGTYPESnumeric_from_asc(val, NULL);
-				if (!num)
-				{
-					ECPGset_noind_null(ECPGt_numeric, sqlda->sqlvar[i].sqldata);
-					break;
-				}
+					val = PQgetvalue(res, row, i);
+					num = PGTYPESnumeric_from_asc(val, NULL);
+					if (!num)
+					{
+						ECPGset_noind_null(ECPGt_numeric, sqlda->sqlvar[i].sqldata);
+						break;
+					}
 
-				memcpy(sqlda->sqlvar[i].sqldata, num, sizeof(numeric));
+					memcpy(sqlda->sqlvar[i].sqldata, num, sizeof(numeric));
 
-				ecpg_sqlda_align_add_size(next_offset, sizeof(int), num->ndigits + 1, &offset, &next_offset);
-				memcpy((char *)sqlda + offset, num->buf, num->ndigits + 1);
+					ecpg_sqlda_align_add_size(next_offset, sizeof(int), num->ndigits + 1, &offset, &next_offset);
+					memcpy((char *) sqlda + offset, num->buf, num->ndigits + 1);
 
-				((numeric *)sqlda->sqlvar[i].sqldata)->buf = (NumericDigit *)sqlda + offset;
-				((numeric *)sqlda->sqlvar[i].sqldata)->digits = (NumericDigit *)sqlda + offset + (num->digits - num->buf);
+					((numeric *) sqlda->sqlvar[i].sqldata)->buf = (NumericDigit *) sqlda + offset;
+					((numeric *) sqlda->sqlvar[i].sqldata)->digits = (NumericDigit *) sqlda + offset + (num->digits - num->buf);
 
-				PGTYPESnumeric_free(num);
+					PGTYPESnumeric_free(num);
 
-				break;
-			}
+					break;
+				}
 			case ECPGt_date:
 				ecpg_sqlda_align_add_size(offset, sizeof(date), sizeof(date), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(date);
 				break;
 			case ECPGt_timestamp:
 				ecpg_sqlda_align_add_size(offset, sizeof(timestamp), sizeof(timestamp), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(timestamp);
 				break;
 			case ECPGt_interval:
 				ecpg_sqlda_align_add_size(offset, sizeof(int64), sizeof(interval), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(interval);
 				break;
 			case ECPGt_char:
@@ -365,7 +369,7 @@ ecpg_set_compat_sqlda(int lineno, struct sqlda_compat **_sqlda, const PGresult *
 			default:
 				datalen = strlen(PQgetvalue(res, row, i)) + 1;
 				ecpg_sqlda_align_add_size(offset, sizeof(int), datalen, &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = datalen;
 				if (datalen > 32768)
 					sqlda->sqlvar[i].sqlilongdata = sqlda->sqlvar[i].sqldata;
@@ -381,9 +385,9 @@ ecpg_set_compat_sqlda(int lineno, struct sqlda_compat **_sqlda, const PGresult *
 		{
 			if (set_data)
 				ecpg_get_data(res, row, i, lineno,
-						sqlda->sqlvar[i].sqltype, ECPGt_NO_INDICATOR,
-						sqlda->sqlvar[i].sqldata, NULL, 0, 0, 0,
-						ECPG_ARRAY_NONE, compat, false);
+							  sqlda->sqlvar[i].sqltype, ECPGt_NO_INDICATOR,
+							  sqlda->sqlvar[i].sqldata, NULL, 0, 0, 0,
+							  ECPG_ARRAY_NONE, compat, false);
 		}
 		else
 			ECPGset_noind_null(sqlda->sqlvar[i].sqltype, sqlda->sqlvar[i].sqldata);
@@ -397,10 +401,10 @@ ecpg_build_native_sqlda(int line, PGresult *res, int row, enum COMPAT_MODE compa
 {
 	struct sqlda_struct *sqlda;
 	long		size;
-	int		i;
+	int			i;
 
 	size = sqlda_native_total_size(res, row, compat);
-	sqlda = (struct sqlda_struct *)ecpg_alloc(size, line);
+	sqlda = (struct sqlda_struct *) ecpg_alloc(size, line);
 	if (!sqlda)
 		return NULL;
 
@@ -425,11 +429,12 @@ ecpg_build_native_sqlda(int line, PGresult *res, int row, enum COMPAT_MODE compa
 }
 
 void
-ecpg_set_native_sqlda(int lineno, struct sqlda_struct **_sqlda, const PGresult *res, int row, enum COMPAT_MODE compat)
+ecpg_set_native_sqlda(int lineno, struct sqlda_struct ** _sqlda, const PGresult *res, int row, enum COMPAT_MODE compat)
 {
 	struct sqlda_struct *sqlda = (*_sqlda);
-	int		i;
-	long		offset, next_offset;
+	int			i;
+	long		offset,
+				next_offset;
 
 	if (row < 0)
 		return;
@@ -442,106 +447,106 @@ ecpg_set_native_sqlda(int lineno, struct sqlda_struct **_sqlda, const PGresult *
 	 */
 	for (i = 0; i < sqlda->sqld; i++)
 	{
-		int	isnull;
-		int	datalen;
-		bool	set_data = true;
+		int			isnull;
+		int			datalen;
+		bool		set_data = true;
 
 		switch (sqlda->sqlvar[i].sqltype)
 		{
 			case ECPGt_short:
 			case ECPGt_unsigned_short:
 				ecpg_sqlda_align_add_size(offset, sizeof(short), sizeof(short), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(short);
 				break;
 			case ECPGt_int:
 			case ECPGt_unsigned_int:
 				ecpg_sqlda_align_add_size(offset, sizeof(int), sizeof(int), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(int);
 				break;
 			case ECPGt_long:
 			case ECPGt_unsigned_long:
 				ecpg_sqlda_align_add_size(offset, sizeof(long), sizeof(long), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(long);
 				break;
 			case ECPGt_long_long:
 			case ECPGt_unsigned_long_long:
 				ecpg_sqlda_align_add_size(offset, sizeof(long long), sizeof(long long), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(long long);
 				break;
 			case ECPGt_bool:
 				ecpg_sqlda_align_add_size(offset, sizeof(bool), sizeof(bool), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(bool);
 				break;
 			case ECPGt_float:
 				ecpg_sqlda_align_add_size(offset, sizeof(float), sizeof(float), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(float);
 				break;
 			case ECPGt_double:
 				ecpg_sqlda_align_add_size(offset, sizeof(double), sizeof(double), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(double);
 				break;
 			case ECPGt_decimal:
 				ecpg_sqlda_align_add_size(offset, sizeof(int), sizeof(decimal), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(decimal);
 				break;
 			case ECPGt_numeric:
-			{
-				numeric	   *num;
-				char	   *val;
+				{
+					numeric    *num;
+					char	   *val;
 
-				set_data = false;
+					set_data = false;
 
-				ecpg_sqlda_align_add_size(offset, sizeof(int), sizeof(numeric), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
-				sqlda->sqlvar[i].sqllen = sizeof(numeric);
+					ecpg_sqlda_align_add_size(offset, sizeof(int), sizeof(numeric), &offset, &next_offset);
+					sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
+					sqlda->sqlvar[i].sqllen = sizeof(numeric);
 
-				if (PQgetisnull(res, row, i))
-				{
-					ECPGset_noind_null(ECPGt_numeric, sqlda->sqlvar[i].sqldata);
-					break;
-				}
+					if (PQgetisnull(res, row, i))
+					{
+						ECPGset_noind_null(ECPGt_numeric, sqlda->sqlvar[i].sqldata);
+						break;
+					}
 
-				val = PQgetvalue(res, row, i);
-				num = PGTYPESnumeric_from_asc(val, NULL);
-				if (!num)
-				{
-					ECPGset_noind_null(ECPGt_numeric, sqlda->sqlvar[i].sqldata);
-					break;
-				}
+					val = PQgetvalue(res, row, i);
+					num = PGTYPESnumeric_from_asc(val, NULL);
+					if (!num)
+					{
+						ECPGset_noind_null(ECPGt_numeric, sqlda->sqlvar[i].sqldata);
+						break;
+					}
 
-				memcpy(sqlda->sqlvar[i].sqldata, num, sizeof(numeric));
+					memcpy(sqlda->sqlvar[i].sqldata, num, sizeof(numeric));
 
-				ecpg_sqlda_align_add_size(next_offset, sizeof(int), num->ndigits + 1, &offset, &next_offset);
-				memcpy((char *)sqlda + offset, num->buf, num->ndigits + 1);
+					ecpg_sqlda_align_add_size(next_offset, sizeof(int), num->ndigits + 1, &offset, &next_offset);
+					memcpy((char *) sqlda + offset, num->buf, num->ndigits + 1);
 
-				((numeric *)sqlda->sqlvar[i].sqldata)->buf = (NumericDigit *)sqlda + offset;
-				((numeric *)sqlda->sqlvar[i].sqldata)->digits = (NumericDigit *)sqlda + offset + (num->digits - num->buf);
+					((numeric *) sqlda->sqlvar[i].sqldata)->buf = (NumericDigit *) sqlda + offset;
+					((numeric *) sqlda->sqlvar[i].sqldata)->digits = (NumericDigit *) sqlda + offset + (num->digits - num->buf);
 
-				PGTYPESnumeric_free(num);
+					PGTYPESnumeric_free(num);
 
-				break;
-			}
+					break;
+				}
 			case ECPGt_date:
 				ecpg_sqlda_align_add_size(offset, sizeof(date), sizeof(date), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(date);
 				break;
 			case ECPGt_timestamp:
 				ecpg_sqlda_align_add_size(offset, sizeof(timestamp), sizeof(timestamp), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(timestamp);
 				break;
 			case ECPGt_interval:
 				ecpg_sqlda_align_add_size(offset, sizeof(int64), sizeof(interval), &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = sizeof(interval);
 				break;
 			case ECPGt_char:
@@ -550,7 +555,7 @@ ecpg_set_native_sqlda(int lineno, struct sqlda_struct **_sqlda, const PGresult *
 			default:
 				datalen = strlen(PQgetvalue(res, row, i)) + 1;
 				ecpg_sqlda_align_add_size(offset, sizeof(int), datalen, &offset, &next_offset);
-				sqlda->sqlvar[i].sqldata = (char *)sqlda + offset;
+				sqlda->sqlvar[i].sqldata = (char *) sqlda + offset;
 				sqlda->sqlvar[i].sqllen = datalen;
 				break;
 		}
@@ -562,9 +567,9 @@ ecpg_set_native_sqlda(int lineno, struct sqlda_struct **_sqlda, const PGresult *
 		{
 			if (set_data)
 				ecpg_get_data(res, row, i, lineno,
-						sqlda->sqlvar[i].sqltype, ECPGt_NO_INDICATOR,
-						sqlda->sqlvar[i].sqldata, NULL, 0, 0, 0,
-						ECPG_ARRAY_NONE, compat, false);
+							  sqlda->sqlvar[i].sqltype, ECPGt_NO_INDICATOR,
+							  sqlda->sqlvar[i].sqldata, NULL, 0, 0, 0,
+							  ECPG_ARRAY_NONE, compat, false);
 		}
 
 		offset = next_offset;
diff --git a/src/interfaces/ecpg/ecpglib/typename.c b/src/interfaces/ecpg/ecpglib/typename.c
index ff22d703bfed26eac6b229d2032d5e8713c38cef..02f432347a6f3e9fabb8e8798d056843b1ab8825 100644
--- a/src/interfaces/ecpg/ecpglib/typename.c
+++ b/src/interfaces/ecpg/ecpglib/typename.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/typename.c,v 1.18 2010/01/13 09:06:51 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/typename.c,v 1.19 2010/02/26 02:01:30 momjian Exp $ */
 
 #define POSTGRES_ECPG_INTERNAL
 #include "postgres_fe.h"
@@ -136,7 +136,7 @@ sqlda_dynamic_type(Oid type, enum COMPAT_MODE compat)
 #ifdef HAVE_LONG_INT_64
 			return ECPGt_long;
 #endif
-		/* Unhandled types always return a string */
+			/* Unhandled types always return a string */
 		default:
 			return ECPGt_char;
 	}
diff --git a/src/interfaces/ecpg/include/ecpg_informix.h b/src/interfaces/ecpg/include/ecpg_informix.h
index a97246b1937f3391b364cc12203e0de7fe27dfdd..3be8ebba9c7898e2c5ee81f40917f160e7e618f9 100644
--- a/src/interfaces/ecpg/include/ecpg_informix.h
+++ b/src/interfaces/ecpg/include/ecpg_informix.h
@@ -1,6 +1,6 @@
 /*
  * This file contains stuff needed to be as compatible to Informix as possible.
- * $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpg_informix.h,v 1.23 2009/08/14 13:28:22 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpg_informix.h,v 1.24 2010/02/26 02:01:31 momjian Exp $
  */
 #ifndef _ECPG_INFORMIX_H
 #define _ECPG_INFORMIX_H
@@ -34,7 +34,7 @@ extern		"C"
 #endif
 
 extern int	rdatestr(date, char *);
-extern void 	rtoday(date *);
+extern void rtoday(date *);
 extern int	rjulmdy(date, short *);
 extern int	rdefmtdate(date *, char *, char *);
 extern int	rfmtdate(date, char *, char *);
@@ -49,7 +49,7 @@ extern int	rsetnull(int, char *);
 extern int	rtypalign(int, int);
 extern int	rtypmsize(int, int);
 extern int	rtypwidth(int, int);
-extern void 	rupshift(char *);
+extern void rupshift(char *);
 
 extern int	byleng(char *, int);
 extern void ldchar(char *, int, char *);
diff --git a/src/interfaces/ecpg/include/ecpglib.h b/src/interfaces/ecpg/include/ecpglib.h
index 775fe7a6e250f1d07453dba791093e1bbe03d458..2e1f1d6e246fd49729f8c8ff7570a3e2597aa3b5 100644
--- a/src/interfaces/ecpg/include/ecpglib.h
+++ b/src/interfaces/ecpg/include/ecpglib.h
@@ -1,7 +1,7 @@
 /*
  * this is a small part of c.h since we don't want to leak all postgres
  * definitions into ecpg programs
- * $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpglib.h,v 1.82 2010/01/26 09:07:31 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpglib.h,v 1.83 2010/02/26 02:01:31 momjian Exp $
  */
 
 #ifndef _ECPGLIB_H
@@ -83,7 +83,7 @@ bool		ECPGset_desc(int, const char *, int,...);
 
 void		ECPGset_noind_null(enum ECPGttype, void *);
 bool		ECPGis_noind_null(enum ECPGttype, void *);
-bool		ECPGdescribe(int, int, bool, const char *, const char *, ...);
+bool		ECPGdescribe(int, int, bool, const char *, const char *,...);
 
 void		ECPGset_var(int, void *, int);
 void	   *ECPGget_var(int number);
diff --git a/src/interfaces/ecpg/include/ecpgtype.h b/src/interfaces/ecpg/include/ecpgtype.h
index bd73badd0ff5a257b839cbdf52cf986351d608ea..12bfd135ba7fe10790b19f872c4d60eb5da3773d 100644
--- a/src/interfaces/ecpg/include/ecpgtype.h
+++ b/src/interfaces/ecpg/include/ecpgtype.h
@@ -5,7 +5,7 @@
  * All types that can be handled for host variable declarations has to
  * be handled eventually.
  *
- * $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpgtype.h,v 1.39 2010/01/05 16:38:23 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpgtype.h,v 1.40 2010/02/26 02:01:31 momjian Exp $
  */
 
 /*
@@ -63,7 +63,7 @@ enum ECPGttype
 	ECPGt_EORT,					/* End of result types. */
 	ECPGt_NO_INDICATOR,			/* no indicator */
 	ECPGt_string,				/* trimmed (char *) type */
-	ECPGt_sqlda				/* C struct descriptor */
+	ECPGt_sqlda					/* C struct descriptor */
 };
 
  /* descriptor items */
diff --git a/src/interfaces/ecpg/include/pgtypes_interval.h b/src/interfaces/ecpg/include/pgtypes_interval.h
index 0d6ab38603f9eba7f2601be33852da22c5c70028..6f2225c03a1bfbfde808f07a8dcf388f2c7a964a 100644
--- a/src/interfaces/ecpg/include/pgtypes_interval.h
+++ b/src/interfaces/ecpg/include/pgtypes_interval.h
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/include/pgtypes_interval.h,v 1.15 2010/01/07 04:53:35 tgl Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/include/pgtypes_interval.h,v 1.16 2010/02/26 02:01:31 momjian Exp $ */
 
 #ifndef PGTYPES_INTERVAL
 #define PGTYPES_INTERVAL
@@ -23,7 +23,6 @@ typedef long long int int64;
 #ifdef USE_INTEGER_DATETIMES
 #define HAVE_INT64_TIMESTAMP
 #endif
-
 #endif   /* C_H */
 
 typedef struct
diff --git a/src/interfaces/ecpg/include/sqlda-compat.h b/src/interfaces/ecpg/include/sqlda-compat.h
index e4f56aaadd80f9e79c6de2db2a6c8cad236f8714..2c4e07c5f1b0754de80c5b352366990119fe1a95 100644
--- a/src/interfaces/ecpg/include/sqlda-compat.h
+++ b/src/interfaces/ecpg/include/sqlda-compat.h
@@ -7,41 +7,41 @@
 
 struct sqlvar_compat
 {
-	short	sqltype;		/* variable type                */
-	int	sqllen;			/* length in bytes              */
-	char	   *sqldata;		/* pointer to data              */
-	short	   *sqlind;		/* pointer to indicator         */
-	char	   *sqlname;		/* variable name                */
-	char	   *sqlformat;		/* reserved for future use      */
-	short	sqlitype;		/* ind variable type            */
-	short	sqlilen;		/* ind length in bytes          */
-	char	   *sqlidata;		/* ind data pointer             */
-	int	sqlxid;			/* extended id type             */
-	char	   *sqltypename;	/* extended type name           */
-	short	sqltypelen;		/* length of extended type name */
-	short	sqlownerlen;		/* length of owner name         */
-	short	sqlsourcetype;		/* source type for distinct of built-ins */
-	char	   *sqlownername;	/* owner name                   */
-	int	sqlsourceid;		/* extended id of source type   */
+	short		sqltype;		/* variable type				*/
+	int			sqllen;			/* length in bytes				*/
+	char	   *sqldata;		/* pointer to data				*/
+	short	   *sqlind;			/* pointer to indicator			*/
+	char	   *sqlname;		/* variable name				*/
+	char	   *sqlformat;		/* reserved for future use		*/
+	short		sqlitype;		/* ind variable type			*/
+	short		sqlilen;		/* ind length in bytes			*/
+	char	   *sqlidata;		/* ind data pointer				*/
+	int			sqlxid;			/* extended id type				*/
+	char	   *sqltypename;	/* extended type name			*/
+	short		sqltypelen;		/* length of extended type name */
+	short		sqlownerlen;	/* length of owner name			*/
+	short		sqlsourcetype;	/* source type for distinct of built-ins */
+	char	   *sqlownername;	/* owner name					*/
+	int			sqlsourceid;	/* extended id of source type	*/
 
 	/*
-	 * sqlilongdata is new.  It supports data that exceeds the 32k
-	 * limit.  sqlilen and sqlidata are for backward compatibility
-	 * and they have maximum value of <32K.
+	 * sqlilongdata is new.  It supports data that exceeds the 32k limit.
+	 * sqlilen and sqlidata are for backward compatibility and they have
+	 * a maximum value of <32K.
 	 */
-	char	   *sqlilongdata;	/* for data field beyond 32K    */
-	int	sqlflags;		/* for internal use only        */
-	void	   *sqlreserved;	/* reserved for future use      */
+	char	   *sqlilongdata;	/* for data field beyond 32K	*/
+	int			sqlflags;		/* for internal use only		*/
+	void	   *sqlreserved;	/* reserved for future use		*/
 };
 
 struct sqlda_compat
 {
 	short		sqld;
 	struct sqlvar_compat *sqlvar;
-	char		desc_name[19];	/* descriptor name              */
-	short		desc_occ;	/* size of sqlda structure      */
-	struct sqlda_compat *desc_next;	/* pointer to next sqlda struct */
-	void		   *reserved;	/* reserved for future use */
+	char		desc_name[19];	/* descriptor name				*/
+	short		desc_occ;		/* size of sqlda structure		*/
+	struct sqlda_compat *desc_next;		/* pointer to next sqlda struct */
+	void	   *reserved;		/* reserved for future use */
 };
 
-#endif /* ECPG_SQLDA_COMPAT_H */
+#endif   /* ECPG_SQLDA_COMPAT_H */
diff --git a/src/interfaces/ecpg/include/sqlda-native.h b/src/interfaces/ecpg/include/sqlda-native.h
index d8a6669ef4032c4121cea8814eb253afa9277f74..bd870764ead3806f213a79f606a3004b206ba737 100644
--- a/src/interfaces/ecpg/include/sqlda-native.h
+++ b/src/interfaces/ecpg/include/sqlda-native.h
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/src/interfaces/ecpg/include/sqlda-native.h,v 1.2 2010/01/06 15:10:21 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/include/sqlda-native.h,v 1.3 2010/02/26 02:01:31 momjian Exp $
  */
 
 #ifndef ECPG_SQLDA_NATIVE_H
@@ -11,7 +11,7 @@
  * because the length must include a trailing zero byte.
  *
  * This should be at least as much as NAMEDATALEN of the database the
- * applications run against. 
+ * applications run against.
  */
 #define NAMEDATALEN 64
 
@@ -37,7 +37,7 @@ struct sqlda_struct
 	short		sqln;
 	short		sqld;
 	struct sqlda_struct *desc_next;
-	struct sqlvar_struct	sqlvar[1];
+	struct sqlvar_struct sqlvar[1];
 };
 
-#endif /* ECPG_SQLDA_NATIVE_H */
+#endif   /* ECPG_SQLDA_NATIVE_H */
diff --git a/src/interfaces/ecpg/include/sqlda.h b/src/interfaces/ecpg/include/sqlda.h
index b1b4debf905721a6fb794baa5b3ff0f57aa8e800..3f99a463b5d58269e0e9f53afd4a3f4e28698456 100644
--- a/src/interfaces/ecpg/include/sqlda.h
+++ b/src/interfaces/ecpg/include/sqlda.h
@@ -4,15 +4,13 @@
 #ifdef _ECPG_INFORMIX_H
 
 #include "sqlda-compat.h"
-typedef struct sqlvar_compat	sqlvar_t;
-typedef struct sqlda_compat	sqlda_t;
-
+typedef struct sqlvar_compat sqlvar_t;
+typedef struct sqlda_compat sqlda_t;
 #else
 
 #include "sqlda-native.h"
-typedef struct sqlvar_struct	sqlvar_t;
-typedef struct sqlda_struct	sqlda_t;
-
+typedef struct sqlvar_struct sqlvar_t;
+typedef struct sqlda_struct sqlda_t;
 #endif
 
-#endif /* ECPG_SQLDA_H */
+#endif   /* ECPG_SQLDA_H */
diff --git a/src/interfaces/ecpg/include/sqltypes.h b/src/interfaces/ecpg/include/sqltypes.h
index 0c01867d02fcf2a1f7ce045c3c4b0f7a0170ac2a..797cb5b1be4f6514841a334fce9027f77250df95 100644
--- a/src/interfaces/ecpg/include/sqltypes.h
+++ b/src/interfaces/ecpg/include/sqltypes.h
@@ -32,26 +32,26 @@
 /*
  * Values used in sqlda->sqlvar[i]->sqltype
  */
-#define	SQLCHAR		ECPGt_char
-#define	SQLSMINT	ECPGt_short
-#define	SQLINT		ECPGt_int
-#define	SQLFLOAT	ECPGt_double
-#define	SQLSMFLOAT	ECPGt_float
-#define	SQLDECIMAL	ECPGt_decimal
-#define	SQLSERIAL	ECPGt_int
-#define	SQLDATE		ECPGt_date
-#define	SQLDTIME	ECPGt_timestamp
-#define	SQLTEXT		ECPGt_char
-#define	SQLVCHAR	ECPGt_char
-#define SQLINTERVAL     ECPGt_interval
-#define	SQLNCHAR	ECPGt_char
-#define	SQLNVCHAR	ECPGt_char
+#define SQLCHAR		ECPGt_char
+#define SQLSMINT	ECPGt_short
+#define SQLINT		ECPGt_int
+#define SQLFLOAT	ECPGt_double
+#define SQLSMFLOAT	ECPGt_float
+#define SQLDECIMAL	ECPGt_decimal
+#define SQLSERIAL	ECPGt_int
+#define SQLDATE		ECPGt_date
+#define SQLDTIME	ECPGt_timestamp
+#define SQLTEXT		ECPGt_char
+#define SQLVCHAR	ECPGt_char
+#define SQLINTERVAL		ECPGt_interval
+#define SQLNCHAR	ECPGt_char
+#define SQLNVCHAR	ECPGt_char
 #ifdef HAVE_LONG_LONG_INT_64
-#define	SQLINT8		ECPGt_long_long
-#define	SQLSERIAL8	ECPGt_long_long
+#define SQLINT8		ECPGt_long_long
+#define SQLSERIAL8	ECPGt_long_long
 #else
-#define	SQLINT8		ECPGt_long
-#define	SQLSERIAL8	ECPGt_long
+#define SQLINT8		ECPGt_long
+#define SQLSERIAL8	ECPGt_long
 #endif
 
 #endif   /* ndef ECPG_SQLTYPES_H */
diff --git a/src/interfaces/ecpg/preproc/c_keywords.c b/src/interfaces/ecpg/preproc/c_keywords.c
index 36f72b537e99ef761b80ef70b5492cc4eaef72b9..7abd94f4ca23bb65a7f3aba536a504c857262862 100644
--- a/src/interfaces/ecpg/preproc/c_keywords.c
+++ b/src/interfaces/ecpg/preproc/c_keywords.c
@@ -3,7 +3,7 @@
  * c_keywords.c
  *	  lexical token lookup for reserved words in postgres embedded SQL
  *
- * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/c_keywords.c,v 1.24 2009/07/14 20:24:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/c_keywords.c,v 1.25 2010/02/26 02:01:31 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -57,7 +57,7 @@ static const ScanKeyword ScanCKeywords[] = {
 
 
 /*
- * Do a binary search using plain strcmp() comparison.  This is much like
+ * Do a binary search using plain strcmp() comparison.	This is much like
  * ScanKeywordLookup(), except we want case-sensitive matching.
  */
 const ScanKeyword *
diff --git a/src/interfaces/ecpg/preproc/descriptor.c b/src/interfaces/ecpg/preproc/descriptor.c
index 6762aa4e0ea2ada690ae5c88ce83ec41e408f33a..06a8c26037e059e7789ad05462a35a38c41881a8 100644
--- a/src/interfaces/ecpg/preproc/descriptor.c
+++ b/src/interfaces/ecpg/preproc/descriptor.c
@@ -1,7 +1,7 @@
 /*
  * functions needed for descriptor handling
  *
- * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/descriptor.c,v 1.30 2010/01/26 09:07:31 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/descriptor.c,v 1.31 2010/02/26 02:01:31 momjian Exp $
  *
  * since descriptor might be either a string constant or a string var
  * we need to check for a constant if we expect a constant
@@ -344,4 +344,3 @@ sqlda_variable(const char *name)
 
 	return p;
 }
-
diff --git a/src/interfaces/ecpg/preproc/ecpg.c b/src/interfaces/ecpg/preproc/ecpg.c
index 25ba08e519a7d4ae9f744bd8d7f5f7b24eac6273..1c90202b57cb63d1ad60b8a633ba65f0fe2db835 100644
--- a/src/interfaces/ecpg/preproc/ecpg.c
+++ b/src/interfaces/ecpg/preproc/ecpg.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg.c,v 1.113 2010/01/26 09:07:31 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg.c,v 1.114 2010/02/26 02:01:31 momjian Exp $ */
 
 /* Main for ecpg, the PostgreSQL embedded SQL precompiler. */
 /* Copyright (c) 1996-2010, PostgreSQL Global Development Group */
@@ -419,7 +419,10 @@ main(int argc, char *const argv[])
 				/* and structure member lists */
 				memset(struct_member_list, 0, sizeof(struct_member_list));
 
-				/* and our variable counter for out of scope cursors' variables */
+				/*
+				 * and our variable counter for out of scope cursors'
+				 * variables
+				 */
 				ecpg_internal_var = 0;
 
 				/* finally the actual connection */
diff --git a/src/interfaces/ecpg/preproc/type.h b/src/interfaces/ecpg/preproc/type.h
index a36e4518a25617bc00d9f049641aa810fdc54d6f..5cca1816c2c5cfd921f678fe63f40052c11472fe 100644
--- a/src/interfaces/ecpg/preproc/type.h
+++ b/src/interfaces/ecpg/preproc/type.h
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/type.h,v 1.52 2010/01/26 09:07:31 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/type.h,v 1.53 2010/02/26 02:01:31 momjian Exp $
  */
 #ifndef _ECPG_PREPROC_TYPE_H
 #define _ECPG_PREPROC_TYPE_H
@@ -17,7 +17,8 @@ struct ECPGstruct_member
 struct ECPGtype
 {
 	enum ECPGttype type;
-	char	   *type_name;			/* For struct and union types it is the struct name */
+	char	   *type_name;		/* For struct and union types it is the struct
+								 * name */
 	char	   *size;			/* For array it is the number of elements. For
 								 * varchar it is the maxsize of the area. */
 	char	   *struct_sizeof;	/* For a struct this is the sizeof() type as
diff --git a/src/interfaces/ecpg/preproc/variable.c b/src/interfaces/ecpg/preproc/variable.c
index 728bc360dc7d8c718dbe3a27b2474e7072a12325..001accd3284a89954b1efd17138226fcb548ce4e 100644
--- a/src/interfaces/ecpg/preproc/variable.c
+++ b/src/interfaces/ecpg/preproc/variable.c
@@ -1,4 +1,4 @@
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/variable.c,v 1.52 2010/01/26 09:07:31 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/variable.c,v 1.53 2010/02/26 02:01:31 momjian Exp $ */
 
 #include "postgres_fe.h"
 
@@ -404,8 +404,9 @@ add_variable_to_tail(struct arguments ** list, struct variable * var, struct var
 void
 remove_variable_from_list(struct arguments ** list, struct variable * var)
 {
-	struct arguments *p, *prev = NULL;
-	bool found = false;
+	struct arguments *p,
+			   *prev = NULL;
+	bool		found = false;
 
 	for (p = *list; p; p = p->next)
 	{
diff --git a/src/interfaces/ecpg/test/preproc/struct.h b/src/interfaces/ecpg/test/preproc/struct.h
index cc4681b74fefeb6926900efabf3179ec0d209fb7..75e802ac6feed9dde1ebaea509a80dece2399576 100644
--- a/src/interfaces/ecpg/test/preproc/struct.h
+++ b/src/interfaces/ecpg/test/preproc/struct.h
@@ -1,18 +1,20 @@
 
-struct mytype {
-	int	id;
-	char	t[64];
-	double	d1; /* dec_t */
-	double	d2;
-	char	c[30];
+struct mytype
+{
+	int			id;
+	char		t[64];
+	double		d1;				/* dec_t */
+	double		d2;
+	char		c[30];
 };
 typedef struct mytype MYTYPE;
 
-struct mynulltype {
-	int	id;
-	int	t;
-	int	d1;
-	int	d2;
-	int	c;
+struct mynulltype
+{
+	int			id;
+	int			t;
+	int			d1;
+	int			d2;
+	int			c;
 };
 typedef struct mynulltype MYNULLTYPE;
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 89e61a7ec710dcdb1095d26a23e7754c29b70fdd..9302e287822e6007e699842b32f62bfeb65b3916 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.387 2010/02/17 04:19:41 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.388 2010/02/26 02:01:32 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -228,7 +228,7 @@ static const PQconninfoOption PQconninfoOptions[] = {
 #endif
 
 	{"replication", NULL, NULL, NULL,
-	 "Replication", "D", 5},
+	"Replication", "D", 5},
 
 	/* Terminating entry --- MUST BE LAST */
 	{NULL, NULL, NULL, NULL,
@@ -268,8 +268,8 @@ static void closePGconn(PGconn *conn);
 static PQconninfoOption *conninfo_parse(const char *conninfo,
 			   PQExpBuffer errorMessage, bool use_defaults);
 static PQconninfoOption *conninfo_array_parse(const char **keywords,
-				const char **values, PQExpBuffer errorMessage,
-				bool use_defaults, int expand_dbname);
+					 const char **values, PQExpBuffer errorMessage,
+					 bool use_defaults, int expand_dbname);
 static char *conninfo_getval(PQconninfoOption *connOptions,
 				const char *keyword);
 static void defaultNoticeReceiver(void *arg, const PGresult *res);
@@ -277,10 +277,10 @@ static void defaultNoticeProcessor(void *arg, const char *message);
 static int parseServiceInfo(PQconninfoOption *options,
 				 PQExpBuffer errorMessage);
 static int parseServiceFile(const char *serviceFile,
-							const char *service,
-							PQconninfoOption *options,
-							PQExpBuffer errorMessage,
-							bool *group_found);
+				 const char *service,
+				 PQconninfoOption *options,
+				 PQExpBuffer errorMessage,
+				 bool *group_found);
 static char *pwdfMatchesString(char *buf, char *token);
 static char *PasswordFromFile(char *hostname, char *port, char *dbname,
 				 char *username);
@@ -306,7 +306,7 @@ pgthreadlock_t pg_g_threadlock = default_threadlock;
  * terminated arrays instead.
  *
  * To connect in an asynchronous (non-blocking) manner, use the functions
- * PQconnectStart or PQconnectStartParams (which differ in the same way as 
+ * PQconnectStart or PQconnectStartParams (which differ in the same way as
  * PQconnectdb and PQconnectdbParams) and PQconnectPoll.
  *
  * Internally, the static functions connectDBStart, connectDBComplete
@@ -406,8 +406,8 @@ PQconnectStartParams(const char **keywords,
 					 const char **values,
 					 int expand_dbname)
 {
-	PGconn			   *conn;
-	PQconninfoOption   *connOptions;
+	PGconn	   *conn;
+	PQconninfoOption *connOptions;
 
 	/*
 	 * Allocate memory for the conn structure
@@ -432,7 +432,7 @@ PQconnectStartParams(const char **keywords,
 	/*
 	 * Move option values into conn structure
 	 */
-    fillPGconn(conn, connOptions);
+	fillPGconn(conn, connOptions);
 
 	/*
 	 * Free the option info - all is in conn now
@@ -609,7 +609,7 @@ connectOptions1(PGconn *conn, const char *conninfo)
 	/*
 	 * Move option values into conn structure
 	 */
-    fillPGconn(conn, connOptions);
+	fillPGconn(conn, connOptions);
 
 	/*
 	 * Free the option info - all is in conn now
@@ -1326,9 +1326,9 @@ keep_going:						/* We will come back to here until there is
 					 * We have three methods of blocking SIGPIPE during
 					 * send() calls to this socket:
 					 *
-					 *  - setsockopt(sock, SO_NOSIGPIPE)
-					 *  - send(sock, ..., MSG_NOSIGNAL)
-					 *  - setting the signal mask to SIG_IGN during send()
+					 *	- setsockopt(sock, SO_NOSIGPIPE)
+					 *	- send(sock, ..., MSG_NOSIGNAL)
+					 *	- setting the signal mask to SIG_IGN during send()
 					 *
 					 * The third method requires three syscalls per send,
 					 * so we prefer either of the first two, but they are
@@ -1350,7 +1350,7 @@ keep_going:						/* We will come back to here until there is
 					conn->sigpipe_flag = true;
 #else
 					conn->sigpipe_flag = false;
-#endif /* MSG_NOSIGNAL */
+#endif   /* MSG_NOSIGNAL */
 
 #ifdef SO_NOSIGPIPE
 					optval = 1;
@@ -1360,7 +1360,7 @@ keep_going:						/* We will come back to here until there is
 						conn->sigpipe_so = true;
 						conn->sigpipe_flag = false;
 					}
-#endif /* SO_NOSIGPIPE */
+#endif   /* SO_NOSIGPIPE */
 
 					/*
 					 * Start/make connection.  This should not block, since we
@@ -2034,7 +2034,7 @@ keep_going:						/* We will come back to here until there is
 						/*
 						 * If we tried to send application_name, check to see
 						 * if the error is about that --- pre-9.0 servers will
-						 * reject it at this stage of the process.  If so,
+						 * reject it at this stage of the process.	If so,
 						 * close the connection and retry without sending
 						 * application_name.  We could possibly get a false
 						 * SQLSTATE match here and retry uselessly, but there
@@ -2124,7 +2124,7 @@ keep_going:						/* We will come back to here until there is
 		default:
 			appendPQExpBuffer(&conn->errorMessage,
 							  libpq_gettext("invalid connection state %d, "
-								 "probably indicative of memory corruption\n"),
+							   "probably indicative of memory corruption\n"),
 							  conn->status);
 			goto error_return;
 	}
@@ -3266,6 +3266,7 @@ parseServiceInfo(PQconninfoOption *options, PQExpBuffer errorMessage)
 		return status;
 
 next_file:
+
 	/*
 	 * This could be used by any application so we can't use the binary
 	 * location to find our config files.
@@ -3284,7 +3285,7 @@ last_file:
 	if (!group_found)
 	{
 		printfPQExpBuffer(errorMessage,
-						  libpq_gettext("definition of service \"%s\" not found\n"), service);
+		 libpq_gettext("definition of service \"%s\" not found\n"), service);
 		return 3;
 	}
 
@@ -3297,7 +3298,7 @@ parseServiceFile(const char *serviceFile,
 				 PQconninfoOption *options,
 				 PQExpBuffer errorMessage,
 				 bool *group_found)
-{	
+{
 	int			linenr = 0,
 				i;
 	FILE	   *f;
@@ -3320,7 +3321,7 @@ parseServiceFile(const char *serviceFile,
 		{
 			fclose(f);
 			printfPQExpBuffer(errorMessage,
-							  libpq_gettext("line %d too long in service file \"%s\"\n"),
+				  libpq_gettext("line %d too long in service file \"%s\"\n"),
 							  linenr,
 							  serviceFile);
 			return 2;
@@ -3359,8 +3360,7 @@ parseServiceFile(const char *serviceFile,
 			if (*group_found)
 			{
 				/*
-				 * Finally, we are in the right group and can parse
-				 * the line
+				 * Finally, we are in the right group and can parse the line
 				 */
 				char	   *key,
 						   *val;
@@ -3745,20 +3745,20 @@ conninfo_array_parse(const char **keywords, const char **values,
 					 PQExpBuffer errorMessage, bool use_defaults,
 					 int expand_dbname)
 {
-	char			   *tmp;
-	PQconninfoOption   *options;
-	PQconninfoOption   *str_options = NULL;
-	PQconninfoOption   *option;
-	int					i = 0;
+	char	   *tmp;
+	PQconninfoOption *options;
+	PQconninfoOption *str_options = NULL;
+	PQconninfoOption *option;
+	int			i = 0;
 
 	/*
-	 * If expand_dbname is non-zero, check keyword "dbname"
-	 * to see if val is actually a conninfo string
+	 * If expand_dbname is non-zero, check keyword "dbname" to see if val is
+	 * actually a conninfo string
 	 */
-	while(expand_dbname && keywords[i])
+	while (expand_dbname && keywords[i])
 	{
 		const char *pname = keywords[i];
-		const char *pvalue  = values[i];
+		const char *pvalue = values[i];
 
 		/* first find "dbname" if any */
 		if (strcmp(pname, "dbname") == 0)
@@ -3767,10 +3767,9 @@ conninfo_array_parse(const char **keywords, const char **values,
 			if (pvalue && strchr(pvalue, '='))
 			{
 				/*
-				 * Must be a conninfo string, so parse it, but do not
-				 * use defaults here -- those get picked up later.
-				 * We only want to override for those parameters actually
-				 * passed.
+				 * Must be a conninfo string, so parse it, but do not use
+				 * defaults here -- those get picked up later. We only want to
+				 * override for those parameters actually passed.
 				 */
 				str_options = conninfo_parse(pvalue, errorMessage, false);
 				if (str_options == NULL)
@@ -3793,10 +3792,10 @@ conninfo_array_parse(const char **keywords, const char **values,
 
 	i = 0;
 	/* Parse the keywords/values arrays */
-	while(keywords[i])
+	while (keywords[i])
 	{
 		const char *pname = keywords[i];
-		const char *pvalue  = values[i];
+		const char *pvalue = values[i];
 
 		if (pvalue != NULL)
 		{
@@ -3811,7 +3810,7 @@ conninfo_array_parse(const char **keywords, const char **values,
 			if (option->keyword == NULL)
 			{
 				printfPQExpBuffer(errorMessage,
-							 libpq_gettext("invalid connection option \"%s\"\n"),
+						 libpq_gettext("invalid connection option \"%s\"\n"),
 								  pname);
 				PQconninfoFree(options);
 				return NULL;
@@ -3819,8 +3818,8 @@ conninfo_array_parse(const char **keywords, const char **values,
 
 			/*
 			 * If we are on the dbname parameter, and we have a parsed
-			 * conninfo string, copy those parameters across, overriding
-			 * any existing previous settings
+			 * conninfo string, copy those parameters across, overriding any
+			 * existing previous settings
 			 */
 			if (strcmp(pname, "dbname") == 0 && str_options)
 			{
diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c
index bded5aedd9d6c4de870dbec476f86f5628d8e2aa..b20587f0e482cc18a1f04d9482364957e15fb960 100644
--- a/src/interfaces/libpq/fe-exec.c
+++ b/src/interfaces/libpq/fe-exec.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/interfaces/libpq/fe-exec.c,v 1.210 2010/02/17 04:19:41 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/interfaces/libpq/fe-exec.c,v 1.211 2010/02/26 02:01:32 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -3070,13 +3070,13 @@ static char *
 PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident)
 {
 	const char *s;
-	char   *result;
-	char   *rp;
-	int		num_quotes = 0;		/* single or double, depending on as_ident */
-	int		num_backslashes = 0;
-	int		input_len;
-	int		result_size;
-	char	quote_char = as_ident ? '"' : '\'';
+	char	   *result;
+	char	   *rp;
+	int			num_quotes = 0; /* single or double, depending on as_ident */
+	int			num_backslashes = 0;
+	int			input_len;
+	int			result_size;
+	char		quote_char = as_ident ? '"' : '\'';
 
 	/* We must have a connection, else fail immediately. */
 	if (!conn)
@@ -3091,7 +3091,7 @@ PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident)
 			++num_backslashes;
 		else if (IS_HIGHBIT_SET(*s))
 		{
-			int charlen;
+			int			charlen;
 
 			/* Slow path for possible multibyte characters */
 			charlen = pg_encoding_mblen(conn->client_encoding, s);
@@ -3111,7 +3111,7 @@ PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident)
 
 	/* Allocate output buffer. */
 	input_len = s - str;
-	result_size = input_len + num_quotes + 3;  /* two quotes, plus a NUL */
+	result_size = input_len + num_quotes + 3;	/* two quotes, plus a NUL */
 	if (!as_ident && num_backslashes > 0)
 		result_size += num_backslashes + 2;
 	result = rp = (char *) malloc(result_size);
@@ -3125,7 +3125,7 @@ PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident)
 	/*
 	 * If we are escaping a literal that contains backslashes, we use the
 	 * escape string syntax so that the result is correct under either value
-	 * of standard_conforming_strings.  We also emit a leading space in this
+	 * of standard_conforming_strings.	We also emit a leading space in this
 	 * case, to guard against the possibility that the result might be
 	 * interpolated immediately following an identifier.
 	 */
@@ -3143,8 +3143,8 @@ PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident)
 	 *
 	 * We've already verified that the input string is well-formed in the
 	 * current encoding.  If it contains no quotes and, in the case of
-	 * literal-escaping, no backslashes, then we can just copy it directly
-	 * to the output buffer, adding the necessary quotes.
+	 * literal-escaping, no backslashes, then we can just copy it directly to
+	 * the output buffer, adding the necessary quotes.
 	 *
 	 * If not, we must rescan the input and process each character
 	 * individually.
@@ -3167,13 +3167,14 @@ PQescapeInternal(PGconn *conn, const char *str, size_t len, bool as_ident)
 				*rp++ = *s;
 			else
 			{
-				int i = pg_encoding_mblen(conn->client_encoding, s);
+				int			i = pg_encoding_mblen(conn->client_encoding, s);
+
 				while (1)
 				{
 					*rp++ = *s;
 					if (--i == 0)
 						break;
-					++s;	/* for loop will provide the final increment */
+					++s;		/* for loop will provide the final increment */
 				}
 			}
 		}
@@ -3391,9 +3392,9 @@ PQunescapeBytea(const unsigned char *strtext, size_t *retbuflen)
 	if (strtext[0] == '\\' && strtext[1] == 'x')
 	{
 		const unsigned char *s;
-		unsigned char	*p;
+		unsigned char *p;
 
-		buflen = (strtextlen - 2)/2;
+		buflen = (strtextlen - 2) / 2;
 		/* Avoid unportable malloc(0) */
 		buffer = (unsigned char *) malloc(buflen > 0 ? buflen : 1);
 		if (buffer == NULL)
@@ -3403,8 +3404,8 @@ PQunescapeBytea(const unsigned char *strtext, size_t *retbuflen)
 		p = buffer;
 		while (*s)
 		{
-			char	v1,
-					v2;
+			char		v1,
+						v2;
 
 			/*
 			 * Bad input is silently ignored.  Note that this includes
@@ -3422,52 +3423,52 @@ PQunescapeBytea(const unsigned char *strtext, size_t *retbuflen)
 	}
 	else
 	{
-	/*
-	 * Length of input is max length of output, but add one to avoid
-	 * unportable malloc(0) if input is zero-length.
-	 */
-	buffer = (unsigned char *) malloc(strtextlen + 1);
-	if (buffer == NULL)
-		return NULL;
+		/*
+		 * Length of input is max length of output, but add one to avoid
+		 * unportable malloc(0) if input is zero-length.
+		 */
+		buffer = (unsigned char *) malloc(strtextlen + 1);
+		if (buffer == NULL)
+			return NULL;
 
-	for (i = j = 0; i < strtextlen;)
-	{
-		switch (strtext[i])
+		for (i = j = 0; i < strtextlen;)
 		{
-			case '\\':
-				i++;
-				if (strtext[i] == '\\')
-					buffer[j++] = strtext[i++];
-				else
-				{
-					if ((ISFIRSTOCTDIGIT(strtext[i])) &&
-						(ISOCTDIGIT(strtext[i + 1])) &&
-						(ISOCTDIGIT(strtext[i + 2])))
+			switch (strtext[i])
+			{
+				case '\\':
+					i++;
+					if (strtext[i] == '\\')
+						buffer[j++] = strtext[i++];
+					else
 					{
-						int byte;
-
-						byte = OCTVAL(strtext[i++]);
-						byte = (byte <<3) +OCTVAL(strtext[i++]);
-						byte = (byte <<3) +OCTVAL(strtext[i++]);
-						buffer[j++] = byte;
+						if ((ISFIRSTOCTDIGIT(strtext[i])) &&
+							(ISOCTDIGIT(strtext[i + 1])) &&
+							(ISOCTDIGIT(strtext[i + 2])))
+						{
+							int byte;
+
+							byte = OCTVAL(strtext[i++]);
+							byte = (byte <<3) +OCTVAL(strtext[i++]);
+							byte = (byte <<3) +OCTVAL(strtext[i++]);
+							buffer[j++] = byte;
+						}
 					}
-				}
 
-				/*
-				 * Note: if we see '\' followed by something that isn't a
-				 * recognized escape sequence, we loop around having done
-				 * nothing except advance i.  Therefore the something will be
-				 * emitted as ordinary data on the next cycle. Corner case:
-				 * '\' at end of string will just be discarded.
-				 */
-				break;
+					/*
+					 * Note: if we see '\' followed by something that isn't a
+					 * recognized escape sequence, we loop around having done
+					 * nothing except advance i.  Therefore the something will
+					 * be emitted as ordinary data on the next cycle. Corner
+					 * case: '\' at end of string will just be discarded.
+					 */
+					break;
 
-			default:
-				buffer[j++] = strtext[i++];
-				break;
+				default:
+					buffer[j++] = strtext[i++];
+					break;
+			}
 		}
-	}
-	buflen = j;					/* buflen is the length of the dequoted data */
+		buflen = j;				/* buflen is the length of the dequoted data */
 	}
 
 	/* Shrink the buffer to be no larger than necessary */
diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c
index 25511bea1d6729d6c9eae05f2ee46d3519fb3324..9ffcfff679829bc8fc0c5e79aecebc66d0d3a51b 100644
--- a/src/interfaces/libpq/fe-secure.c
+++ b/src/interfaces/libpq/fe-secure.c
@@ -11,7 +11,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.131 2010/01/02 16:58:12 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.132 2010/02/26 02:01:33 momjian Exp $
  *
  * NOTES
  *
@@ -159,8 +159,7 @@ struct sigpipe_info
 			pq_reset_sigpipe(&(spinfo).oldsigmask, (spinfo).sigpipe_pending, \
 							 (spinfo).got_epipe); \
 	} while (0)
-
-#else /* !ENABLE_THREAD_SAFETY */
+#else							/* !ENABLE_THREAD_SAFETY */
 
 #define DECLARE_SIGPIPE_INFO(spinfo) pqsigfunc spinfo = NULL
 
@@ -177,17 +176,14 @@ struct sigpipe_info
 		if (!SIGPIPE_MASKED(conn)) \
 			pqsignal(SIGPIPE, spinfo); \
 	} while (0)
-
-#endif	/* ENABLE_THREAD_SAFETY */
-
-#else	/* WIN32 */
+#endif   /* ENABLE_THREAD_SAFETY */
+#else							/* WIN32 */
 
 #define DECLARE_SIGPIPE_INFO(spinfo)
 #define DISABLE_SIGPIPE(conn, spinfo, failaction)
 #define REMEMBER_EPIPE(spinfo, cond)
 #define RESTORE_SIGPIPE(conn, spinfo)
-
-#endif	/* WIN32 */
+#endif   /* WIN32 */
 
 /* ------------------------------------------------------------ */
 /*			 Procedures common to all secure sessions			*/
@@ -318,6 +314,7 @@ pqsecure_read(PGconn *conn, void *ptr, size_t len)
 	if (conn->ssl)
 	{
 		int			err;
+
 		DECLARE_SIGPIPE_INFO(spinfo);
 
 		/* SSL_read can write to the socket, so we need to disable SIGPIPE */
@@ -401,6 +398,7 @@ ssize_t
 pqsecure_write(PGconn *conn, const void *ptr, size_t len)
 {
 	ssize_t		n;
+
 	DECLARE_SIGPIPE_INFO(spinfo);
 
 #ifdef USE_SSL
@@ -473,15 +471,14 @@ pqsecure_write(PGconn *conn, const void *ptr, size_t len)
 	else
 #endif
 	{
-		int		flags = 0;
+		int			flags = 0;
 
 #ifdef MSG_NOSIGNAL
 		if (conn->sigpipe_flag)
 			flags |= MSG_NOSIGNAL;
 
 retry_masked:
-
-#endif /* MSG_NOSIGNAL */
+#endif   /* MSG_NOSIGNAL */
 
 		DISABLE_SIGPIPE(conn, spinfo, return -1);
 
@@ -501,7 +498,7 @@ retry_masked:
 				flags = 0;
 				goto retry_masked;
 			}
-#endif /* MSG_NOSIGNAL */
+#endif   /* MSG_NOSIGNAL */
 
 			REMEMBER_EPIPE(spinfo, SOCK_ERRNO == EPIPE);
 		}
@@ -764,7 +761,7 @@ client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
 				char	   *err = SSLerrmessage();
 
 				printfPQExpBuffer(&conn->errorMessage,
-					 libpq_gettext("could not initialize SSL engine \"%s\": %s\n"),
+				libpq_gettext("could not initialize SSL engine \"%s\": %s\n"),
 								  engine_str, err);
 				SSLerrfree(err);
 				ENGINE_free(conn->engine);
@@ -1268,8 +1265,8 @@ open_client_SSL(PGconn *conn)
 	conn->peer_dn[sizeof(conn->peer_dn) - 1] = '\0';
 
 	r = X509_NAME_get_text_by_NID(X509_get_subject_name(conn->peer),
-							  NID_commonName, conn->peer_cn, SM_USER);
-	conn->peer_cn[SM_USER] = '\0'; /* buffer is SM_USER+1 chars! */
+								  NID_commonName, conn->peer_cn, SM_USER);
+	conn->peer_cn[SM_USER] = '\0';		/* buffer is SM_USER+1 chars! */
 	if (r == -1)
 	{
 		/* Unable to get the CN, set it to blank so it can't be used */
@@ -1278,8 +1275,8 @@ open_client_SSL(PGconn *conn)
 	else
 	{
 		/*
-		 * Reject embedded NULLs in certificate common name to prevent attacks like
-		 * CVE-2009-4034.
+		 * Reject embedded NULLs in certificate common name to prevent attacks
+		 * like CVE-2009-4034.
 		 */
 		if (r != strlen(conn->peer_cn))
 		{
diff --git a/src/interfaces/libpq/libpq-fe.h b/src/interfaces/libpq/libpq-fe.h
index c2698fe257e1c2667c31cecf44b3cd24a1cf2fe1..f32b2d3d5b58c4e9419166681a1c662093eacfdd 100644
--- a/src/interfaces/libpq/libpq-fe.h
+++ b/src/interfaces/libpq/libpq-fe.h
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-fe.h,v 1.151 2010/02/05 03:09:05 joe Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-fe.h,v 1.152 2010/02/26 02:01:33 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -227,13 +227,13 @@ typedef struct pgresAttDesc
 /* Asynchronous (non-blocking) */
 extern PGconn *PQconnectStart(const char *conninfo);
 extern PGconn *PQconnectStartParams(const char **keywords,
-			 const char **values, int expand_dbname);
+					 const char **values, int expand_dbname);
 extern PostgresPollingStatusType PQconnectPoll(PGconn *conn);
 
 /* Synchronous (blocking) */
 extern PGconn *PQconnectdb(const char *conninfo);
 extern PGconn *PQconnectdbParams(const char **keywords,
-			 const char **values, int expand_dbname);
+				  const char **values, int expand_dbname);
 extern PGconn *PQsetdbLogin(const char *pghost, const char *pgport,
 			 const char *pgoptions, const char *pgtty,
 			 const char *dbName,
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
index b19e52663491a437780ea55ace196e205eefe0dc..56ee13dbf6d756e5261ebe7c4d518e61786c486e 100644
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -12,7 +12,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.148 2010/01/15 09:19:10 heikki Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.149 2010/02/26 02:01:33 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -80,8 +80,7 @@ typedef struct
 #if (SSLEAY_VERSION_NUMBER >= 0x00907000L) && !defined(OPENSSL_NO_ENGINE)
 #define USE_SSL_ENGINE
 #endif
-
-#endif /* USE_SSL */
+#endif   /* USE_SSL */
 
 /*
  * POSTGRES backend dependent Constants.
@@ -397,10 +396,10 @@ struct pg_conn
 #ifdef USE_SSL_ENGINE
 	ENGINE	   *engine;			/* SSL engine, if any */
 #else
-	void	   *engine;			/* dummy field to keep struct the same
-								   if OpenSSL version changes */
+	void	   *engine;			/* dummy field to keep struct the same if
+								 * OpenSSL version changes */
 #endif
-#endif /* USE_SSL */
+#endif   /* USE_SSL */
 
 #ifdef ENABLE_GSS
 	gss_ctx_id_t gctx;			/* GSS context */
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 31ff7057a0944f9663c3956c892acee728e54105..449b283462e51ea72b187e95da83ca888528cafb 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -1,7 +1,7 @@
 /**********************************************************************
  * plperl.c - perl as a procedural language for PostgreSQL
  *
- *	  $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.168 2010/02/16 21:39:52 adunstan Exp $
+ *	  $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.169 2010/02/26 02:01:33 momjian Exp $
  *
  **********************************************************************/
 
@@ -133,7 +133,7 @@ static InterpState interp_state = INTERP_NONE;
 static PerlInterpreter *plperl_trusted_interp = NULL;
 static PerlInterpreter *plperl_untrusted_interp = NULL;
 static PerlInterpreter *plperl_held_interp = NULL;
-static OP *(*pp_require_orig)(pTHX) = NULL;
+static OP  *(*pp_require_orig) (pTHX) = NULL;
 static bool trusted_context;
 static HTAB *plperl_proc_hash = NULL;
 static HTAB *plperl_query_hash = NULL;
@@ -178,8 +178,8 @@ static void plperl_compile_callback(void *arg);
 static void plperl_exec_callback(void *arg);
 static void plperl_inline_callback(void *arg);
 static char *strip_trailing_ws(const char *msg);
-static OP * pp_require_safe(pTHX);
-static int restore_context(bool);
+static OP  *pp_require_safe(pTHX);
+static int	restore_context(bool);
 
 /*
  * Convert an SV to char * and verify the encoding via pg_verifymbstr()
@@ -187,15 +187,15 @@ static int restore_context(bool);
 static inline char *
 sv2text_mbverified(SV *sv)
 {
-	char * val;
-	STRLEN len;
-
-	/* The value returned here might include an
-	 * embedded nul byte, because perl allows such things.
-	 * That's OK, because pg_verifymbstr will choke on it,  If
-	 * we just used strlen() instead of getting perl's idea of
-	 * the length, whatever uses the "verified" value might
-	 * get something quite weird.
+	char	   *val;
+	STRLEN		len;
+
+	/*
+	 * The value returned here might include an embedded nul byte, because
+	 * perl allows such things. That's OK, because pg_verifymbstr will choke
+	 * on it.  If we just used strlen() instead of getting perl's idea of the
+	 * length, whatever uses the "verified" value might get something quite
+	 * weird.
 	 */
 	val = SvPV(sv, len);
 	pg_verifymbstr(val, len, false);
@@ -246,36 +246,37 @@ _PG_init(void)
 							 NULL, NULL);
 
 	DefineCustomStringVariable("plperl.on_init",
-							gettext_noop("Perl initialization code to execute when a perl interpreter is initialized."),
-							NULL,
-							&plperl_on_init,
-							NULL,
-							PGC_SIGHUP, 0,
-							NULL, NULL);
+							   gettext_noop("Perl initialization code to execute when a perl interpreter is initialized."),
+							   NULL,
+							   &plperl_on_init,
+							   NULL,
+							   PGC_SIGHUP, 0,
+							   NULL, NULL);
 
 	/*
-	 * plperl.on_plperl_init is currently PGC_SUSET to avoid issues whereby a user
-	 * who doesn't have USAGE privileges on the plperl language could possibly use
-	 * SET plperl.on_plperl_init='...' to influence the behaviour of any existing
-	 * plperl function that they can EXECUTE (which may be security definer).
-	 * Set http://archives.postgresql.org/pgsql-hackers/2010-02/msg00281.php
-	 * and the overall thread.
+	 * plperl.on_plperl_init is currently PGC_SUSET to avoid issues whereby a
+	 * user who doesn't have USAGE privileges on the plperl language could
+	 * possibly use SET plperl.on_plperl_init='...' to influence the behaviour
+	 * of any existing plperl function that they can EXECUTE (which may be
+	 * security definer). See
+	 * http://archives.postgresql.org/pgsql-hackers/2010-02/msg00281.php and
+	 * the overall thread.
 	 */
 	DefineCustomStringVariable("plperl.on_plperl_init",
-							gettext_noop("Perl initialization code to execute once when plperl is first used."),
-							NULL,
-							&plperl_on_plperl_init,
-							NULL,
-							PGC_SUSET, 0,
-							NULL, NULL);
+							   gettext_noop("Perl initialization code to execute once when plperl is first used."),
+							   NULL,
+							   &plperl_on_plperl_init,
+							   NULL,
+							   PGC_SUSET, 0,
+							   NULL, NULL);
 
 	DefineCustomStringVariable("plperl.on_plperlu_init",
-							gettext_noop("Perl initialization code to execute once when plperlu is first used."),
-							NULL,
-							&plperl_on_plperlu_init,
-							NULL,
-							PGC_SUSET, 0,
-							NULL, NULL);
+							   gettext_noop("Perl initialization code to execute once when plperlu is first used."),
+							   NULL,
+							   &plperl_on_plperlu_init,
+							   NULL,
+							   PGC_SUSET, 0,
+							   NULL, NULL);
 
 	EmitWarningsOnPlaceholders("plperl");
 
@@ -312,16 +313,16 @@ plperl_fini(int code, Datum arg)
 	elog(DEBUG3, "plperl_fini");
 
 	/*
-	 * Indicate that perl is terminating.
-	 * Disables use of spi_* functions when running END/DESTROY code.
-	 * See check_spi_usage_allowed().
-	 * Could be enabled in future, with care, using a transaction
+	 * Indicate that perl is terminating. Disables use of spi_* functions when
+	 * running END/DESTROY code. See check_spi_usage_allowed(). Could be
+	 * enabled in future, with care, using a transaction
 	 * http://archives.postgresql.org/pgsql-hackers/2010-01/msg02743.php
 	 */
 	plperl_ending = true;
 
 	/* Only perform perl cleanup if we're exiting cleanly */
-	if (code) {
+	if (code)
+	{
 		elog(DEBUG3, "plperl_fini: skipped");
 		return;
 	}
@@ -386,11 +387,14 @@ select_perl_context(bool trusted)
 	{
 #ifdef MULTIPLICITY
 		PerlInterpreter *plperl = plperl_init_interp();
-		if (trusted) {
+
+		if (trusted)
+		{
 			plperl_trusted_init();
 			plperl_trusted_interp = plperl;
 		}
-		else {
+		else
+		{
 			plperl_untrusted_init();
 			plperl_untrusted_interp = plperl;
 		}
@@ -404,20 +408,21 @@ select_perl_context(bool trusted)
 	trusted_context = trusted;
 
 	/*
-	 * Since the timing of first use of PL/Perl can't be predicted,
-	 * any database interaction during initialization is problematic.
-	 * Including, but not limited to, security definer issues.
-	 * So we only enable access to the database AFTER on_*_init code has run.
-	 * See http://archives.postgresql.org/message-id/20100127143318.GE713@timac.local
+	 * Since the timing of first use of PL/Perl can't be predicted, any
+	 * database interaction during initialization is problematic. Including,
+	 * but not limited to, security definer issues. So we only enable access
+	 * to the database AFTER on_*_init code has run. See
+	 * http://archives.postgresql.org/message-id/20100127143318.GE713@timac.local
 	 */
 	newXS("PostgreSQL::InServer::SPI::bootstrap",
-		boot_PostgreSQL__InServer__SPI, __FILE__);
+		  boot_PostgreSQL__InServer__SPI, __FILE__);
 
 	eval_pv("PostgreSQL::InServer::SPI::bootstrap()", FALSE);
 	if (SvTRUE(ERRSV))
 		ereport(ERROR,
 				(errmsg("%s", strip_trailing_ws(SvPV_nolen(ERRSV))),
-				 errdetail("While executing PostgreSQL::InServer::SPI::bootstrap.")));
+		errdetail("While executing PostgreSQL::InServer::SPI::bootstrap.")));
 }
 
 /*
@@ -427,34 +432,37 @@ static int
 restore_context(bool trusted)
 {
 	if (interp_state == INTERP_BOTH ||
-		( trusted && interp_state == INTERP_TRUSTED) ||
+		(trusted && interp_state == INTERP_TRUSTED) ||
 		(!trusted && interp_state == INTERP_UNTRUSTED))
 	{
 		if (trusted_context != trusted)
 		{
-			if (trusted) {
+			if (trusted)
+			{
 				PERL_SET_CONTEXT(plperl_trusted_interp);
 				PL_ppaddr[OP_REQUIRE] = pp_require_safe;
 			}
-			else {
+			else
+			{
 				PERL_SET_CONTEXT(plperl_untrusted_interp);
 				PL_ppaddr[OP_REQUIRE] = pp_require_orig;
 			}
 			trusted_context = trusted;
 		}
-		return 1; /* context restored */
+		return 1;				/* context restored */
 	}
 
-	return 0;     /* unable - appropriate interpreter not available */
+	return 0;					/* unable - appropriate interpreter not
+								 * available */
 }
 
 static PerlInterpreter *
 plperl_init_interp(void)
 {
 	PerlInterpreter *plperl;
-	static int perl_sys_init_done;
+	static int	perl_sys_init_done;
 
-	static char *embedding[3+2] = {
+	static char *embedding[3 + 2] = {
 		"", "-e", PLC_PERLBOOT
 	};
 	int			nargs = 3;
@@ -525,7 +533,7 @@ plperl_init_interp(void)
 		PERL_SYS_INIT3(&nargs, (char ***) &embedding, (char ***) &dummy_env);
 		perl_sys_init_done = 1;
 		/* quiet warning if PERL_SYS_INIT3 doesn't use the third argument */
-		dummy_env[0] = NULL; 
+		dummy_env[0] = NULL;
 	}
 #endif
 
@@ -540,8 +548,8 @@ plperl_init_interp(void)
 	PL_exit_flags |= PERL_EXIT_DESTRUCT_END;
 
 	/*
-	 * Record the original function for the 'require' opcode.
-	 * Ensure it's used for new interpreters.
+	 * Record the original function for the 'require' opcode. Ensure it's used
+	 * for new interpreters.
 	 */
 	if (!pp_require_orig)
 		pp_require_orig = PL_ppaddr[OP_REQUIRE];
@@ -549,7 +557,7 @@ plperl_init_interp(void)
 		PL_ppaddr[OP_REQUIRE] = pp_require_orig;
 
 	if (perl_parse(plperl, plperl_init_shared_libs,
-			   nargs, embedding, NULL) != 0)
+				   nargs, embedding, NULL) != 0)
 		ereport(ERROR,
 				(errmsg("%s", strip_trailing_ws(SvPV_nolen(ERRSV))),
 				 errcontext("While parsing perl initialization.")));
@@ -611,18 +619,20 @@ plperl_init_interp(void)
  * If not, it'll die.
  * So now "use Foo;" will work iff Foo has already been loaded.
  */
-static OP *
+static OP  *
 pp_require_safe(pTHX)
 {
-	dVAR; dSP;
-	SV *sv, **svp;
-	char *name;
-	STRLEN len;
+	dVAR;
+	dSP;
+	SV		   *sv,
+			  **svp;
+	char	   *name;
+	STRLEN		len;
 
-    sv = POPs;
-    name = SvPV(sv, len);
-    if (!(name && len > 0 && *name))
-        RETPUSHNO;
+	sv = POPs;
+	name = SvPV(sv, len);
+	if (!(name && len > 0 && *name))
+		RETPUSHNO;
 
 	svp = hv_fetch(GvHVn(PL_incgv), name, len, 0);
 	if (svp && *svp != &PL_sv_undef)
@@ -638,22 +648,23 @@ plperl_destroy_interp(PerlInterpreter **interp)
 	if (interp && *interp)
 	{
 		/*
-		 * Only a very minimal destruction is performed:
-		 * - just call END blocks.
+		 * Only a very minimal destruction is performed: just call END
+		 * blocks.
 		 *
-		 * We could call perl_destruct() but we'd need to audit its
-		 * actions very carefully and work-around any that impact us.
-		 * (Calling sv_clean_objs() isn't an option because it's not
-		 * part of perl's public API so isn't portably available.)
-		 * Meanwhile END blocks can be used to perform manual cleanup.
+		 * We could call perl_destruct() but we'd need to audit its actions
+		 * very carefully and work-around any that impact us. (Calling
+		 * sv_clean_objs() isn't an option because it's not part of perl's
+		 * public API so isn't portably available.) Meanwhile END blocks can
+		 * be used to perform manual cleanup.
 		 */
 
 		PERL_SET_CONTEXT(*interp);
 
 		/* Run END blocks - based on perl's perl_destruct() */
-		if (PL_exit_flags & PERL_EXIT_DESTRUCT_END) {
+		if (PL_exit_flags & PERL_EXIT_DESTRUCT_END)
+		{
 			dJMPENV;
-			int x = 0;
+			int			x = 0;
 
 			JMPENV_PUSH(x);
 			PERL_UNUSED_VAR(x);
@@ -675,15 +686,16 @@ plperl_trusted_init(void)
 	SV		   *safe_version_sv;
 	IV			safe_version_x100;
 
-	safe_version_sv = eval_pv(SAFE_MODULE, FALSE);/* TRUE = croak if failure */
-	safe_version_x100 = (int)(SvNV(safe_version_sv) * 100);
+	safe_version_sv = eval_pv(SAFE_MODULE, FALSE);		/* TRUE = croak if
+														 * failure */
+	safe_version_x100 = (int) (SvNV(safe_version_sv) * 100);
 
 	/*
-	 * Reject too-old versions of Safe and some others:
-	 * 2.20: http://rt.perl.org/rt3/Ticket/Display.html?id=72068
-	 * 2.21: http://rt.perl.org/rt3/Ticket/Display.html?id=72700
+	 * Reject too-old versions of Safe and some others:
+	 * 2.20: http://rt.perl.org/rt3/Ticket/Display.html?id=72068
+	 * 2.21: http://rt.perl.org/rt3/Ticket/Display.html?id=72700
 	 */
-	if (safe_version_x100 < 209 || safe_version_x100 == 220 || 
+	if (safe_version_x100 < 209 || safe_version_x100 == 220 ||
 		safe_version_x100 == 221)
 	{
 		/* not safe, so disallow all trusted funcs */
@@ -732,7 +744,7 @@ plperl_trusted_init(void)
 			if (SvTRUE(ERRSV))
 				ereport(ERROR,
 						(errmsg("%s", strip_trailing_ws(SvPV_nolen(ERRSV))),
-						 errcontext("While executing plperl.on_plperl_init.")));
+					  errcontext("While executing plperl.on_plperl_init.")));
 		}
 
 	}
@@ -812,6 +824,7 @@ plperl_convert_to_pg_array(SV *src)
 {
 	SV		   *rv;
 	int			count;
+
 	dSP;
 
 	PUSHMARK(SP);
@@ -848,7 +861,7 @@ plperl_trigger_build_args(FunctionCallInfo fcinfo)
 	HV		   *hv;
 
 	hv = newHV();
-	hv_ksplit(hv, 12); /* pre-grow the hash */
+	hv_ksplit(hv, 12);			/* pre-grow the hash */
 
 	tdata = (TriggerData *) fcinfo->context;
 	tupdesc = tdata->tg_relation->rd_att;
@@ -1077,7 +1090,7 @@ plperl_inline_handler(PG_FUNCTION_ARGS)
 {
 	InlineCodeBlock *codeblock = (InlineCodeBlock *) PG_GETARG_POINTER(0);
 	FunctionCallInfoData fake_fcinfo;
-	FmgrInfo flinfo;
+	FmgrInfo	flinfo;
 	plperl_proc_desc desc;
 	plperl_call_data *save_call_data = current_call_data;
 	bool		oldcontext = trusted_context;
@@ -1236,24 +1249,24 @@ static void
 plperl_create_sub(plperl_proc_desc *prodesc, char *s, Oid fn_oid)
 {
 	dSP;
-	bool        trusted = prodesc->lanpltrusted;
-	char        subname[NAMEDATALEN+40];
-	HV         *pragma_hv = newHV();
-	SV         *subref = NULL;
-	int         count;
-	char       *compile_sub;
+	bool		trusted = prodesc->lanpltrusted;
+	char		subname[NAMEDATALEN + 40];
+	HV		   *pragma_hv = newHV();
+	SV		   *subref = NULL;
+	int			count;
+	char	   *compile_sub;
 
 	sprintf(subname, "%s__%u", prodesc->proname, fn_oid);
 
 	if (plperl_use_strict)
-		hv_store_string(pragma_hv, "strict", (SV*)newAV());
+		hv_store_string(pragma_hv, "strict", (SV *) newAV());
 
 	ENTER;
 	SAVETMPS;
 	PUSHMARK(SP);
-	EXTEND(SP,4);
+	EXTEND(SP, 4);
 	PUSHs(sv_2mortal(newSVstring(subname)));
-	PUSHs(sv_2mortal(newRV_noinc((SV*)pragma_hv)));
+	PUSHs(sv_2mortal(newRV_noinc((SV *) pragma_hv)));
 	PUSHs(sv_2mortal(newSVstring("our $_TD; local $_TD=shift;")));
 	PUSHs(sv_2mortal(newSVstring(s)));
 	PUTBACK;
@@ -1269,10 +1282,14 @@ plperl_create_sub(plperl_proc_desc *prodesc, char *s, Oid fn_oid)
 	count = perl_call_pv(compile_sub, G_SCALAR | G_EVAL | G_KEEPERR);
 	SPAGAIN;
 
-	if (count == 1) {
-		GV *sub_glob = (GV*)POPs;
-		if (sub_glob && SvTYPE(sub_glob) == SVt_PVGV) {
-			SV *sv = (SV*)GvCVu((GV*)sub_glob);
+	if (count == 1)
+	{
+		GV		   *sub_glob = (GV *) POPs;
+
+		if (sub_glob && SvTYPE(sub_glob) == SVt_PVGV)
+		{
+			SV		   *sv = (SV *) GvCVu((GV *) sub_glob);
+
 			if (sv)
 				subref = newRV_inc(sv);
 		}
@@ -1316,7 +1333,7 @@ plperl_init_shared_libs(pTHX)
 
 	newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file);
 	newXS("PostgreSQL::InServer::Util::bootstrap",
-		boot_PostgreSQL__InServer__Util, file);
+		  boot_PostgreSQL__InServer__Util, file);
 	/* newXS for...::SPI::bootstrap is in select_perl_context() */
 }
 
@@ -1794,7 +1811,8 @@ compile_plperl_function(Oid fn_oid, bool is_trigger)
 		{
 			hash_search(plperl_proc_hash, internal_proname,
 						HASH_REMOVE, NULL);
-			if (prodesc->reference) {
+			if (prodesc->reference)
+			{
 				select_perl_context(prodesc->lanpltrusted);
 				SvREFCNT_dec(prodesc->reference);
 				restore_context(oldcontext);
@@ -1864,7 +1882,7 @@ compile_plperl_function(Oid fn_oid, bool is_trigger)
 		{
 			typeTup =
 				SearchSysCache1(TYPEOID,
-							    ObjectIdGetDatum(procStruct->prorettype));
+								ObjectIdGetDatum(procStruct->prorettype));
 			if (!HeapTupleIsValid(typeTup))
 			{
 				free(prodesc->proname);
@@ -1924,7 +1942,7 @@ compile_plperl_function(Oid fn_oid, bool is_trigger)
 			for (i = 0; i < prodesc->nargs; i++)
 			{
 				typeTup = SearchSysCache1(TYPEOID,
-						 ObjectIdGetDatum(procStruct->proargtypes.values[i]));
+						ObjectIdGetDatum(procStruct->proargtypes.values[i]));
 				if (!HeapTupleIsValid(typeTup))
 				{
 					free(prodesc->proname);
@@ -2011,7 +2029,7 @@ plperl_hash_from_tuple(HeapTuple tuple, TupleDesc tupdesc)
 	int			i;
 
 	hv = newHV();
-	hv_ksplit(hv, tupdesc->natts); /* pre-grow the hash */
+	hv_ksplit(hv, tupdesc->natts);		/* pre-grow the hash */
 
 	for (i = 0; i < tupdesc->natts; i++)
 	{
@@ -2054,7 +2072,8 @@ static void
 check_spi_usage_allowed()
 {
 	/* see comment in plperl_fini() */
-	if (plperl_ending) {
+	if (plperl_ending)
+	{
 		/* simple croak as we don't want to involve PostgreSQL code */
 		croak("SPI functions can not be used in END blocks");
 	}
@@ -2987,7 +3006,8 @@ hv_fetch_string(HV *hv, const char *key)
 static void
 plperl_exec_callback(void *arg)
 {
-	char *procname = (char *) arg;
+	char	   *procname = (char *) arg;
+
 	if (procname)
 		errcontext("PL/Perl function \"%s\"", procname);
 }
@@ -2998,7 +3018,8 @@ plperl_exec_callback(void *arg)
 static void
 plperl_compile_callback(void *arg)
 {
-	char *procname = (char *) arg;
+	char	   *procname = (char *) arg;
+
 	if (procname)
 		errcontext("compilation of PL/Perl function \"%s\"", procname);
 }
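
The plperl.c hunks above all apply the same layout rules: pointer stars cuddled against the variable name, declarators aligned in a tab column, opening braces moved onto their own line, and trailing comments pushed out to a fixed column. A small hand-written C sketch of the layout these hunks converge on (illustrative only, hypothetical function; not output of the indent tool itself):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static char *
copy_if_wanted(const char *src, bool wanted)
{
	char	   *val;			/* star attaches to the name, not the type */
	size_t		len;

	if (!wanted)
		return NULL;

	len = strlen(src);
	val = malloc(len + 1);
	if (val == NULL)
	{							/* opening brace on its own line */
		return NULL;
	}
	memcpy(val, src, len + 1);
	return val;					/* caller frees */
}
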
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index f0cf7c4ab77c218b3db4d569092c3da1133be074..25d2760cb72d9af46d5f99d32aec965e71f2a5d7 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.149 2010/02/14 18:42:18 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.150 2010/02/26 02:01:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -332,8 +332,8 @@ do_compile(FunctionCallInfo fcinfo,
 	plpgsql_curr_compile = function;
 
 	/*
-	 * All the permanent output of compilation (e.g. parse tree) is kept in
-	 * a per-function memory context, so it can be reclaimed easily.
+	 * All the permanent output of compilation (e.g. parse tree) is kept in a
+	 * per-function memory context, so it can be reclaimed easily.
 	 */
 	func_cxt = AllocSetContextCreate(TopMemoryContext,
 									 "PL/PgSQL function context",
@@ -364,7 +364,7 @@ do_compile(FunctionCallInfo fcinfo,
 	plpgsql_nDatums = 0;
 	/* This is short-lived, so needn't allocate in function's cxt */
 	plpgsql_Datums = MemoryContextAlloc(compile_tmp_cxt,
-										sizeof(PLpgSQL_datum *) * datums_alloc);
+									 sizeof(PLpgSQL_datum *) * datums_alloc);
 	datums_last = 0;
 
 	switch (is_trigger)
@@ -640,7 +640,7 @@ do_compile(FunctionCallInfo fcinfo,
 
 			/* Add the variable tg_argv */
 			var = plpgsql_build_variable("tg_argv", 0,
-										 plpgsql_build_datatype(TEXTARRAYOID, -1),
+									plpgsql_build_datatype(TEXTARRAYOID, -1),
 										 true);
 			function->tg_argv_varno = var->dno;
 
@@ -800,8 +800,8 @@ plpgsql_compile_inline(char *proc_source)
 	fmgr_info(typinput, &(function->fn_retinput));
 
 	/*
-	 * Remember if function is STABLE/IMMUTABLE.  XXX would it be better
-	 * to set this TRUE inside a read-only transaction?  Not clear.
+	 * Remember if function is STABLE/IMMUTABLE.  XXX would it be better to
+	 * set this TRUE inside a read-only transaction?  Not clear.
 	 */
 	function->fn_readonly = false;
 
@@ -970,8 +970,8 @@ plpgsql_post_column_ref(ParseState *pstate, ColumnRef *cref, Node *var)
 	if (myvar != NULL && var != NULL)
 	{
 		/*
-		 * We could leave it to the core parser to throw this error, but
-		 * we can add a more useful detail message than the core could.
+		 * We could leave it to the core parser to throw this error, but we
+		 * can add a more useful detail message than the core could.
 		 */
 		ereport(ERROR,
 				(errcode(ERRCODE_AMBIGUOUS_COLUMN),
@@ -1147,8 +1147,8 @@ resolve_column_ref(PLpgSQL_expr *expr, ColumnRef *cref)
 				/*
 				 * We should not get here, because a RECFIELD datum should
 				 * have been built at parse time for every possible qualified
-				 * reference to fields of this record.  But if we do, fall
-				 * out and return NULL.
+				 * reference to fields of this record.	But if we do, fall out
+				 * and return NULL.
 				 */
 			}
 			break;
@@ -1239,8 +1239,8 @@ plpgsql_parse_word(char *word1, const char *yytxt,
 
 	/*
 	 * We should do nothing in DECLARE sections.  In SQL expressions, there's
-	 * no need to do anything either --- lookup will happen when the expression
-	 * is compiled.
+	 * no need to do anything either --- lookup will happen when the
+	 * expression is compiled.
 	 */
 	if (plpgsql_IdentifierLookup == IDENTIFIER_LOOKUP_NORMAL)
 	{
@@ -1299,9 +1299,9 @@ plpgsql_parse_dblword(char *word1, char *word2,
 						makeString(word2));
 
 	/*
-	 * We should do nothing in DECLARE sections.  In SQL expressions,
-	 * we really only need to make sure that RECFIELD datums are created
-	 * when needed.
+	 * We should do nothing in DECLARE sections.  In SQL expressions, we
+	 * really only need to make sure that RECFIELD datums are created when
+	 * needed.
 	 */
 	if (plpgsql_IdentifierLookup != IDENTIFIER_LOOKUP_DECLARE)
 	{
@@ -1319,7 +1319,7 @@ plpgsql_parse_dblword(char *word1, char *word2,
 					/* Block-qualified reference to scalar variable. */
 					wdatum->datum = plpgsql_Datums[ns->itemno];
 					wdatum->ident = NULL;
-					wdatum->quoted = false; /* not used */
+					wdatum->quoted = false;		/* not used */
 					wdatum->idents = idents;
 					return true;
 
@@ -1349,7 +1349,7 @@ plpgsql_parse_dblword(char *word1, char *word2,
 						wdatum->datum = plpgsql_Datums[ns->itemno];
 					}
 					wdatum->ident = NULL;
-					wdatum->quoted = false; /* not used */
+					wdatum->quoted = false;		/* not used */
 					wdatum->idents = idents;
 					return true;
 
@@ -1357,8 +1357,8 @@ plpgsql_parse_dblword(char *word1, char *word2,
 					if (nnames == 1)
 					{
 						/*
-						 * First word is a row name, so second word could be
-						 * a field in this row.  Again, no error now if it
+						 * First word is a row name, so second word could be a
+						 * field in this row.  Again, no error now if it
 						 * isn't.
 						 */
 						PLpgSQL_row *row;
@@ -1420,9 +1420,9 @@ plpgsql_parse_tripword(char *word1, char *word2, char *word3,
 						makeString(word3));
 
 	/*
-	 * We should do nothing in DECLARE sections.  In SQL expressions,
-	 * we really only need to make sure that RECFIELD datums are created
-	 * when needed.
+	 * We should do nothing in DECLARE sections.  In SQL expressions, we
+	 * really only need to make sure that RECFIELD datums are created when
+	 * needed.
 	 */
 	if (plpgsql_IdentifierLookup != IDENTIFIER_LOOKUP_DECLARE)
 	{
@@ -1438,52 +1438,52 @@ plpgsql_parse_tripword(char *word1, char *word2, char *word3,
 			switch (ns->itemtype)
 			{
 				case PLPGSQL_NSTYPE_REC:
-				{
-					/*
-					 * words 1/2 are a record name, so third word could be a
-					 * field in this record.
-					 */
-					PLpgSQL_recfield *new;
+					{
+						/*
+						 * words 1/2 are a record name, so third word could be
+						 * a field in this record.
+						 */
+						PLpgSQL_recfield *new;
 
-					new = palloc(sizeof(PLpgSQL_recfield));
-					new->dtype = PLPGSQL_DTYPE_RECFIELD;
-					new->fieldname = pstrdup(word3);
-					new->recparentno = ns->itemno;
+						new = palloc(sizeof(PLpgSQL_recfield));
+						new->dtype = PLPGSQL_DTYPE_RECFIELD;
+						new->fieldname = pstrdup(word3);
+						new->recparentno = ns->itemno;
 
-					plpgsql_adddatum((PLpgSQL_datum *) new);
+						plpgsql_adddatum((PLpgSQL_datum *) new);
 
-					wdatum->datum = (PLpgSQL_datum *) new;
-					wdatum->ident = NULL;
-					wdatum->quoted = false; /* not used */
-					wdatum->idents = idents;
-					return true;
-				}
+						wdatum->datum = (PLpgSQL_datum *) new;
+						wdatum->ident = NULL;
+						wdatum->quoted = false; /* not used */
+						wdatum->idents = idents;
+						return true;
+					}
 
 				case PLPGSQL_NSTYPE_ROW:
-				{
-					/*
-					 * words 1/2 are a row name, so third word could be a
-					 * field in this row.
-					 */
-					PLpgSQL_row *row;
-					int			i;
-
-					row = (PLpgSQL_row *) (plpgsql_Datums[ns->itemno]);
-					for (i = 0; i < row->nfields; i++)
 					{
-						if (row->fieldnames[i] &&
-							strcmp(row->fieldnames[i], word3) == 0)
+						/*
+						 * words 1/2 are a row name, so third word could be a
+						 * field in this row.
+						 */
+						PLpgSQL_row *row;
+						int			i;
+
+						row = (PLpgSQL_row *) (plpgsql_Datums[ns->itemno]);
+						for (i = 0; i < row->nfields; i++)
 						{
-							wdatum->datum = plpgsql_Datums[row->varnos[i]];
-							wdatum->ident = NULL;
-							wdatum->quoted = false; /* not used */
-							wdatum->idents = idents;
-							return true;
+							if (row->fieldnames[i] &&
+								strcmp(row->fieldnames[i], word3) == 0)
+							{
+								wdatum->datum = plpgsql_Datums[row->varnos[i]];
+								wdatum->ident = NULL;
+								wdatum->quoted = false; /* not used */
+								wdatum->idents = idents;
+								return true;
+							}
 						}
+						/* fall through to return CWORD */
+						break;
 					}
-					/* fall through to return CWORD */
-					break;
-				}
 
 				default:
 					break;
@@ -1533,8 +1533,8 @@ plpgsql_parse_wordtype(char *ident)
 	}
 
 	/*
-	 * Word wasn't found in the namespace stack. Try to find a data type
-	 * with that name, but ignore shell types and complex types.
+	 * Word wasn't found in the namespace stack. Try to find a data type with
+	 * that name, but ignore shell types and complex types.
 	 */
 	typeTup = LookupTypeName(NULL, makeTypeName(ident), NULL);
 	if (typeTup)
@@ -1586,9 +1586,9 @@ plpgsql_parse_cwordtype(List *idents)
 	if (list_length(idents) == 2)
 	{
 		/*
-		 * Do a lookup in the current namespace stack.
-		 * We don't need to check number of names matched, because we will
-		 * only consider scalar variables.
+		 * Do a lookup in the current namespace stack. We don't need to check
+		 * number of names matched, because we will only consider scalar
+		 * variables.
 		 */
 		nse = plpgsql_ns_lookup(plpgsql_ns_top(), false,
 								strVal(linitial(idents)),
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index a271f57151c9d769feefd7147fe4302a3e69e1f7..bcbf6126da5ae44706349e413e0c7e8fc7b4aa0a 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.255 2010/02/12 19:37:36 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.256 2010/02/26 02:01:34 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -171,7 +171,7 @@ static int exec_run_select(PLpgSQL_execstate *estate,
 static int exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt,
 			   Portal portal, bool prefetch_ok);
 static ParamListInfo setup_param_list(PLpgSQL_execstate *estate,
-									  PLpgSQL_expr *expr);
+				 PLpgSQL_expr *expr);
 static void plpgsql_param_fetch(ParamListInfo params, int paramid);
 static void exec_move_row(PLpgSQL_execstate *estate,
 			  PLpgSQL_rec *rec,
@@ -515,10 +515,10 @@ plpgsql_exec_trigger(PLpgSQL_function *func,
 	/*
 	 * Put the OLD and NEW tuples into record variables
 	 *
-	 * We make the tupdescs available in both records even though only one
-	 * may have a value.  This allows parsing of record references to succeed
-	 * in functions that are used for multiple trigger types.  For example,
-	 * we might have a test like "if (TG_OP = 'INSERT' and NEW.foo = 'xyz')",
+	 * We make the tupdescs available in both records even though only one may
+	 * have a value.  This allows parsing of record references to succeed in
+	 * functions that are used for multiple trigger types.	For example, we
+	 * might have a test like "if (TG_OP = 'INSERT' and NEW.foo = 'xyz')",
 	 * which should parse regardless of the current trigger type.
 	 */
 	rec_new = (PLpgSQL_rec *) (estate.datums[func->new_varno]);
@@ -1968,8 +1968,8 @@ exec_stmt_forc(PLpgSQL_execstate *estate, PLpgSQL_stmt_forc *stmt)
 		exec_prepare_plan(estate, query, curvar->cursor_options);
 
 	/*
-	 * Set up ParamListInfo (note this is only carrying a hook function,
-	 * not any actual data values, at this point)
+	 * Set up ParamListInfo (note this is only carrying a hook function, not
+	 * any actual data values, at this point)
 	 */
 	paramLI = setup_param_list(estate, query);
 
@@ -2343,7 +2343,7 @@ exec_stmt_return_query(PLpgSQL_execstate *estate,
 
 	tupmap = convert_tuples_by_position(portal->tupDesc,
 										estate->rettupdesc,
-										gettext_noop("structure of query does not match function result type"));
+	 gettext_noop("structure of query does not match function result type"));
 
 	while (true)
 	{
@@ -2398,11 +2398,11 @@ exec_init_tuple_store(PLpgSQL_execstate *estate)
 				 errmsg("set-valued function called in context that cannot accept a set")));
 
 	/*
-	 * Switch to the right memory context and resource owner for storing
-	 * the tuplestore for return set. If we're within a subtransaction opened
-	 * for an exception-block, for example, we must still create the
-	 * tuplestore in the resource owner that was active when this function was
-	 * entered, and not in the subtransaction resource owner.
+	 * Switch to the right memory context and resource owner for storing the
+	 * tuplestore for return set. If we're within a subtransaction opened for
+	 * an exception-block, for example, we must still create the tuplestore in
+	 * the resource owner that was active when this function was entered, and
+	 * not in the subtransaction resource owner.
 	 */
 	oldcxt = MemoryContextSwitchTo(estate->tuple_store_cxt);
 	oldowner = CurrentResourceOwner;
@@ -2445,7 +2445,7 @@ exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt)
 
 	if (stmt->message)
 	{
-		StringInfoData	ds;
+		StringInfoData ds;
 		ListCell   *current_param;
 		char	   *cp;
 
@@ -2718,8 +2718,8 @@ exec_prepare_plan(PLpgSQL_execstate *estate,
 	SPIPlanPtr	plan;
 
 	/*
-	 * The grammar can't conveniently set expr->func while building the
-	 * parse tree, so make sure it's set before parser hooks need it.
+	 * The grammar can't conveniently set expr->func while building the parse
+	 * tree, so make sure it's set before parser hooks need it.
 	 */
 	expr->func = estate->func;
 
@@ -2800,8 +2800,8 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
 	}
 
 	/*
-	 * Set up ParamListInfo (note this is only carrying a hook function,
-	 * not any actual data values, at this point)
+	 * Set up ParamListInfo (note this is only carrying a hook function, not
+	 * any actual data values, at this point)
 	 */
 	paramLI = setup_param_list(estate, expr);
 
@@ -3266,8 +3266,8 @@ exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt)
 	}
 
 	/*
-	 * Set up ParamListInfo (note this is only carrying a hook function,
-	 * not any actual data values, at this point)
+	 * Set up ParamListInfo (note this is only carrying a hook function, not
+	 * any actual data values, at this point)
 	 */
 	paramLI = setup_param_list(estate, query);
 
@@ -4035,7 +4035,7 @@ exec_get_datum_type(PLpgSQL_execstate *estate,
 
 		default:
 			elog(ERROR, "unrecognized dtype: %d", datum->dtype);
-			typeid = InvalidOid;			/* keep compiler quiet */
+			typeid = InvalidOid;	/* keep compiler quiet */
 			break;
 	}
 
@@ -4210,8 +4210,8 @@ exec_run_select(PLpgSQL_execstate *estate,
 		exec_prepare_plan(estate, expr, 0);
 
 	/*
-	 * Set up ParamListInfo (note this is only carrying a hook function,
-	 * not any actual data values, at this point)
+	 * Set up ParamListInfo (note this is only carrying a hook function, not
+	 * any actual data values, at this point)
 	 */
 	paramLI = setup_param_list(estate, expr);
 
@@ -4497,9 +4497,9 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate,
 	}
 
 	/*
-	 * Create the param list in econtext's temporary memory context.
-	 * We won't need to free it explicitly, since it will go away at the
-	 * next reset of that context.
+	 * Create the param list in econtext's temporary memory context. We won't
+	 * need to free it explicitly, since it will go away at the next reset of
+	 * that context.
 	 *
 	 * XXX think about avoiding repeated palloc's for param lists?  It should
 	 * be possible --- this routine isn't re-entrant anymore.
@@ -4547,7 +4547,7 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate,
  *
  * The ParamListInfo array is initially all zeroes, in particular the
  * ptype values are all InvalidOid.  This causes the executor to call the
- * paramFetch hook each time it wants a value.  We thus evaluate only the
+ * paramFetch hook each time it wants a value.	We thus evaluate only the
  * parameters actually demanded.
  *
  * The result is a locally palloc'd array that should be pfree'd after use;
@@ -4559,16 +4559,16 @@ setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr)
 	ParamListInfo paramLI;
 
 	/*
-	 * Could we re-use these arrays instead of palloc'ing a new one each
-	 * time?  However, we'd have to zero the array each time anyway,
-	 * since new values might have been assigned to the variables.
+	 * Could we re-use these arrays instead of palloc'ing a new one each time?
+	 * However, we'd have to zero the array each time anyway, since new values
+	 * might have been assigned to the variables.
 	 */
 	if (estate->ndatums > 0)
 	{
 		/* sizeof(ParamListInfoData) includes the first array element */
 		paramLI = (ParamListInfo)
 			palloc0(sizeof(ParamListInfoData) +
-					(estate->ndatums - 1) * sizeof(ParamExternData));
+					(estate->ndatums - 1) * sizeof(ParamExternData));
 		paramLI->paramFetch = plpgsql_param_fetch;
 		paramLI->paramFetchArg = (void *) estate;
 		paramLI->parserSetup = (ParserSetupHook) plpgsql_parser_setup;
@@ -4577,15 +4577,15 @@ setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr)
 
 		/*
 		 * Set up link to active expr where the hook functions can find it.
-		 * Callers must save and restore cur_expr if there is any chance
-		 * that they are interrupting an active use of parameters.
+		 * Callers must save and restore cur_expr if there is any chance that
+		 * they are interrupting an active use of parameters.
 		 */
 		estate->cur_expr = expr;
 
 		/*
-		 * Also make sure this is set before parser hooks need it.  There
-		 * is no need to save and restore, since the value is always correct
-		 * once set.
+		 * Also make sure this is set before parser hooks need it.	There is
+		 * no need to save and restore, since the value is always correct once
+		 * set.
 		 */
 		expr->func = estate->func;
 	}
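
The reflowed comments around setup_param_list() and plpgsql_param_fetch() describe the mechanism being reindented here: the ParamListInfo is allocated zeroed, so every ptype is InvalidOid and the executor calls the paramFetch hook the first time it actually needs a value, which means only the parameters a query really uses get evaluated. A rough, self-contained analogue of that demand-driven pattern in plain C (hypothetical names throughout; this is a sketch of the idea, not the real PostgreSQL structs):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins; the real code uses ParamListInfoData/ParamExternData. */
typedef struct LazyParam
{
	int			value;
	bool		valid;			/* plays the role of ptype != InvalidOid */
} LazyParam;

typedef struct LazyParamList
{
	int			nparams;
	void		(*fetch) (struct LazyParamList *list, int paramid);
	LazyParam	params[8];		/* fixed size keeps the sketch short */
} LazyParamList;

/* Consumer side: ask for a value, invoking the hook only on first demand. */
static int
get_param(LazyParamList *list, int paramid)
{
	LazyParam  *p = &list->params[paramid];

	if (!p->valid && list->fetch != NULL)
		list->fetch(list, paramid);
	return p->value;
}

/* Provider side: compute the value (here just faked) when asked. */
static void
demo_fetch(LazyParamList *list, int paramid)
{
	list->params[paramid].value = paramid * 10;
	list->params[paramid].valid = true;
}

int
main(void)
{
	LazyParamList list = {0};	/* zeroed, like palloc0(): nothing evaluated yet */

	list.nparams = 8;
	list.fetch = demo_fetch;
	printf("param 3 = %d\n", get_param(&list, 3));	/* only param 3 is fetched */
	return 0;
}
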
@@ -4616,9 +4616,9 @@ plpgsql_param_fetch(ParamListInfo params, int paramid)
 	Assert(params->numParams == estate->ndatums);
 
 	/*
-	 * Do nothing if asked for a value that's not supposed to be used by
-	 * this SQL expression.  This avoids unwanted evaluations when functions
-	 * such as copyParamList try to materialize all the values.
+	 * Do nothing if asked for a value that's not supposed to be used by this
+	 * SQL expression.	This avoids unwanted evaluations when functions such
+	 * as copyParamList try to materialize all the values.
 	 */
 	if (!bms_is_member(dno, expr->paramnos))
 		return;
@@ -4760,6 +4760,7 @@ exec_move_row(PLpgSQL_execstate *estate,
 			{
 				value = (Datum) 0;
 				isnull = true;
+
 				/*
 				 * InvalidOid is OK because exec_assign_value doesn't care
 				 * about the type of a source NULL
@@ -5545,7 +5546,7 @@ exec_dynquery_with_params(PLpgSQL_execstate *estate,
 										   querystr,
 										   ppd->nargs, ppd->types,
 										   ppd->values, ppd->nulls,
-										   estate->readonly_func, 
+										   estate->readonly_func,
 										   cursorOptions);
 		free_params_data(ppd);
 	}
@@ -5555,7 +5556,7 @@ exec_dynquery_with_params(PLpgSQL_execstate *estate,
 										   querystr,
 										   0, NULL,
 										   NULL, NULL,
-										   estate->readonly_func, 
+										   estate->readonly_func,
 										   cursorOptions);
 	}
 
diff --git a/src/pl/plpgsql/src/pl_funcs.c b/src/pl/plpgsql/src/pl_funcs.c
index 57334523b4c9fed0239c1cac27d98fafada5aaee..155a123223a0de9c5a859dffcf83cf2093a3f150 100644
--- a/src/pl/plpgsql/src/pl_funcs.c
+++ b/src/pl/plpgsql/src/pl_funcs.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_funcs.c,v 1.89 2010/02/17 01:48:45 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_funcs.c,v 1.90 2010/02/26 02:01:35 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -23,7 +23,7 @@
  * list or "chain" (from the youngest item to the root) is accessible from
  * any one plpgsql statement.  During initial parsing of a function, ns_top
  * points to the youngest item accessible from the block currently being
- * parsed.  We store the entire tree, however, since at runtime we will need
+ * parsed.	We store the entire tree, however, since at runtime we will need
  * to access the chain that's relevant to any one statement.
  *
  * Block boundaries in the namespace chain are marked by PLPGSQL_NSTYPE_LABEL
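
The comment touched by this hunk describes the namespace structure plpgsql uses: each item links one way toward the root, ns_top points at the youngest item of the block being parsed, and a lookup walks the chain from youngest to eldest. A rough sketch of that one-way chain (hypothetical names; the real plpgsql_ns_* functions also handle labels, qualified names and block boundaries):

#include <stddef.h>
#include <string.h>

typedef struct NsItem
{
	struct NsItem *prev;		/* link toward the root (elder items) */
	const char *name;
	int			itemno;
} NsItem;

static NsItem *ns_top = NULL;	/* youngest item currently visible */

static void
ns_push(NsItem *item)
{
	item->prev = ns_top;
	ns_top = item;
}

static NsItem *
ns_lookup(const char *name)
{
	NsItem	   *i;

	for (i = ns_top; i != NULL; i = i->prev)
		if (strcmp(i->name, name) == 0)
			return i;
	return NULL;				/* not visible from this point in the chain */
}
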
diff --git a/src/pl/plpgsql/src/pl_handler.c b/src/pl/plpgsql/src/pl_handler.c
index d8443fabf44e78fe4f2e370daecfe0c765b466ab..12661d32cf4ae39970e2ca31d6567af9333d5c8d 100644
--- a/src/pl/plpgsql/src/pl_handler.c
+++ b/src/pl/plpgsql/src/pl_handler.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_handler.c,v 1.50 2010/02/14 18:42:18 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_handler.c,v 1.51 2010/02/26 02:01:35 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -34,7 +34,7 @@ static const struct config_enum_entry variable_conflict_options[] = {
 	{NULL, 0, false}
 };
 
-int		plpgsql_variable_conflict = PLPGSQL_RESOLVE_ERROR;
+int			plpgsql_variable_conflict = PLPGSQL_RESOLVE_ERROR;
 
 /* Hook for plugins */
 PLpgSQL_plugin **plugin_ptr = NULL;
diff --git a/src/pl/plpgsql/src/pl_scanner.c b/src/pl/plpgsql/src/pl_scanner.c
index 8e97ffde9154bc9a2b3d69909b949efc01b01579..9b8e15c1ac1576c4e61f02a2f586de0c84d60bf0 100644
--- a/src/pl/plpgsql/src/pl_scanner.c
+++ b/src/pl/plpgsql/src/pl_scanner.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_scanner.c,v 1.4 2010/01/10 17:15:18 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_scanner.c,v 1.5 2010/02/26 02:01:35 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -24,7 +24,7 @@
 
 
 /* Klugy flag to tell scanner how to look up identifiers */
-IdentifierLookup	plpgsql_IdentifierLookup = IDENTIFIER_LOOKUP_NORMAL;
+IdentifierLookup plpgsql_IdentifierLookup = IDENTIFIER_LOOKUP_NORMAL;
 
 /*
  * A word about keywords:
@@ -44,7 +44,7 @@ IdentifierLookup	plpgsql_IdentifierLookup = IDENTIFIER_LOOKUP_NORMAL;
  *
  * For the most part, the reserved keywords are those that start a PL/pgSQL
  * statement (and so would conflict with an assignment to a variable of the
- * same name).  We also don't sweat it much about reserving keywords that
+ * same name).	We also don't sweat it much about reserving keywords that
  * are reserved in the core grammar.  Try to avoid reserving other words.
  */
 
@@ -154,7 +154,7 @@ typedef struct
 
 /*
  * Scanner working state.  At some point we might wish to fold all this
- * into a YY_EXTRA struct.  For the moment, there is no need for plpgsql's
+ * into a YY_EXTRA struct.	For the moment, there is no need for plpgsql's
  * lexer to be re-entrant, and the notational burden of passing a yyscanner
  * pointer around is great enough to not want to do it without need.
  */
@@ -167,14 +167,14 @@ static core_yy_extra_type core_yy;
 static const char *scanorig;
 
 /* Current token's length (corresponds to plpgsql_yylval and plpgsql_yylloc) */
-static int		plpgsql_yyleng;
+static int	plpgsql_yyleng;
 
 /* Token pushback stack */
 #define MAX_PUSHBACKS 4
 
-static int			num_pushbacks;
-static int			pushback_token[MAX_PUSHBACKS];
-static TokenAuxData	pushback_auxdata[MAX_PUSHBACKS];
+static int	num_pushbacks;
+static int	pushback_token[MAX_PUSHBACKS];
+static TokenAuxData pushback_auxdata[MAX_PUSHBACKS];
 
 /* State for plpgsql_location_to_lineno() */
 static const char *cur_line_start;
@@ -322,7 +322,7 @@ plpgsql_yylex(void)
 
 /*
  * Internal yylex function.  This wraps the core lexer and adds one feature:
- * a token pushback stack.  We also make a couple of trivial single-token
+ * a token pushback stack.	We also make a couple of trivial single-token
  * translations from what the core lexer does to what we want, in particular
  * interfacing from the core_YYSTYPE to YYSTYPE union.
  */
@@ -391,7 +391,7 @@ push_back_token(int token, TokenAuxData *auxdata)
 void
 plpgsql_push_back_token(int token)
 {
-	TokenAuxData	auxdata;
+	TokenAuxData auxdata;
 
 	auxdata.lval = plpgsql_yylval;
 	auxdata.lloc = plpgsql_yylloc;
@@ -426,7 +426,7 @@ plpgsql_append_source_text(StringInfo buf,
 int
 plpgsql_scanner_errposition(int location)
 {
-	int		pos;
+	int			pos;
 
 	if (location < 0 || scanorig == NULL)
 		return 0;				/* no-op if location is unknown */
@@ -459,7 +459,7 @@ plpgsql_yyerror(const char *message)
 	{
 		ereport(ERROR,
 				(errcode(ERRCODE_SYNTAX_ERROR),
-				 /* translator: %s is typically the translation of "syntax error" */
+		/* translator: %s is typically the translation of "syntax error" */
 				 errmsg("%s at end of input", _(message)),
 				 plpgsql_scanner_errposition(plpgsql_yylloc)));
 	}
@@ -467,15 +467,15 @@ plpgsql_yyerror(const char *message)
 	{
 		/*
 		 * If we have done any lookahead then flex will have restored the
-		 * character after the end-of-token.  Zap it again so that we
-		 * report only the single token here.  This modifies scanbuf but
-		 * we no longer care about that.
+		 * character after the end-of-token.  Zap it again so that we report
+		 * only the single token here.	This modifies scanbuf but we no longer
+		 * care about that.
 		 */
 		yytext[plpgsql_yyleng] = '\0';
 
 		ereport(ERROR,
 				(errcode(ERRCODE_SYNTAX_ERROR),
-				 /* translator: first %s is typically the translation of "syntax error" */
+		/* translator: first %s is typically the translation of "syntax error" */
 				 errmsg("%s at or near \"%s\"", _(message), yytext),
 				 plpgsql_scanner_errposition(plpgsql_yylloc)));
 	}
@@ -527,10 +527,10 @@ location_lineno_init(void)
 	 * we will think "line 1" is what the programmer thinks of as line 1.
 	 *----------
 	 */
-    if (*cur_line_start == '\r')
-        cur_line_start++;
-    if (*cur_line_start == '\n')
-        cur_line_start++;
+	if (*cur_line_start == '\r')
+		cur_line_start++;
+	if (*cur_line_start == '\n')
+		cur_line_start++;
 
 	cur_line_end = strchr(cur_line_start, '\n');
 }
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
index 2aba85374687d91133b003c3570d9bce0a8b9776..16e073c210971a0fdd40ab15b4d19d3179c4f39e 100644
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.129 2010/01/19 01:35:31 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.130 2010/02/26 02:01:35 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -798,9 +798,9 @@ typedef struct
 
 typedef enum
 {
-	IDENTIFIER_LOOKUP_NORMAL,		/* normal processing of var names */
-	IDENTIFIER_LOOKUP_DECLARE,		/* In DECLARE --- don't look up names */
-	IDENTIFIER_LOOKUP_EXPR			/* In SQL expression --- special case */
+	IDENTIFIER_LOOKUP_NORMAL,	/* normal processing of var names */
+	IDENTIFIER_LOOKUP_DECLARE,	/* In DECLARE --- don't look up names */
+	IDENTIFIER_LOOKUP_EXPR		/* In SQL expression --- special case */
 } IdentifierLookup;
 
 extern IdentifierLookup plpgsql_IdentifierLookup;
@@ -834,13 +834,13 @@ extern PLpgSQL_function *plpgsql_compile(FunctionCallInfo fcinfo,
 				bool forValidator);
 extern PLpgSQL_function *plpgsql_compile_inline(char *proc_source);
 extern void plpgsql_parser_setup(struct ParseState *pstate,
-								 PLpgSQL_expr *expr);
+					 PLpgSQL_expr *expr);
 extern bool plpgsql_parse_word(char *word1, const char *yytxt,
-							   PLwdatum *wdatum, PLword *word);
+				   PLwdatum *wdatum, PLword *word);
 extern bool plpgsql_parse_dblword(char *word1, char *word2,
-								  PLwdatum *wdatum, PLcword *cword);
+					  PLwdatum *wdatum, PLcword *cword);
 extern bool plpgsql_parse_tripword(char *word1, char *word2, char *word3,
-								   PLwdatum *wdatum, PLcword *cword);
+					   PLwdatum *wdatum, PLcword *cword);
 extern PLpgSQL_type *plpgsql_parse_wordtype(char *ident);
 extern PLpgSQL_type *plpgsql_parse_cwordtype(List *idents);
 extern PLpgSQL_type *plpgsql_parse_wordrowtype(char *ident);
@@ -879,7 +879,7 @@ extern void plpgsql_xact_cb(XactEvent event, void *arg);
 extern void plpgsql_subxact_cb(SubXactEvent event, SubTransactionId mySubid,
 				   SubTransactionId parentSubid, void *arg);
 extern Oid exec_get_datum_type(PLpgSQL_execstate *estate,
-							   PLpgSQL_datum *datum);
+					PLpgSQL_datum *datum);
 extern Oid exec_get_rec_fieldtype(PLpgSQL_rec *rec, const char *fieldname,
 					   int *fieldno);
 
@@ -893,10 +893,10 @@ extern void plpgsql_ns_pop(void);
 extern PLpgSQL_nsitem *plpgsql_ns_top(void);
 extern void plpgsql_ns_additem(int itemtype, int itemno, const char *name);
 extern PLpgSQL_nsitem *plpgsql_ns_lookup(PLpgSQL_nsitem *ns_cur, bool localmode,
-										 const char *name1, const char *name2,
-										 const char *name3, int *names_used);
+				  const char *name1, const char *name2,
+				  const char *name3, int *names_used);
 extern PLpgSQL_nsitem *plpgsql_ns_lookup_label(PLpgSQL_nsitem *ns_cur,
-											   const char *name);
+						const char *name);
 
 /* ----------
  * Other functions in pl_funcs.c
@@ -913,7 +913,7 @@ extern int	plpgsql_base_yylex(void);
 extern int	plpgsql_yylex(void);
 extern void plpgsql_push_back_token(int token);
 extern void plpgsql_append_source_text(StringInfo buf,
-									   int startlocation, int endlocation);
+						   int startlocation, int endlocation);
 extern int	plpgsql_scanner_errposition(int location);
 extern void plpgsql_yyerror(const char *message);
 extern int	plpgsql_location_to_lineno(int location);
diff --git a/src/pl/plpython/plpython.c b/src/pl/plpython/plpython.c
index 4bba81aceb79d72e24df2404d3bbb2d16560a7f4..c9adb533f77a16a79a8c6ea1aeafd53e397d9783 100644
--- a/src/pl/plpython/plpython.c
+++ b/src/pl/plpython/plpython.c
@@ -1,7 +1,7 @@
 /**********************************************************************
  * plpython.c - python as a procedural language for PostgreSQL
  *
- *	$PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.138 2010/02/18 23:50:06 tgl Exp $
+ *	$PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.139 2010/02/26 02:01:36 momjian Exp $
  *
  *********************************************************************
  */
@@ -79,7 +79,7 @@ typedef int Py_ssize_t;
  * definition is for Python <=2.5
  */
 #ifndef PyVarObject_HEAD_INIT
-#define PyVarObject_HEAD_INIT(type, size) 		\
+#define PyVarObject_HEAD_INIT(type, size)		\
 		PyObject_HEAD_INIT(type) size,
 #endif
 
@@ -122,7 +122,7 @@ PG_MODULE_MAGIC;
  */
 
 struct PLyDatumToOb;
-typedef PyObject *(*PLyDatumToObFunc) (struct PLyDatumToOb*, Datum);
+typedef PyObject *(*PLyDatumToObFunc) (struct PLyDatumToOb *, Datum);
 
 typedef struct PLyDatumToOb
 {
@@ -154,9 +154,9 @@ typedef union PLyTypeInput
 
 struct PLyObToDatum;
 struct PLyTypeInfo;
-typedef Datum (*PLyObToDatumFunc) (struct PLyTypeInfo*,
-								   struct PLyObToDatum*,
-								   PyObject *);
+typedef Datum (*PLyObToDatumFunc) (struct PLyTypeInfo *,
+											   struct PLyObToDatum *,
+											   PyObject *);
 
 typedef struct PLyObToDatum
 {
@@ -189,9 +189,10 @@ typedef struct PLyTypeInfo
 {
 	PLyTypeInput in;
 	PLyTypeOutput out;
+
 	/*
-	 * is_rowtype can be: -1 = not known yet (initial state); 0 = scalar datatype;
-	 * 1 = rowtype; 2 = rowtype, but I/O functions not set up yet
+	 * is_rowtype can be: -1 = not known yet (initial state); 0 = scalar
+	 * datatype; 1 = rowtype; 2 = rowtype, but I/O functions not set up yet
 	 */
 	int			is_rowtype;
 } PLyTypeInfo;
@@ -286,9 +287,10 @@ static void *PLy_malloc0(size_t);
 static char *PLy_strdup(const char *);
 static void PLy_free(void *);
 
-static PyObject*PLyUnicode_Str(PyObject *unicode);
-static PyObject*PLyUnicode_Bytes(PyObject *unicode);
+static PyObject *PLyUnicode_Str(PyObject *unicode);
+static PyObject *PLyUnicode_Bytes(PyObject *unicode);
 static char *PLyUnicode_AsString(PyObject *unicode);
+
 #if PY_MAJOR_VERSION >= 3
 static PyObject *PLyUnicode_FromString(const char *s);
 #endif
@@ -340,13 +342,13 @@ static PyObject *PLyList_FromArray(PLyDatumToOb *arg, Datum d);
 static PyObject *PLyDict_FromTuple(PLyTypeInfo *, HeapTuple, TupleDesc);
 
 static Datum PLyObject_ToBool(PLyTypeInfo *, PLyObToDatum *,
-							  PyObject *);
+				 PyObject *);
 static Datum PLyObject_ToBytea(PLyTypeInfo *, PLyObToDatum *,
-							   PyObject *);
+				  PyObject *);
 static Datum PLyObject_ToDatum(PLyTypeInfo *, PLyObToDatum *,
-							   PyObject *);
+				  PyObject *);
 static Datum PLySequence_ToArray(PLyTypeInfo *, PLyObToDatum *,
-								 PyObject *);
+					PyObject *);
 
 static HeapTuple PLyMapping_ToTuple(PLyTypeInfo *, PyObject *);
 static HeapTuple PLySequence_ToTuple(PLyTypeInfo *, PyObject *);
@@ -451,11 +453,11 @@ plpython_call_handler(PG_FUNCTION_ARGS)
 	save_curr_proc = PLy_curr_procedure;
 
 	/*
-     * Setup error traceback support for ereport()
-     */
-    plerrcontext.callback = plpython_error_callback;
-    plerrcontext.previous = error_context_stack;
-    error_context_stack = &plerrcontext;
+	 * Setup error traceback support for ereport()
+	 */
+	plerrcontext.callback = plpython_error_callback;
+	plerrcontext.previous = error_context_stack;
+	error_context_stack = &plerrcontext;
 
 	PG_TRY();
 	{
@@ -491,7 +493,7 @@ plpython_call_handler(PG_FUNCTION_ARGS)
 	PG_END_TRY();
 
 	/* Pop the error context stack */
-    error_context_stack = plerrcontext.previous;
+	error_context_stack = plerrcontext.previous;
 
 	PLy_curr_procedure = save_curr_proc;
 
@@ -707,7 +709,7 @@ PLy_modify_tuple(PLyProcedure *proc, PyObject *pltd, TriggerData *tdata,
 			{
 				ereport(ERROR,
 						(errmsg("TD[\"new\"] dictionary key at ordinal position %d is not a string", i)));
-				plattstr = NULL; /* keep compiler quiet */
+				plattstr = NULL;	/* keep compiler quiet */
 			}
 			attn = SPI_fnumber(tupdesc, plattstr);
 			if (attn == SPI_ERROR_NOATTRIBUTE)
@@ -732,6 +734,7 @@ PLy_modify_tuple(PLyProcedure *proc, PyObject *pltd, TriggerData *tdata,
 			else if (plval != Py_None)
 			{
 				PLyObToDatum *att = &proc->result.out.r.atts[atti];
+
 				modvalues[i] = (att->func) (&proc->result, att, plval);
 				modnulls[i] = ' ';
 			}
@@ -1398,7 +1401,7 @@ PLy_procedure_create(HeapTuple procTup, Oid tgreloid, char *key)
 			Form_pg_type rvTypeStruct;
 
 			rvTypeTup = SearchSysCache1(TYPEOID,
-									ObjectIdGetDatum(procStruct->prorettype));
+								   ObjectIdGetDatum(procStruct->prorettype));
 			if (!HeapTupleIsValid(rvTypeTup))
 				elog(ERROR, "cache lookup failed for type %u",
 					 procStruct->prorettype);
@@ -1761,7 +1764,7 @@ static void
 PLy_output_datum_func2(PLyObToDatum *arg, HeapTuple typeTup)
 {
 	Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTup);
-	Oid element_type;
+	Oid			element_type;
 
 	perm_fmgr_info(typeStruct->typinput, &arg->typfunc);
 	arg->typoid = HeapTupleGetOid(typeTup);
@@ -1771,9 +1774,8 @@ PLy_output_datum_func2(PLyObToDatum *arg, HeapTuple typeTup)
 	element_type = get_element_type(arg->typoid);
 
 	/*
-	 * Select a conversion function to convert Python objects to
-	 * PostgreSQL datums.  Most data types can go through the generic
-	 * function.
+	 * Select a conversion function to convert Python objects to PostgreSQL
+	 * datums.	Most data types can go through the generic function.
 	 */
 	switch (getBaseType(element_type ? element_type : arg->typoid))
 	{
@@ -1790,8 +1792,8 @@ PLy_output_datum_func2(PLyObToDatum *arg, HeapTuple typeTup)
 
 	if (element_type)
 	{
-		char dummy_delim;
-		Oid funcid;
+		char		dummy_delim;
+		Oid			funcid;
 
 		if (type_is_rowtype(element_type))
 			ereport(ERROR,
@@ -1825,7 +1827,7 @@ static void
 PLy_input_datum_func2(PLyDatumToOb *arg, Oid typeOid, HeapTuple typeTup)
 {
 	Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTup);
-	Oid element_type = get_element_type(typeOid);
+	Oid			element_type = get_element_type(typeOid);
 
 	/* Get the type's conversion information */
 	perm_fmgr_info(typeStruct->typoutput, &arg->typfunc);
@@ -1926,12 +1928,12 @@ static PyObject *
 PLyFloat_FromNumeric(PLyDatumToOb *arg, Datum d)
 {
 	/*
-	 * Numeric is cast to a PyFloat:
-	 *   This results in a loss of precision
-	 *   Would it be better to cast to PyString?
+	 * Numeric is cast to a PyFloat: this results in a loss of precision.
+	 * Would it be better to cast to PyString?
 	 */
-	Datum  f = DirectFunctionCall1(numeric_float8, d);
-	double x = DatumGetFloat8(f);
+	Datum		f = DirectFunctionCall1(numeric_float8, d);
+	double		x = DatumGetFloat8(f);
+
 	return PyFloat_FromDouble(x);
 }
 
@@ -1960,9 +1962,9 @@ PLyLong_FromInt64(PLyDatumToOb *arg, Datum d)
 static PyObject *
 PLyBytes_FromBytea(PLyDatumToOb *arg, Datum d)
 {
-	text     *txt = DatumGetByteaP(d);
-	char     *str = VARDATA(txt);
-	size_t    size = VARSIZE(txt) - VARHDRSZ;
+	text	   *txt = DatumGetByteaP(d);
+	char	   *str = VARDATA(txt);
+	size_t		size = VARSIZE(txt) - VARHDRSZ;
 
 	return PyBytes_FromStringAndSize(str, size);
 }
@@ -1970,8 +1972,9 @@ PLyBytes_FromBytea(PLyDatumToOb *arg, Datum d)
 static PyObject *
 PLyString_FromDatum(PLyDatumToOb *arg, Datum d)
 {
-	char     *x = OutputFunctionCall(&arg->typfunc, d);
-	PyObject *r = PyString_FromString(x);
+	char	   *x = OutputFunctionCall(&arg->typfunc, d);
+	PyObject   *r = PyString_FromString(x);
+
 	pfree(x);
 	return r;
 }
@@ -1991,8 +1994,8 @@ PLyList_FromArray(PLyDatumToOb *arg, Datum d)
 	if (ARR_NDIM(array) != 1)
 		ereport(ERROR,
 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("cannot convert multidimensional array to Python list"),
-				 errdetail("PL/Python only supports one-dimensional arrays.")));
+			  errmsg("cannot convert multidimensional array to Python list"),
+			  errdetail("PL/Python only supports one-dimensional arrays.")));
 
 	length = ARR_DIMS(array)[0];
 	lbound = ARR_LBOUND(array)[0];
@@ -2000,9 +2003,9 @@ PLyList_FromArray(PLyDatumToOb *arg, Datum d)
 
 	for (i = 0; i < length; i++)
 	{
-		Datum elem;
-		bool isnull;
-		int offset;
+		Datum		elem;
+		bool		isnull;
+		int			offset;
 
 		offset = lbound + i;
 		elem = array_ref(array, 1, &offset, arg->typlen, arg->elm->typlen, arg->elm->typbyval, arg->elm->typalign, &isnull);
@@ -2064,7 +2067,7 @@ PLyDict_FromTuple(PLyTypeInfo *info, HeapTuple tuple, TupleDesc desc)
 }
 
 /*
- * Convert a Python object to a PostgreSQL bool datum.  This can't go
+ * Convert a Python object to a PostgreSQL bool datum.	This can't go
  * through the generic conversion function, because Python attaches a
  * Boolean value to everything, more things than the PostgreSQL bool
  * type can parse.
@@ -2096,7 +2099,7 @@ PLyObject_ToBytea(PLyTypeInfo *info,
 				  PyObject *plrv)
 {
 	PyObject   *volatile plrv_so = NULL;
-	Datum       rv;
+	Datum		rv;
 
 	Assert(plrv != Py_None);
 
@@ -2106,10 +2109,10 @@ PLyObject_ToBytea(PLyTypeInfo *info,
 
 	PG_TRY();
 	{
-		char *plrv_sc = PyBytes_AsString(plrv_so);
-		size_t len = PyBytes_Size(plrv_so);
-		size_t size = len + VARHDRSZ;
-		bytea *result = palloc(size);
+		char	   *plrv_sc = PyBytes_AsString(plrv_so);
+		size_t		len = PyBytes_Size(plrv_so);
+		size_t		size = len + VARHDRSZ;
+		bytea	   *result = palloc(size);
 
 		SET_VARSIZE(result, size);
 		memcpy(VARDATA(result), plrv_sc, len);
@@ -2139,8 +2142,8 @@ PLyObject_ToDatum(PLyTypeInfo *info,
 				  PLyObToDatum *arg,
 				  PyObject *plrv)
 {
-	PyObject *volatile plrv_bo = NULL;
-	Datum     rv;
+	PyObject   *volatile plrv_bo = NULL;
+	Datum		rv;
 
 	Assert(plrv != Py_None);
 
@@ -2149,7 +2152,8 @@ PLyObject_ToDatum(PLyTypeInfo *info,
 	else
 	{
 #if PY_MAJOR_VERSION >= 3
-		PyObject *s = PyObject_Str(plrv);
+		PyObject   *s = PyObject_Str(plrv);
+
 		plrv_bo = PLyUnicode_Bytes(s);
 		Py_XDECREF(s);
 #else
@@ -2161,9 +2165,9 @@ PLyObject_ToDatum(PLyTypeInfo *info,
 
 	PG_TRY();
 	{
-		char *plrv_sc = PyBytes_AsString(plrv_bo);
-		size_t plen = PyBytes_Size(plrv_bo);
-		size_t slen = strlen(plrv_sc);
+		char	   *plrv_sc = PyBytes_AsString(plrv_bo);
+		size_t		plen = PyBytes_Size(plrv_bo);
+		size_t		slen = strlen(plrv_sc);
 
 		if (slen < plen)
 			ereport(ERROR,
@@ -2190,10 +2194,10 @@ PLySequence_ToArray(PLyTypeInfo *info,
 					PLyObToDatum *arg,
 					PyObject *plrv)
 {
-	ArrayType *array;
+	ArrayType  *array;
 	int			i;
-	Datum		*elems;
-	bool		*nulls;
+	Datum	   *elems;
+	bool	   *nulls;
 	int			len;
 	int			lbs;
 
@@ -2208,15 +2212,18 @@ PLySequence_ToArray(PLyTypeInfo *info,
 
 	for (i = 0; i < len; i++)
 	{
-		PyObject *obj = PySequence_GetItem(plrv, i);
+		PyObject   *obj = PySequence_GetItem(plrv, i);
 
 		if (obj == Py_None)
 			nulls[i] = true;
 		else
 		{
 			nulls[i] = false;
-			/* We don't support arrays of row types yet, so the first
-			 * argument can be NULL. */
+
+			/*
+			 * We don't support arrays of row types yet, so the first argument
+			 * can be NULL.
+			 */
 			elems[i] = arg->elm->func(NULL, arg->elm, obj);
 		}
 		Py_XDECREF(obj);
@@ -2833,7 +2840,7 @@ PLy_spi_prepare(PyObject *self, PyObject *args)
 					{
 						ereport(ERROR,
 								(errmsg("plpy.prepare: type name at ordinal position %d is not a string", i)));
-						sptr = NULL; /* keep compiler quiet */
+						sptr = NULL;	/* keep compiler quiet */
 					}
 
 					/********************************************************
@@ -3341,9 +3348,12 @@ PLy_output(volatile int level, PyObject *self, PyObject *args)
 
 	if (PyTuple_Size(args) == 1)
 	{
-		/* Treat single argument specially to avoid undesirable
-		 * ('tuple',) decoration. */
-		PyObject *o;
+		/*
+		 * Treat single argument specially to avoid undesirable ('tuple',)
+		 * decoration.
+		 */
+		PyObject   *o;
+
 		PyArg_UnpackTuple(args, "plpy.elog", 1, 1, &o);
 		so = PyObject_Str(o);
 	}
@@ -3367,8 +3377,11 @@ PLy_output(volatile int level, PyObject *self, PyObject *args)
 		FlushErrorState();
 
 		PyErr_SetString(PLy_exc_error, sv);
-		/* Note: If sv came from PyString_AsString(), it points into
-		 * storage owned by so.  So free so after using sv. */
+
+		/*
+		 * Note: If sv came from PyString_AsString(), it points into storage
+		 * owned by so.  So free so after using sv.
+		 */
 		Py_XDECREF(so);
 
 		/*
@@ -3443,7 +3456,7 @@ PLy_exception_set_plural(PyObject *exc,
 
 /* Emit a PG error or notice, together with any available info about
  * the current Python error, previously set by PLy_exception_set().
- * This should be used to propagate Python errors into PG.  If fmt is
+ * This should be used to propagate Python errors into PG.	If fmt is
  * NULL, the Python error becomes the primary error message, otherwise
  * it becomes the detail.
  */
@@ -3459,7 +3472,7 @@ PLy_elog(int elevel, const char *fmt,...)
 	if (fmt)
 	{
 		initStringInfo(&emsg);
-		for(;;)
+		for (;;)
 		{
 			va_list		ap;
 			bool		success;
@@ -3627,7 +3640,7 @@ PLy_free(void *ptr)
 /*
  * Convert a Unicode object to a Python string.
  */
-static PyObject*
+static PyObject *
 PLyUnicode_Str(PyObject *unicode)
 {
 #if PY_MAJOR_VERSION >= 3
@@ -3635,26 +3648,29 @@ PLyUnicode_Str(PyObject *unicode)
 	Py_INCREF(unicode);
 	return unicode;
 #else
-	/* In Python 2, this means converting the Unicode to bytes in the
-	 * server encoding. */
+
+	/*
+	 * In Python 2, this means converting the Unicode to bytes in the server
+	 * encoding.
+	 */
 	return PLyUnicode_Bytes(unicode);
 #endif
 }
 
 /*
  * Convert a Python unicode object to a Python string/bytes object in
- * PostgreSQL server encoding.  Reference ownership is passed to the
+ * PostgreSQL server encoding.	Reference ownership is passed to the
  * caller.
  */
-static PyObject*
+static PyObject *
 PLyUnicode_Bytes(PyObject *unicode)
 {
-	PyObject *rv;
+	PyObject   *rv;
 	const char *serverenc;
 
 	/*
-	 * Python understands almost all PostgreSQL encoding names, but it
-	 * doesn't know SQL_ASCII.
+	 * Python understands almost all PostgreSQL encoding names, but it doesn't
+	 * know SQL_ASCII.
 	 */
 	if (GetDatabaseEncoding() == PG_SQL_ASCII)
 		serverenc = "ascii";
@@ -3672,7 +3688,7 @@ PLyUnicode_Bytes(PyObject *unicode)
  * function.  The result is palloc'ed.
  *
  * Note that this function is disguised as PyString_AsString() when
- * using Python 3.  That function retuns a pointer into the internal
 * using Python 3.	That function returns a pointer into the internal
  * memory of the argument, which isn't exactly the interface of this
  * function.  But in either case you get a rather short-lived
  * reference that you ought to better leave alone.
@@ -3680,8 +3696,9 @@ PLyUnicode_Bytes(PyObject *unicode)
 static char *
 PLyUnicode_AsString(PyObject *unicode)
 {
-	PyObject *o = PLyUnicode_Bytes(unicode);
-	char *rv = pstrdup(PyBytes_AsString(o));
+	PyObject   *o = PLyUnicode_Bytes(unicode);
+	char	   *rv = pstrdup(PyBytes_AsString(o));
+
 	Py_XDECREF(o);
 	return rv;
 }
@@ -3689,24 +3706,25 @@ PLyUnicode_AsString(PyObject *unicode)
 #if PY_MAJOR_VERSION >= 3
 /*
  * Convert a C string in the PostgreSQL server encoding to a Python
- * unicode object.  Reference ownership is passed to the caller.
+ * unicode object.	Reference ownership is passed to the caller.
  */
 static PyObject *
 PLyUnicode_FromString(const char *s)
 {
-    char       *utf8string;
+	char	   *utf8string;
 	PyObject   *o;
 
-    utf8string = (char *) pg_do_encoding_conversion((unsigned char *) s,
+	utf8string = (char *) pg_do_encoding_conversion((unsigned char *) s,
 													strlen(s),
 													GetDatabaseEncoding(),
 													PG_UTF8);
 
 	o = PyUnicode_FromString(utf8string);
 
-    if (utf8string != s)
-        pfree(utf8string);
+	if (utf8string != s)
+		pfree(utf8string);
 
 	return o;
 }
-#endif /* PY_MAJOR_VERSION >= 3 */
+
+#endif   /* PY_MAJOR_VERSION >= 3 */
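[Editor's aside, not part of the patch] The reindented comments in the PL/Python hunks above describe how a Python unicode object becomes a C string in the server encoding, and why the pointer obtained from PyBytes_AsString()/PyString_AsString() must not outlive the bytes object it points into. A hedged, standalone illustration of that ownership pattern follows; the helper name and the use of plain strdup() instead of the backend's pstrdup() are my own, and it assumes the Python 3 C API.

#include <Python.h>
#include <string.h>

/*
 * Hypothetical helper: encode a Python unicode object into a freshly
 * malloc'd C string in the given encoding.  PyBytes_AsString() returns a
 * pointer into the bytes object's own storage, so the string is copied
 * before the reference to the bytes object is dropped.
 */
static char *
unicode_to_cstring(PyObject *unicode, const char *encoding)
{
	PyObject   *bytes;
	char	   *result;

	bytes = PyUnicode_AsEncodedString(unicode, encoding, "strict");
	if (bytes == NULL)
		return NULL;			/* Python exception already set */

	result = strdup(PyBytes_AsString(bytes));
	Py_XDECREF(bytes);			/* safe only because we copied first */
	return result;
}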
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index 31922a885a0586b2f76eb34b19a01b248fd1d4b3..038378f267277ab7d3f9c78eea0e460a60b8391c 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -2,7 +2,7 @@
  * pltcl.c		- PostgreSQL support for Tcl as
  *				  procedural language (PL)
  *
- *	  $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.131 2010/02/14 18:42:19 rhaas Exp $
+ *	  $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.132 2010/02/26 02:01:37 momjian Exp $
  *
  **********************************************************************/
 
@@ -959,7 +959,7 @@ pltcl_trigger_handler(PG_FUNCTION_ARGS)
 			 * for the input function
 			 ************************************************************/
 			typeTup = SearchSysCache1(TYPEOID,
-					  ObjectIdGetDatum(tupdesc->attrs[attnum - 1]->atttypid));
+					 ObjectIdGetDatum(tupdesc->attrs[attnum - 1]->atttypid));
 			if (!HeapTupleIsValid(typeTup))
 				elog(ERROR, "cache lookup failed for type %u",
 					 tupdesc->attrs[attnum - 1]->atttypid);
@@ -1165,7 +1165,7 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid)
 		{
 			typeTup =
 				SearchSysCache1(TYPEOID,
-							    ObjectIdGetDatum(procStruct->prorettype));
+								ObjectIdGetDatum(procStruct->prorettype));
 			if (!HeapTupleIsValid(typeTup))
 			{
 				free(prodesc->user_proname);
@@ -1229,7 +1229,7 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid)
 			for (i = 0; i < prodesc->nargs; i++)
 			{
 				typeTup = SearchSysCache1(TYPEOID,
-						 ObjectIdGetDatum(procStruct->proargtypes.values[i]));
+						ObjectIdGetDatum(procStruct->proargtypes.values[i]));
 				if (!HeapTupleIsValid(typeTup))
 				{
 					free(prodesc->user_proname);
@@ -2333,7 +2333,7 @@ pltcl_set_tuple_values(Tcl_Interp *interp, CONST84 char *arrayname,
 		 * for the output function
 		 ************************************************************/
 		typeTup = SearchSysCache1(TYPEOID,
-							   ObjectIdGetDatum(tupdesc->attrs[i]->atttypid));
+							  ObjectIdGetDatum(tupdesc->attrs[i]->atttypid));
 		if (!HeapTupleIsValid(typeTup))
 			elog(ERROR, "cache lookup failed for type %u",
 				 tupdesc->attrs[i]->atttypid);
@@ -2401,7 +2401,7 @@ pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc,
 		 * for the output function
 		 ************************************************************/
 		typeTup = SearchSysCache1(TYPEOID,
-							   ObjectIdGetDatum(tupdesc->attrs[i]->atttypid));
+							  ObjectIdGetDatum(tupdesc->attrs[i]->atttypid));
 		if (!HeapTupleIsValid(typeTup))
 			elog(ERROR, "cache lookup failed for type %u",
 				 tupdesc->attrs[i]->atttypid);
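[Editor's aside, not part of the patch] The SearchSysCache1() calls re-wrapped in the pltcl.c hunks above are instances of the standard syscache lookup idiom; the hunks only show the lookup-and-validity-check half because the matching release happens later in the real functions. A minimal sketch of the whole pattern, with an invented caller and header names assumed from that era of the tree:

#include "postgres.h"
#include "access/htup.h"
#include "catalog/pg_type.h"
#include "utils/syscache.h"

/* Illustrative only: look up a type's output function OID via the syscache. */
static Oid
lookup_type_output(Oid typid)
{
	HeapTuple	typeTup;
	Form_pg_type typeStruct;
	Oid			result;

	typeTup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
	if (!HeapTupleIsValid(typeTup))
		elog(ERROR, "cache lookup failed for type %u", typid);

	typeStruct = (Form_pg_type) GETSTRUCT(typeTup);
	result = typeStruct->typoutput;

	ReleaseSysCache(typeTup);	/* pair every successful lookup with a release */
	return result;
}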
diff --git a/src/port/chklocale.c b/src/port/chklocale.c
index 189fe354509e9875f67298fb65d58df6fc917d92..6866353977ea6200c93d002d7d3b28c5c9df7099 100644
--- a/src/port/chklocale.c
+++ b/src/port/chklocale.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/port/chklocale.c,v 1.14 2010/01/02 16:58:13 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/port/chklocale.c,v 1.15 2010/02/26 02:01:38 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -244,17 +244,17 @@ pg_get_encoding_from_locale(const char *ctype)
 
 		save = setlocale(LC_CTYPE, NULL);
 		if (!save)
-			return -1;				/* setlocale() broken? */
+			return -1;			/* setlocale() broken? */
 		/* must copy result, or it might change after setlocale */
 		save = strdup(save);
 		if (!save)
-			return -1;				/* out of memory; unlikely */
+			return -1;			/* out of memory; unlikely */
 
 		name = setlocale(LC_CTYPE, ctype);
 		if (!name)
 		{
 			free(save);
-			return -1;				/* bogus ctype passed in? */
+			return -1;			/* bogus ctype passed in? */
 		}
 
 #ifndef WIN32
@@ -273,7 +273,7 @@ pg_get_encoding_from_locale(const char *ctype)
 		/* much easier... */
 		ctype = setlocale(LC_CTYPE, NULL);
 		if (!ctype)
-			return -1;				/* setlocale() broken? */
+			return -1;			/* setlocale() broken? */
 
 		/* If locale is C or POSIX, we can allow all encodings */
 		if (pg_strcasecmp(ctype, "C") == 0 ||
@@ -290,7 +290,7 @@ pg_get_encoding_from_locale(const char *ctype)
 	}
 
 	if (!sys)
-		return -1;					/* out of memory; unlikely */
+		return -1;				/* out of memory; unlikely */
 
 	/* Check the table */
 	for (i = 0; encoding_match_list[i].system_enc_name; i++)
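[Editor's aside, not part of the patch] The chklocale.c hunks above only realign comments, but the function they sit in probes a locale's codeset by saving the current LC_CTYPE, switching to the locale under test, reading nl_langinfo(CODESET), and then restoring. A rough non-Windows sketch of that save/probe/restore pattern; the function name is invented and error handling is trimmed:

#include <locale.h>
#include <langinfo.h>
#include <stdlib.h>
#include <string.h>

/* Return a malloc'd codeset name for "ctype", or NULL on failure. */
static char *
probe_codeset(const char *ctype)
{
	char	   *save;
	char	   *codeset = NULL;

	save = setlocale(LC_CTYPE, NULL);
	if (!save)
		return NULL;
	save = strdup(save);		/* copy: the buffer may change on the next setlocale */
	if (!save)
		return NULL;

	if (setlocale(LC_CTYPE, ctype))
		codeset = strdup(nl_langinfo(CODESET));

	setlocale(LC_CTYPE, save);	/* always restore the original locale */
	free(save);
	return codeset;
}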
diff --git a/src/port/copydir.c b/src/port/copydir.c
index 14c6ffe769123c0aa62840d68fcdc8ab19ccd2e6..e1675b6639162c919795b5627480cc4d60b60bb7 100644
--- a/src/port/copydir.c
+++ b/src/port/copydir.c
@@ -11,7 +11,7 @@
  *	as a service.
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/port/copydir.c,v 1.32 2010/02/23 05:44:55 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/port/copydir.c,v 1.33 2010/02/26 02:01:38 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -126,12 +126,12 @@ copydir(char *fromdir, char *todir, bool recurse)
 	FreeDir(xldir);
 
 #ifdef NOTYET
+
 	/*
-	 * It's important to fsync the destination directory itself as
-	 * individual file fsyncs don't guarantee that the directory entry
-	 * for the file is synced. Recent versions of ext4 have made the
-	 * window much wider but it's been true for ext3 and other
-	 * filesystems in the past.
+	 * It's important to fsync the destination directory itself as individual
+	 * file fsyncs don't guarantee that the directory entry for the file is
+	 * synced. Recent versions of ext4 have made the window much wider but
+	 * it's been true for ext3 and other filesystems in the past.
 	 *
 	 * However we can't do this just yet, it has portability issues.
 	 */
@@ -175,7 +175,7 @@ copy_file(char *fromfile, char *tofile)
 	/*
 	 * Do the data copying.
 	 */
-	for (offset=0; ; offset+=nbytes)
+	for (offset = 0;; offset += nbytes)
 	{
 		nbytes = read(srcfd, buffer, COPY_BUF_SIZE);
 		if (nbytes < 0)
@@ -196,9 +196,9 @@ copy_file(char *fromfile, char *tofile)
 		}
 
 		/*
-		 * We fsync the files later but first flush them to avoid spamming
-		 * the cache and hopefully get the kernel to start writing them
-		 * out before the fsync comes.
+		 * We fsync the files later but first flush them to avoid spamming the
+		 * cache and hopefully get the kernel to start writing them out before
+		 * the fsync comes.
 		 */
 		pg_flush_data(dstfd, offset, nbytes);
 	}
@@ -220,9 +220,9 @@ copy_file(char *fromfile, char *tofile)
 static void
 fsync_fname(char *fname)
 {
-	int	fd = BasicOpenFile(fname, 
-						   O_RDWR | PG_BINARY,
-						   S_IRUSR | S_IWUSR);
+	int			fd = BasicOpenFile(fname,
+								   O_RDWR | PG_BINARY,
+								   S_IRUSR | S_IWUSR);
 
 	if (fd < 0)
 		ereport(ERROR,
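[Editor's aside, not part of the patch] The comment reflowed in the copydir.c hunk above explains why copydir() would like to fsync the destination directory itself: per-file fsyncs do not guarantee that the directory entries for those files are durable. The still-disabled idea corresponds roughly to the standard POSIX pattern sketched below, using plain open()/fsync() rather than the backend's BasicOpenFile() and ereport(); the portability caveat in the comment is why the real code stayed behind #ifdef NOTYET.

#include <fcntl.h>
#include <unistd.h>

/*
 * Illustrative only: make a directory's entries durable after the files in
 * it have been fsync'd.  Some platforms refuse fsync() on a directory fd,
 * which is the portability problem the original comment alludes to.
 */
static int
fsync_dir(const char *path)
{
	int			fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (fsync(fd) != 0)
	{
		close(fd);
		return -1;
	}
	return close(fd);
}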
diff --git a/src/port/dirmod.c b/src/port/dirmod.c
index 8b66e656e70e27e5c26d9e7d6abca0055d9944d7..ede728afeef3ce10ffbe14bf6a059f45e92573a6 100644
--- a/src/port/dirmod.c
+++ b/src/port/dirmod.c
@@ -10,7 +10,7 @@
  *	Win32 (NT4 and newer).
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/port/dirmod.c,v 1.60 2010/01/02 16:58:13 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/port/dirmod.c,v 1.61 2010/02/26 02:01:38 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -135,8 +135,8 @@ pgrename(const char *from, const char *to)
 		_dosmaperr(err);
 
 		/*
-		 * Modern NT-based Windows versions return ERROR_SHARING_VIOLATION
-		 * if another process has the file open without FILE_SHARE_DELETE.
+		 * Modern NT-based Windows versions return ERROR_SHARING_VIOLATION if
+		 * another process has the file open without FILE_SHARE_DELETE.
 		 * ERROR_LOCK_VIOLATION has also been seen with some anti-virus
 		 * software. This used to check for just ERROR_ACCESS_DENIED, so
 		 * presumably you can get that too with some OS versions. We don't
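[Editor's aside, not part of the patch] The comment rewrapped above concerns pgrename()'s Windows behaviour: a rename can fail transiently with ERROR_SHARING_VIOLATION or ERROR_LOCK_VIOLATION while another process (often an anti-virus scanner) holds the file open, so the real code loops and retries. A hedged sketch of such a retry loop, assuming a non-UNICODE build; the retry count and sleep interval here are invented for illustration:

#include <windows.h>

/* Illustrative only: retry MoveFileEx() on transient sharing/lock errors. */
static int
rename_with_retry(const char *from, const char *to)
{
	int			tries;

	for (tries = 0; tries < 100; tries++)	/* ~10 seconds total, arbitrary */
	{
		if (MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING))
			return 0;

		switch (GetLastError())
		{
			case ERROR_SHARING_VIOLATION:
			case ERROR_LOCK_VIOLATION:
			case ERROR_ACCESS_DENIED:
				Sleep(100);		/* transient: wait and retry */
				continue;
			default:
				return -1;		/* permanent failure */
		}
	}
	return -1;
}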
diff --git a/src/port/exec.c b/src/port/exec.c
index a4f8b16419f94ab7fb942cf3b7cdb20f8bf4e1d9..68bce6f962c160a9645612f66c443f5681a5d789 100644
--- a/src/port/exec.c
+++ b/src/port/exec.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/port/exec.c,v 1.67 2010/01/14 00:14:06 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/port/exec.c,v 1.68 2010/02/26 02:01:38 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -625,7 +625,7 @@ AddUserToTokenDacl(HANDLE hToken)
 	DWORD		dwSize = 0;
 	DWORD		dwTokenInfoLength = 0;
 	PACL		pacl = NULL;
-	PTOKEN_USER	pTokenUser = NULL;
+	PTOKEN_USER pTokenUser = NULL;
 	TOKEN_DEFAULT_DACL tddNew;
 	TOKEN_DEFAULT_DACL *ptdd = NULL;
 	TOKEN_INFORMATION_CLASS tic = TokenDefaultDacl;
@@ -666,8 +666,8 @@ AddUserToTokenDacl(HANDLE hToken)
 	}
 
 	/*
-	 * Get the user token for the current user, which provides us with the
-	 * SID that is needed for creating the ACL.
+	 * Get the user token for the current user, which provides us with the SID
+	 * that is needed for creating the ACL.
 	 */
 	if (!GetTokenUser(hToken, &pTokenUser))
 	{
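[Editor's aside, not part of the patch] The AddUserToTokenDacl() hunk above belongs to the step that extracts the current user's SID from the access token; the GetTokenUser() helper it calls wraps the usual two-call GetTokenInformation() idiom (first call to learn the buffer size, second call to fill it). A rough sketch of that idiom with an invented helper name:

#include <windows.h>
#include <stdlib.h>

/* Illustrative only: fetch the TOKEN_USER block; caller must free() it. */
static PTOKEN_USER
get_token_user(HANDLE hToken)
{
	DWORD		dwSize = 0;
	PTOKEN_USER pTokenUser;

	/* First call just reports the required buffer size. */
	if (!GetTokenInformation(hToken, TokenUser, NULL, 0, &dwSize) &&
		GetLastError() != ERROR_INSUFFICIENT_BUFFER)
		return NULL;

	pTokenUser = (PTOKEN_USER) malloc(dwSize);
	if (pTokenUser == NULL)
		return NULL;

	if (!GetTokenInformation(hToken, TokenUser, pTokenUser, dwSize, &dwSize))
	{
		free(pTokenUser);
		return NULL;
	}
	return pTokenUser;			/* pTokenUser->User.Sid is the SID */
}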
diff --git a/src/port/win32env.c b/src/port/win32env.c
index 85a1247fba818f650a453e012ff1a898db484f6c..2ab5d79112ef5f83daed503e7be0c336d00d14aa 100644
--- a/src/port/win32env.c
+++ b/src/port/win32env.c
@@ -10,7 +10,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/port/win32env.c,v 1.7 2010/01/02 16:58:13 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/port/win32env.c,v 1.8 2010/02/26 02:01:38 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -27,30 +27,44 @@ pgwin32_putenv(const char *envval)
 	 * Each version of MSVCRT has its own _putenv() call in the runtime
 	 * library.
 	 *
-	 * mingw always uses MSVCRT.DLL, but if we are in a Visual C++ environment,
-	 * attempt to update the environment in all MSVCRT modules that are
-	 * currently loaded, to work properly with any third party libraries
-	 * linked against a different MSVCRT but still relying on environment
-	 * variables.
+	 * mingw always uses MSVCRT.DLL, but if we are in a Visual C++
+	 * environment, attempt to update the environment in all MSVCRT modules
+	 * that are currently loaded, to work properly with any third party
+	 * libraries linked against a different MSVCRT but still relying on
+	 * environment variables.
 	 *
 	 * Also separately update the system environment that gets inherited by
 	 * subprocesses.
 	 */
 #ifdef _MSC_VER
 	typedef int (_cdecl * PUTENVPROC) (const char *);
-	static struct {
+	static struct
+	{
 		char	   *modulename;
 		HMODULE		hmodule;
-		PUTENVPROC putenvFunc;
-	} rtmodules[] = {
-		{ "msvcrt", 0, NULL},  /* Visual Studio 6.0 / mingw */
-		{ "msvcr70", 0, NULL}, /* Visual Studio 2002 */
-		{ "msvcr71", 0, NULL}, /* Visual Studio 2003 */
-		{ "msvcr80", 0, NULL}, /* Visual Studio 2005 */
-		{ "msvcr90", 0, NULL}, /* Visual Studio 2008 */
-		{ NULL, 0, NULL}
+		PUTENVPROC	putenvFunc;
+	}			rtmodules[] =
+	{
+		{
+			"msvcrt", 0, NULL
+		},						/* Visual Studio 6.0 / mingw */
+		{
+			"msvcr70", 0, NULL
+		},						/* Visual Studio 2002 */
+		{
+			"msvcr71", 0, NULL
+		},						/* Visual Studio 2003 */
+		{
+			"msvcr80", 0, NULL
+		},						/* Visual Studio 2005 */
+		{
+			"msvcr90", 0, NULL
+		},						/* Visual Studio 2008 */
+		{
+			NULL, 0, NULL
+		}
 	};
-	int i;
+	int			i;
 
 	for (i = 0; rtmodules[i].modulename; i++)
 	{
@@ -63,8 +77,8 @@ pgwin32_putenv(const char *envval)
 				if (rtmodules[i].hmodule == NULL)
 				{
 					/*
-					 * Set to INVALID_HANDLE_VALUE so we know we have tried this one
-					 * before, and won't try again.
+					 * Set to INVALID_HANDLE_VALUE so we know we have tried
+					 * this one before, and won't try again.
 					 */
 					rtmodules[i].hmodule = INVALID_HANDLE_VALUE;
 					continue;
@@ -83,8 +97,8 @@ pgwin32_putenv(const char *envval)
 			else
 			{
 				/*
-				 * Module loaded, but we did not find the function last time. We're
-				 * not going to find it this time either...
+				 * Module loaded, but we did not find the function last time.
+				 * We're not going to find it this time either...
 				 */
 				continue;
 			}
@@ -92,7 +106,7 @@ pgwin32_putenv(const char *envval)
 		/* At this point, putenvFunc is set or we have exited the loop */
 		rtmodules[i].putenvFunc(envval);
 	}
-#endif		/* _MSC_VER */
+#endif   /* _MSC_VER */
 
 	/*
 	 * Update the process environment - to make modifications visible to child
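[Editor's aside, not part of the patch] The comment and struct initializer reformatted above explain why pgwin32_putenv() has to update every MSVCRT variant that happens to be loaded: each CRT keeps its own copy of the environment, so a _putenv() in one does not reach code linked against another. Stripped down to a single module, the per-CRT update is a GetModuleHandle()/GetProcAddress() pair, as in this illustrative sketch (non-UNICODE build assumed; the helper name is invented):

#include <windows.h>

typedef int (_cdecl * PUTENVPROC) (const char *);

/* Illustrative only: push "NAME=value" into one specific CRT, if loaded. */
static void
putenv_in_module(const char *modulename, const char *envval)
{
	HMODULE		hmodule = GetModuleHandle(modulename);
	PUTENVPROC	putenvFunc;

	if (hmodule == NULL)
		return;					/* that CRT is not loaded in this process */

	putenvFunc = (PUTENVPROC) GetProcAddress(hmodule, "_putenv");
	if (putenvFunc != NULL)
		putenvFunc(envval);

	/*
	 * The Win32-level environment inherited by child processes is a separate
	 * copy again; the real function updates that one separately.
	 */
}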
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index c91a09f960a9ad89d779aff66cd74459e166f465..8a246ff6caa43cecd092d0928cdb5624ff7b41e7 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -11,7 +11,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
- * $PostgreSQL: pgsql/src/test/regress/pg_regress.c,v 1.70 2010/02/24 01:35:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/test/regress/pg_regress.c,v 1.71 2010/02/26 02:01:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -67,7 +67,7 @@ static char *shellprog = SHELLPROG;
 
 /*
  * On Windows we use -w in diff switches to avoid problems with inconsistent
- * newline representation.  The actual result files will generally have
+ * newline representation.	The actual result files will generally have
  * Windows-style newlines, but the comparison files might or might not.
  */
 #ifndef WIN32
@@ -1791,7 +1791,7 @@ create_database(const char *dbname)
 				 dbname, dbname, dbname, dbname, dbname);
 
 	/*
-	 * Install any requested procedural languages.  We use CREATE OR REPLACE
+	 * Install any requested procedural languages.	We use CREATE OR REPLACE
 	 * so that this will work whether or not the language is preinstalled.
 	 */
 	for (sl = loadlanguage; sl != NULL; sl = sl->next)
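[Editor's aside, not part of the patch] The comment adjusted above notes that pg_regress installs requested procedural languages with CREATE OR REPLACE LANGUAGE, so the command works whether or not the language is preinstalled in the test database. Purely for illustration (the surrounding command-building code is not shown in this hunk), the idempotent statement it relies on can be produced like this:

#include <stdio.h>

int
main(void)
{
	char		sql[256];

	/* CREATE OR REPLACE succeeds whether or not the language exists yet. */
	snprintf(sql, sizeof(sql),
			 "CREATE OR REPLACE LANGUAGE \"%s\";", "plpgsql");
	puts(sql);
	return 0;
}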
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index bcb891c7c956804d53462ee81b4cf89c3366786d..8c48c24845d40009b726450964a16610802ef520 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -6,7 +6,7 @@
  * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.65 2010/01/02 16:58:16 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.66 2010/02/26 02:01:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -975,7 +975,7 @@ static const struct
 		"Australia/Perth"
 	},							/* (GMT+08:00) Perth */
 /*	{"W. Central Africa Standard Time", "W. Central Africa Daylight Time",
-	 *	 *	 *	 *	 *	 *	 *	 *	""}, Could not find a match for this one. Excluded for now. *//* (
+	 *	 *	 *	 *	 *	 *	 *	 *	 *	""}, Could not find a match for this one. Excluded for now. *//* (
 	 * G MT+01:00) West Central Africa */
 	{
 		"W. Europe Standard Time", "W. Europe Daylight Time",
diff --git a/src/tools/fsync/test_fsync.c b/src/tools/fsync/test_fsync.c
index f63f4fb20617bd293d6add37b1c7d84f311b62a5..1256d428250d518421106a0b5791f8e1c1d1f642 100644
--- a/src/tools/fsync/test_fsync.c
+++ b/src/tools/fsync/test_fsync.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/src/tools/fsync/test_fsync.c,v 1.26 2009/11/28 15:04:54 momjian Exp $
+ * $PostgreSQL: pgsql/src/tools/fsync/test_fsync.c,v 1.27 2010/02/26 02:01:39 momjian Exp $
  *
  *
  *	test_fsync.c
@@ -30,7 +30,7 @@
 #define FSYNC_FILENAME	"/var/tmp/test_fsync.out"
 #endif
 
-#define WRITE_SIZE	(8 * 1024) /* 8k */
+#define WRITE_SIZE	(8 * 1024)	/* 8k */
 
 #define LABEL_FORMAT	"\t%-30s"
 
@@ -74,7 +74,7 @@ main(int argc, char *argv[])
 	buf = (char *) TYPEALIGN(ALIGNOF_XLOG_BUFFER, full_buf);
 
 	/*
-	 *	Simple write
+	 * Simple write
 	 */
 	printf("Simple 8k write timing:\n");
 	/* write only */
@@ -92,7 +92,7 @@ main(int argc, char *argv[])
 	print_elapse(start_t, stop_t);
 
 	/*
-	 *	Compare file sync methods with one 8k write
+	 * Compare file sync methods with one 8k write
 	 */
 	printf("\nCompare file sync methods using one 8k write:\n");
 
@@ -176,7 +176,7 @@ main(int argc, char *argv[])
 	print_elapse(start_t, stop_t);
 
 	/*
-	 *	Compare file sync methods with two 8k write
+	 * Compare file sync methods with two 8k write
 	 */
 	printf("\nCompare file sync methods using two 8k writes:\n");
 
@@ -266,7 +266,7 @@ main(int argc, char *argv[])
 	print_elapse(start_t, stop_t);
 
 	/*
-	 *	Compare 1 to 2 writes
+	 * Compare 1 to 2 writes
 	 */
 	printf("\nCompare open_sync sizes:\n");
 
@@ -309,7 +309,7 @@ main(int argc, char *argv[])
 #endif
 
 	/*
-	 *	Fsync another file descriptor?
+	 * Fsync another file descriptor?
 	 */
 	printf("\nCompare fsync times on write() and new file descriptors (if the times\n");
 	printf("are similar, fsync() can sync data written on a different descriptor):\n");
diff --git a/src/tools/ifaddrs/test_ifaddrs.c b/src/tools/ifaddrs/test_ifaddrs.c
index 78013106ccd92cf8fbd1ae94917e0aac08d54b47..9ba6f118092c40b5d32f52207922523d1d6a1b84 100644
--- a/src/tools/ifaddrs/test_ifaddrs.c
+++ b/src/tools/ifaddrs/test_ifaddrs.c
@@ -1,5 +1,5 @@
 /*
- * $PostgreSQL: pgsql/src/tools/ifaddrs/test_ifaddrs.c,v 1.1 2009/10/01 01:58:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/tools/ifaddrs/test_ifaddrs.c,v 1.2 2010/02/26 02:01:40 momjian Exp $
  *
  *
  *	test_ifaddrs.c
@@ -16,10 +16,11 @@
 
 
 static void
-print_addr(struct sockaddr *addr)
+print_addr(struct sockaddr * addr)
 {
-	char buffer[256];
-	int ret, len;
+	char		buffer[256];
+	int			ret,
+				len;
 
 	switch (addr->sa_family)
 	{
@@ -45,7 +46,7 @@ print_addr(struct sockaddr *addr)
 }
 
 static void
-callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
+callback(struct sockaddr * addr, struct sockaddr * mask, void *unused)
 {
 	printf("addr: ");
 	print_addr(addr);
@@ -58,7 +59,7 @@ int
 main(int argc, char *argv[])
 {
 #ifdef WIN32
-	WSADATA wsaData;
+	WSADATA		wsaData;
 
 	if (WSAStartup(MAKEWORD(2, 2), &wsaData) != 0)
 	{