diff --git a/contrib/btree_gist/btree_utils_var.h b/contrib/btree_gist/btree_utils_var.h
index fe91d122e67baf1f1d66806523a657f04dd9be25..57d10e80f197a58649a0cc2d0bdba483c169c362 100644
--- a/contrib/btree_gist/btree_utils_var.h
+++ b/contrib/btree_gist/btree_utils_var.h
@@ -33,12 +33,12 @@ typedef struct
 
 	/* Methods */
 
-	bool		(*f_gt) (const void *, const void *, Oid);	/* greater than */
-	bool		(*f_ge) (const void *, const void *, Oid);	/* greater equal */
-	bool		(*f_eq) (const void *, const void *, Oid);	/* equal */
-	bool		(*f_le) (const void *, const void *, Oid);	/* less equal */
-	bool		(*f_lt) (const void *, const void *, Oid);	/* less than */
-	int32		(*f_cmp) (const void *, const void *, Oid);	/* compare */
+	bool		(*f_gt) (const void *, const void *, Oid);		/* greater than */
+	bool		(*f_ge) (const void *, const void *, Oid);		/* greater equal */
+	bool		(*f_eq) (const void *, const void *, Oid);		/* equal */
+	bool		(*f_le) (const void *, const void *, Oid);		/* less equal */
+	bool		(*f_lt) (const void *, const void *, Oid);		/* less than */
+	int32		(*f_cmp) (const void *, const void *, Oid);		/* compare */
 	GBT_VARKEY *(*f_l2n) (GBT_VARKEY *);		/* convert leaf to node */
 } gbtree_vinfo;
 
diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c
index 281d5892ee1e685965f9f3d77cd638ec4dd76cec..d2e0d582f279f2ab806f2a87d7d9facb28e67dea 100644
--- a/contrib/pg_standby/pg_standby.c
+++ b/contrib/pg_standby/pg_standby.c
@@ -533,7 +533,7 @@ usage(void)
 		   "Main intended use as restore_command in recovery.conf:\n"
 		   "  restore_command = 'pg_standby [OPTION]... ARCHIVELOCATION %%f %%p %%r'\n"
 		   "e.g.\n"
-		   "  restore_command = 'pg_standby /mnt/server/archiverdir %%f %%p %%r'\n");
+	"  restore_command = 'pg_standby /mnt/server/archiverdir %%f %%p %%r'\n");
 	printf("\nReport bugs to <pgsql-bugs@postgresql.org>.\n");
 }
 
diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c
index 60c1fbbf9761110e2f59b1171e68ddd35c110d36..fdec6e34af166362f5553b21f0356e0ef4468db0 100644
--- a/contrib/pg_upgrade/check.c
+++ b/contrib/pg_upgrade/check.c
@@ -362,7 +362,7 @@ check_new_cluster_is_empty(void)
 			/* pg_largeobject and its index should be skipped */
 			if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
 				pg_log(PG_FATAL, "New cluster database \"%s\" is not empty\n",
-					new_cluster.dbarr.dbs[dbnum].db_name);
+					   new_cluster.dbarr.dbs[dbnum].db_name);
 		}
 	}
 
@@ -381,17 +381,18 @@ check_new_cluster_is_empty(void)
 static void
 check_old_cluster_has_new_cluster_dbs(void)
 {
-	int			old_dbnum, new_dbnum;
+	int			old_dbnum,
+				new_dbnum;
 
 	for (new_dbnum = 0; new_dbnum < new_cluster.dbarr.ndbs; new_dbnum++)
 	{
 		for (old_dbnum = 0; old_dbnum < old_cluster.dbarr.ndbs; old_dbnum++)
 			if (strcmp(old_cluster.dbarr.dbs[old_dbnum].db_name,
-				new_cluster.dbarr.dbs[new_dbnum].db_name) == 0)
+					   new_cluster.dbarr.dbs[new_dbnum].db_name) == 0)
 				break;
 		if (old_dbnum == old_cluster.dbarr.ndbs)
 			pg_log(PG_FATAL, "New cluster database \"%s\" does not exist in the old cluster\n",
-				new_cluster.dbarr.dbs[new_dbnum].db_name);
+				   new_cluster.dbarr.dbs[new_dbnum].db_name);
 	}
 }
 
@@ -495,7 +496,7 @@ check_is_super_user(ClusterInfo *cluster)
 
 	if (PQntuples(res) != 1 || strcmp(PQgetvalue(res, 0, 0), "t") != 0)
 		pg_log(PG_FATAL, "database user \"%s\" is not a superuser\n",
-		os_info.user);
+			   os_info.user);
 
 	PQclear(res);
 
diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c
index c282ec2452d6a7a4cd1541dbee783167861e6c22..25ee81f2a0908df72b9026a7558a64159727565b 100644
--- a/contrib/pg_upgrade/controldata.c
+++ b/contrib/pg_upgrade/controldata.c
@@ -90,10 +90,10 @@ get_control_data(ClusterInfo *cluster, bool live_check)
 	pg_putenv("LC_TIME", NULL);
 	pg_putenv("LANG",
 #ifndef WIN32
-			NULL);
+			  NULL);
 #else
 	/* On Windows the default locale cannot be English, so force it */
-			"en");
+			  "en");
 #endif
 	pg_putenv("LANGUAGE", NULL);
 	pg_putenv("LC_ALL", NULL);
diff --git a/contrib/pg_upgrade/exec.c b/contrib/pg_upgrade/exec.c
index 93c923c556ae13fe890036b37a21150451a507fc..b7d82663ff376ff9c4954473edd61a7388abd5a0 100644
--- a/contrib/pg_upgrade/exec.c
+++ b/contrib/pg_upgrade/exec.c
@@ -99,16 +99,17 @@ verify_directories(void)
 
 	if (access(".", R_OK | W_OK
 #ifndef WIN32
+
 	/*
-	 *	Do a directory execute check only on Unix because execute permission
-	 *	on NTFS means "can execute scripts", which we don't care about.
-	 *	Also, X_OK is not defined in the Windows API.
+	 * Do a directory execute check only on Unix because execute permission on
+	 * NTFS means "can execute scripts", which we don't care about. Also, X_OK
+	 * is not defined in the Windows API.
 	 */
-					| X_OK
+			   | X_OK
 #endif
-		) != 0)
+			   ) != 0)
 		pg_log(PG_FATAL,
-		"You must have read and write access in the current directory.\n");
+		  "You must have read and write access in the current directory.\n");
 
 	check_bin_dir(&old_cluster);
 	check_data_dir(old_cluster.pgdata);
@@ -132,16 +133,18 @@ check_data_dir(const char *pg_data)
 {
 	char		subDirName[MAXPGPATH];
 	int			subdirnum;
+
 	/* start check with top-most directory */
 	const char *requiredSubdirs[] = {"", "base", "global", "pg_clog",
 		"pg_multixact", "pg_subtrans", "pg_tblspc", "pg_twophase",
-		"pg_xlog"};
+	"pg_xlog"};
 
 	for (subdirnum = 0;
 		 subdirnum < sizeof(requiredSubdirs) / sizeof(requiredSubdirs[0]);
 		 ++subdirnum)
 	{
 		struct stat statBuf;
+
 		snprintf(subDirName, sizeof(subDirName), "%s/%s", pg_data,
 				 requiredSubdirs[subdirnum]);
 
@@ -173,8 +176,8 @@ check_bin_dir(ClusterInfo *cluster)
 		report_status(PG_FATAL, "check for %s failed:  %s\n",
 					  cluster->bindir, getErrorText(errno));
 	else if (!S_ISDIR(statBuf.st_mode))
-			report_status(PG_FATAL, "%s is not a directory\n",
-						  cluster->bindir);
+		report_status(PG_FATAL, "%s is not a directory\n",
+					  cluster->bindir);
 
 	validate_exec(cluster->bindir, "postgres");
 	validate_exec(cluster->bindir, "pg_ctl");
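The hunk above folds an #ifdef into the middle of the access() call, which the re-indentation makes hard to follow in diff form. A minimal standalone sketch of the effective permission probe, assuming a POSIX build (the variable and program structure below are illustrative, not pg_upgrade's own; only the message text and the R_OK | W_OK | X_OK logic come from the hunk):

#include <stdio.h>
#include <unistd.h>				/* access(), R_OK, W_OK, X_OK */

int
main(void)
{
	int			mode = R_OK | W_OK;

#ifndef WIN32
	/* On Unix, also require "search" (execute) permission on the directory. */
	mode |= X_OK;
#endif

	if (access(".", mode) != 0)
	{
		fprintf(stderr, "You must have read and write access in the current directory.\n");
		return 1;
	}
	return 0;
}
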
diff --git a/contrib/pg_upgrade/option.c b/contrib/pg_upgrade/option.c
index e545458a75a86f4c874c57dc59aac4a0d5eb82d5..abb74a5bfa44b38881790885fd5d31f6d2e013a2 100644
--- a/contrib/pg_upgrade/option.c
+++ b/contrib/pg_upgrade/option.c
@@ -158,6 +158,7 @@ parseCommandLine(int argc, char *argv[])
 			case 'u':
 				pg_free(os_info.user);
 				os_info.user = pg_strdup(optarg);
+
 				/*
 				 * Push the user name into the environment so pre-9.1
 				 * pg_ctl/libpq uses it.
diff --git a/contrib/pg_upgrade/pg_upgrade.h b/contrib/pg_upgrade/pg_upgrade.h
index 1f31daecfe9b45a5ad16d29f085fb5d9c7a6e0f3..a3a085630910849e3aaf804e5a06c607dd48ca53 100644
--- a/contrib/pg_upgrade/pg_upgrade.h
+++ b/contrib/pg_upgrade/pg_upgrade.h
@@ -378,7 +378,7 @@ void	   *pg_malloc(int size);
 void		pg_free(void *ptr);
 const char *getErrorText(int errNum);
 unsigned int str2uint(const char *str);
-void 		pg_putenv(const char *var, const char *val);
+void		pg_putenv(const char *var, const char *val);
 
 
 /* version.c */
diff --git a/contrib/pg_upgrade/server.c b/contrib/pg_upgrade/server.c
index 839f39f572fbe7504ce8ac57251e451092d1adea..58c1234a9482ed5b3f872b60be42f70e39133dc5 100644
--- a/contrib/pg_upgrade/server.c
+++ b/contrib/pg_upgrade/server.c
@@ -52,8 +52,8 @@ get_db_conn(ClusterInfo *cluster, const char *db_name)
 	char		conn_opts[MAXPGPATH];
 
 	snprintf(conn_opts, sizeof(conn_opts),
-		 "dbname = '%s' user = '%s' port = %d", db_name, os_info.user,
-		 cluster->port);
+			 "dbname = '%s' user = '%s' port = %d", db_name, os_info.user,
+			 cluster->port);
 
 	return PQconnectdb(conn_opts);
 }
@@ -146,16 +146,18 @@ start_postmaster(ClusterInfo *cluster)
 	PGconn	   *conn;
 	bool		exit_hook_registered = false;
 	int			pg_ctl_return = 0;
+
 #ifndef WIN32
-	char		*output_filename = log_opts.filename;
+	char	   *output_filename = log_opts.filename;
 #else
+
 	/*
 	 * On Win32, we can't send both pg_upgrade output and pg_ctl output to the
 	 * same file because we get the error: "The process cannot access the file
 	 * because it is being used by another process." so we have to send all
 	 * other output to 'nul'.
 	 */
-	char		*output_filename = DEVNULL;
+	char	   *output_filename = DEVNULL;
 #endif
 
 	if (!exit_hook_registered)
@@ -180,13 +182,13 @@ start_postmaster(ClusterInfo *cluster)
 			 "-o \"-p %d %s\" start >> \"%s\" 2>&1" SYSTEMQUOTE,
 			 cluster->bindir, output_filename, cluster->pgdata, cluster->port,
 			 (cluster->controldata.cat_ver >=
-				BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
-				"-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
+			  BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
+			 "-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
 			 log_opts.filename);
 
 	/*
-	 * Don't throw an error right away, let connecting throw the error
-	 * because it might supply a reason for the failure.
+	 * Don't throw an error right away, let connecting throw the error because
+	 * it might supply a reason for the failure.
 	 */
 	pg_ctl_return = exec_prog(false, "%s", cmd);
 
@@ -196,7 +198,7 @@ start_postmaster(ClusterInfo *cluster)
 	{
 		pg_log(PG_REPORT, "\nconnection to database failed: %s\n",
 			   PQerrorMessage(conn));
- 		if (conn)
+		if (conn)
 			PQfinish(conn);
 		pg_log(PG_FATAL, "unable to connect to %s postmaster started with the command: %s\n",
 			   CLUSTER_NAME(cluster), cmd);
@@ -206,8 +208,8 @@ start_postmaster(ClusterInfo *cluster)
 	/* If the connection didn't fail, fail now */
 	if (pg_ctl_return != 0)
 		pg_log(PG_FATAL, "pg_ctl failed to start the %s server\n",
-				CLUSTER_NAME(cluster));
-	
+			   CLUSTER_NAME(cluster));
+
 	os_info.running_cluster = cluster;
 }
 
@@ -218,11 +220,12 @@ stop_postmaster(bool fast)
 	char		cmd[MAXPGPATH];
 	const char *bindir;
 	const char *datadir;
+
 #ifndef WIN32
-	char		*output_filename = log_opts.filename;
+	char	   *output_filename = log_opts.filename;
 #else
 	/* See comment in start_postmaster() about why win32 output is ignored. */
-	char		*output_filename = DEVNULL;
+	char	   *output_filename = DEVNULL;
 #endif
 
 	if (os_info.running_cluster == &old_cluster)
@@ -268,17 +271,17 @@ check_pghost_envvar(void)
 	for (option = start; option->keyword != NULL; option++)
 	{
 		if (option->envvar && (strcmp(option->envvar, "PGHOST") == 0 ||
-			strcmp(option->envvar, "PGHOSTADDR") == 0))
+							   strcmp(option->envvar, "PGHOSTADDR") == 0))
 		{
 			const char *value = getenv(option->envvar);
 
 			if (value && strlen(value) > 0 &&
-				/* check for 'local' host values */
+			/* check for 'local' host values */
 				(strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 &&
 				 strcmp(value, "::1") != 0 && value[0] != '/'))
 				pg_log(PG_FATAL,
-					"libpq environment variable %s has a non-local server value: %s\n",
-					option->envvar, value);
+					   "libpq environment variable %s has a non-local server value: %s\n",
+					   option->envvar, value);
 		}
 	}
 
diff --git a/contrib/pg_upgrade/util.c b/contrib/pg_upgrade/util.c
index 4094895f46c28632f0672679aea6ca41d3859996..f6582f5e3852462c63196217ed3d75ee33a93ab4 100644
--- a/contrib/pg_upgrade/util.c
+++ b/contrib/pg_upgrade/util.c
@@ -281,4 +281,3 @@ pg_putenv(const char *var, const char *val)
 #endif
 	}
 }
-
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 5b35b50034a1623a144e5c8ffb0718896a1d378e..866785837f5ced5e27fb3ba5aeefac97ab9aa15f 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -56,15 +56,15 @@ callConsistentFn(GinState *ginstate, GinScanKey key)
 	key->recheckCurItem = true;
 
 	return DatumGetBool(FunctionCall8Coll(&ginstate->consistentFn[key->attnum - 1],
-										  ginstate->supportCollation[key->attnum - 1],
+								 ginstate->supportCollation[key->attnum - 1],
 										  PointerGetDatum(key->entryRes),
 										  UInt16GetDatum(key->strategy),
 										  key->query,
 										  UInt32GetDatum(key->nuserentries),
 										  PointerGetDatum(key->extra_data),
-										  PointerGetDatum(&key->recheckCurItem),
+									   PointerGetDatum(&key->recheckCurItem),
 										  PointerGetDatum(key->queryValues),
-										  PointerGetDatum(key->queryCategories)));
+									 PointerGetDatum(key->queryCategories)));
 }
 
 /*
@@ -252,7 +252,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
 			 *----------
 			 */
 			cmp = DatumGetInt32(FunctionCall4Coll(&btree->ginstate->comparePartialFn[attnum - 1],
-												  btree->ginstate->supportCollation[attnum - 1],
+							   btree->ginstate->supportCollation[attnum - 1],
 												  scanEntry->queryKey,
 												  idatum,
 										 UInt16GetDatum(scanEntry->strategy),
@@ -1178,10 +1178,10 @@ matchPartialInPendingList(GinState *ginstate, Page page,
 		 *----------
 		 */
 		cmp = DatumGetInt32(FunctionCall4Coll(&ginstate->comparePartialFn[entry->attnum - 1],
-											  ginstate->supportCollation[entry->attnum - 1],
+							   ginstate->supportCollation[entry->attnum - 1],
 											  entry->queryKey,
 											  datum[off - 1],
-										  UInt16GetDatum(entry->strategy),
+											  UInt16GetDatum(entry->strategy),
 										PointerGetDatum(entry->extra_data)));
 		if (cmp == 0)
 			return true;
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index d9f5b8c012e6093b1e5749e636c8e80b30cb655d..f8d54b1b4629409f85d3c6f213b0cf23777d6994 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -306,11 +306,11 @@ ginNewScanKey(IndexScanDesc scan)
 		/* OK to call the extractQueryFn */
 		queryValues = (Datum *)
 			DatumGetPointer(FunctionCall7Coll(&so->ginstate.extractQueryFn[skey->sk_attno - 1],
-											  so->ginstate.supportCollation[skey->sk_attno - 1],
+						   so->ginstate.supportCollation[skey->sk_attno - 1],
 											  skey->sk_argument,
 											  PointerGetDatum(&nQueryValues),
-											  UInt16GetDatum(skey->sk_strategy),
-											  PointerGetDatum(&partial_matches),
+										   UInt16GetDatum(skey->sk_strategy),
+										   PointerGetDatum(&partial_matches),
 											  PointerGetDatum(&extra_data),
 											  PointerGetDatum(&nullFlags),
 											  PointerGetDatum(&searchMode)));
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 1ae51b106023b5411cbf6018d46054e3c47ed38a..ba142bc874dbff539f8751e4a40942fb813030e4 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -94,8 +94,8 @@ initGinState(GinState *state, Relation index)
 		 * type for a noncollatable indexed data type (for instance, hstore
 		 * uses text index entries).  If there's no index collation then
 		 * specify default collation in case the support functions need
-		 * collation.  This is harmless if the support functions don't
-		 * care about collation, so we just do it unconditionally.  (We could
+		 * collation.  This is harmless if the support functions don't care
+		 * about collation, so we just do it unconditionally.  (We could
 		 * alternatively call get_typcollation, but that seems like expensive
 		 * overkill --- there aren't going to be any cases where a GIN storage
 		 * type has a nondefault collation.)
@@ -293,7 +293,7 @@ ginCompareEntries(GinState *ginstate, OffsetNumber attnum,
 
 	/* both not null, so safe to call the compareFn */
 	return DatumGetInt32(FunctionCall2Coll(&ginstate->compareFn[attnum - 1],
-										   ginstate->supportCollation[attnum - 1],
+									  ginstate->supportCollation[attnum - 1],
 										   a, b));
 }
 
@@ -400,7 +400,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 	nullFlags = NULL;			/* in case extractValue doesn't set it */
 	entries = (Datum *)
 		DatumGetPointer(FunctionCall3Coll(&ginstate->extractValueFn[attnum - 1],
-										  ginstate->supportCollation[attnum - 1],
+									  ginstate->supportCollation[attnum - 1],
 										  value,
 										  PointerGetDatum(nentries),
 										  PointerGetDatum(&nullFlags)));
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 0e779e09d7143a02b5a583b991cef76a0d81f705..8227bfdb88ba8bf2b92ba133b541d796a018e2e1 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1399,7 +1399,7 @@ initGISTstate(GISTSTATE *giststate, Relation index)
 		/*
 		 * If the index column has a specified collation, we should honor that
 		 * while doing comparisons.  However, we may have a collatable storage
-		 * type for a noncollatable indexed data type.  If there's no index
+		 * type for a noncollatable indexed data type.	If there's no index
 		 * collation then specify default collation in case the support
 		 * functions need collation.  This is harmless if the support
 		 * functions don't care about collation, so we just do it
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 1aabcc527ac54aa80a044ee29fa22f4f4201a1ad..1754a103699ffa32bb2e0df30bc14188c2eb6dd3 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -448,7 +448,7 @@ gistdentryinit(GISTSTATE *giststate, int nkey, GISTENTRY *e,
 		gistentryinit(*e, k, r, pg, o, l);
 		dep = (GISTENTRY *)
 			DatumGetPointer(FunctionCall1Coll(&giststate->decompressFn[nkey],
-											  giststate->supportCollation[nkey],
+										   giststate->supportCollation[nkey],
 											  PointerGetDatum(e)));
 		/* decompressFn may just return the given pointer */
 		if (dep != e)
@@ -475,7 +475,7 @@ gistcentryinit(GISTSTATE *giststate, int nkey,
 		gistentryinit(*e, k, r, pg, o, l);
 		cep = (GISTENTRY *)
 			DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[nkey],
-											  giststate->supportCollation[nkey],
+										   giststate->supportCollation[nkey],
 											  PointerGetDatum(e)));
 		/* compressFn may just return the given pointer */
 		if (cep != e)
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 27c37d6173fdef36ad83fbeee6733b6999c38f44..02087659647cd8abb84487dd8d3c6269ba0df04f 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -80,7 +80,7 @@
  *
  * Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there
  * to check that we don't try to scan or do retail insertions into an index
- * that is currently being rebuilt or pending rebuild.  This helps to catch
+ * that is currently being rebuilt or pending rebuild.	This helps to catch
  * things that don't work when reindexing system catalogs.  The assertion
  * doesn't prevent the actual rebuild because we don't use RELATION_CHECKS
  * when calling the index AM's ambuild routine, and there is no reason for
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 55136e9cc4c067322b771abdc4e88166d799ef76..93a928c66b283cfa4eddbbe0759e569193dae17e 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -738,7 +738,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
 					{
 						compare =
 							DatumGetInt32(FunctionCall2Coll(&entry->sk_func,
-															entry->sk_collation,
+														 entry->sk_collation,
 															attrDatum1,
 															attrDatum2));
 
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 71bcb42c1908e85abddd73631666573dec2c2387..2e896a258f7cb3763552096af5d3d76eafa4d2f4 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -635,7 +635,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
 			*result = DatumGetBool(OidFunctionCall2Coll(cmp_proc,
 														op->sk_collation,
 														leftarg->sk_argument,
-														rightarg->sk_argument));
+													 rightarg->sk_argument));
 			return true;
 		}
 	}
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index e71090f71b5e8441aa06cd5b0a8368d87293f142..5c3ca479fb33fff646e3a7b08b53efea92b9a97f 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -6656,15 +6656,15 @@ StartupXLOG(void)
 			ereport(FATAL,
 					(errmsg("requested recovery stop point is before consistent recovery point")));
 		}
+
 		/*
-		 * Ran off end of WAL before reaching end-of-backup WAL record,
-		 * or minRecoveryPoint. That's usually a bad sign, indicating that
-		 * you tried to recover from an online backup but never called
+		 * Ran off end of WAL before reaching end-of-backup WAL record, or
+		 * minRecoveryPoint. That's usually a bad sign, indicating that you
+		 * tried to recover from an online backup but never called
 		 * pg_stop_backup(), or you didn't archive all the WAL up to that
-		 * point. However, this also happens in crash recovery, if the
-		 * system crashes while an online backup is in progress. We
-		 * must not treat that as an error, or the database will refuse
-		 * to start up.
+		 * point. However, this also happens in crash recovery, if the system
+		 * crashes while an online backup is in progress. We must not treat
+		 * that as an error, or the database will refuse to start up.
 		 */
 		if (InArchiveRecovery)
 		{
@@ -6674,7 +6674,7 @@ StartupXLOG(void)
 						 errhint("Online backup started with pg_start_backup() must be ended with pg_stop_backup(), and all WAL up to that point must be available at recovery.")));
 			else
 				ereport(FATAL,
-						(errmsg("WAL ends before consistent recovery point")));
+					  (errmsg("WAL ends before consistent recovery point")));
 		}
 	}
 
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 0898cf363e9219efb3049b0fbbda000fce8e6cc1..39ba4869af6fde2aa0be5784027645b064f993d2 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1773,8 +1773,8 @@ index_build(Relation heapRelation,
 	 * However, when reindexing an existing index, we should do nothing here.
 	 * Any HOT chains that are broken with respect to the index must predate
 	 * the index's original creation, so there is no need to change the
-	 * index's usability horizon.  Moreover, we *must not* try to change
-	 * the index's pg_index entry while reindexing pg_index itself, and this
+	 * index's usability horizon.  Moreover, we *must not* try to change the
+	 * index's pg_index entry while reindexing pg_index itself, and this
 	 * optimization nicely prevents that.
 	 */
 	if (indexInfo->ii_BrokenHotChain && !isreindex)
@@ -1824,7 +1824,7 @@ index_build(Relation heapRelation,
 
 	/*
 	 * If it's for an exclusion constraint, make a second pass over the heap
-	 * to verify that the constraint is satisfied.  We must not do this until
+	 * to verify that the constraint is satisfied.	We must not do this until
 	 * the index is fully valid.  (Broken HOT chains shouldn't matter, though;
 	 * see comments for IndexCheckExclusion.)
 	 */
@@ -2136,8 +2136,8 @@ IndexBuildHeapScan(Relation heapRelation,
 						/*
 						 * It's a HOT-updated tuple deleted by our own xact.
 						 * We can assume the deletion will commit (else the
-						 * index contents don't matter), so treat the same
-						 * as RECENTLY_DEAD HOT-updated tuples.
+						 * index contents don't matter), so treat the same as
+						 * RECENTLY_DEAD HOT-updated tuples.
 						 */
 						indexIt = false;
 						/* mark the index as unsafe for old snapshots */
@@ -2146,9 +2146,9 @@ IndexBuildHeapScan(Relation heapRelation,
 					else
 					{
 						/*
-						 * It's a regular tuple deleted by our own xact.
-						 * Index it but don't check for uniqueness, the same
-						 * as a RECENTLY_DEAD tuple.
+						 * It's a regular tuple deleted by our own xact. Index
+						 * it but don't check for uniqueness, the same as a
+						 * RECENTLY_DEAD tuple.
 						 */
 						indexIt = true;
 					}
@@ -2281,9 +2281,8 @@ IndexCheckExclusion(Relation heapRelation,
 
 	/*
 	 * If we are reindexing the target index, mark it as no longer being
-	 * reindexed, to forestall an Assert in index_beginscan when we try to
-	 * use the index for probes.  This is OK because the index is now
-	 * fully valid.
+	 * reindexed, to forestall an Assert in index_beginscan when we try to use
+	 * the index for probes.  This is OK because the index is now fully valid.
 	 */
 	if (ReindexIsCurrentlyProcessingIndex(RelationGetRelid(indexRelation)))
 		ResetReindexProcessing();
@@ -2855,9 +2854,9 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
 	 *
 	 * We can also reset indcheckxmin, because we have now done a
 	 * non-concurrent index build, *except* in the case where index_build
-	 * found some still-broken HOT chains.  If it did, we normally leave
+	 * found some still-broken HOT chains.	If it did, we normally leave
 	 * indcheckxmin alone (note that index_build won't have changed it,
-	 * because this is a reindex).  But if the index was invalid or not ready
+	 * because this is a reindex).	But if the index was invalid or not ready
 	 * and there were broken HOT chains, it seems best to force indcheckxmin
 	 * true, because the normal argument that the HOT chains couldn't conflict
 	 * with the index is suspect for an invalid index.
@@ -2929,7 +2928,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
  * the data in a manner that risks a change in constraint validity.
  *
  * Returns true if any indexes were rebuilt (including toast table's index
- * when relevant).  Note that a CommandCounterIncrement will occur after each
+ * when relevant).	Note that a CommandCounterIncrement will occur after each
  * index rebuild.
  */
 bool
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index d803d28a0679194197cc3f8267b12559f7ec5612..41e92992deec95301fdda6d51972f090ec5f9b72 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -362,7 +362,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
 
 /*
  * RangeVarGetAndCheckCreationNamespace
- *      As RangeVarGetCreationNamespace, but with a permissions check.
+ *		As RangeVarGetCreationNamespace, but with a permissions check.
  */
 Oid
 RangeVarGetAndCheckCreationNamespace(const RangeVar *newRelation)
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index a8cf0dbe2f25896ad4f1663ed1bd99997f3de68e..b059f9d784bd42e574eeb39228440a5292d84bdb 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -60,8 +60,8 @@ AlterTableCreateToastTable(Oid relOid, Datum reloptions)
 
 	/*
 	 * Grab a DDL-exclusive lock on the target table, since we'll update the
-	 * pg_class tuple.  This is redundant for all present users.  Tuple toasting
-	 * behaves safely in the face of a concurrent TOAST table add.
+	 * pg_class tuple.	This is redundant for all present users.  Tuple
+	 * toasting behaves safely in the face of a concurrent TOAST table add.
 	 */
 	rel = heap_open(relOid, ShareUpdateExclusiveLock);
 
@@ -274,13 +274,13 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, Datum reloptio
 	coloptions[1] = 0;
 
 	index_create(toast_rel, toast_idxname, toastIndexOid,
-							   indexInfo,
-							   list_make2("chunk_id", "chunk_seq"),
-							   BTREE_AM_OID,
-							   rel->rd_rel->reltablespace,
-					 collationObjectId, classObjectId, coloptions, (Datum) 0,
-							   true, false, false, false,
-							   true, false, false);
+				 indexInfo,
+				 list_make2("chunk_id", "chunk_seq"),
+				 BTREE_AM_OID,
+				 rel->rd_rel->reltablespace,
+				 collationObjectId, classObjectId, coloptions, (Datum) 0,
+				 true, false, false, false,
+				 true, false, false);
 
 	heap_close(toast_rel, NoLock);
 
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index fa84989fc6fa8be90d4eecb9c33e94a232d79880..57188bc25a7bab0c13aacd958274131557befd72 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -566,7 +566,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, bool inh)
 	}
 
 	/*
-	 * Report ANALYZE to the stats collector, too.  However, if doing
+	 * Report ANALYZE to the stats collector, too.	However, if doing
 	 * inherited stats we shouldn't report, because the stats collector only
 	 * tracks per-table stats.
 	 */
@@ -1231,7 +1231,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
 		qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
 
 	/*
-	 * Estimate total numbers of rows in relation.  For live rows, use
+	 * Estimate total numbers of rows in relation.	For live rows, use
 	 * vac_estimate_reltuples; for dead rows, we have no source of old
 	 * information, so we have to assume the density is the same in unseen
 	 * pages as in the pages we scanned.
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 0ab3a8bcfae74bd20576c4374bb5372e310f309d..c020dc2e4ecbd8702eb1f7c3e0f97b457d1c5e4f 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -762,12 +762,12 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
 
 	/*
 	 * If the OldHeap has a toast table, get lock on the toast table to keep
-	 * it from being vacuumed.  This is needed because autovacuum processes
+	 * it from being vacuumed.	This is needed because autovacuum processes
 	 * toast tables independently of their main tables, with no lock on the
-	 * latter.  If an autovacuum were to start on the toast table after we
+	 * latter.	If an autovacuum were to start on the toast table after we
 	 * compute our OldestXmin below, it would use a later OldestXmin, and then
 	 * possibly remove as DEAD toast tuples belonging to main tuples we think
-	 * are only RECENTLY_DEAD.  Then we'd fail while trying to copy those
+	 * are only RECENTLY_DEAD.	Then we'd fail while trying to copy those
 	 * tuples.
 	 *
 	 * We don't need to open the toast relation here, just lock it.  The lock
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 479db2c3f116782526e536436836b15bf3ad47f0..b7c021d943a99e96ff5aab93caff7aecc83df66f 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -185,14 +185,15 @@ DefineIndex(RangeVar *heapRelation,
 		rel->rd_rel->relkind != RELKIND_UNCATALOGED)
 	{
 		if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+
 			/*
-			 * Custom error message for FOREIGN TABLE since the term is
-			 * close to a regular table and can confuse the user.
+			 * Custom error message for FOREIGN TABLE since the term is close
+			 * to a regular table and can confuse the user.
 			 */
 			ereport(ERROR,
 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
 					 errmsg("cannot create index on foreign table \"%s\"",
-						 heapRelation->relname)));
+							heapRelation->relname)));
 		else
 			ereport(ERROR,
 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 383690270b8801fc12e8515832c1e45d1a31dd37..be04177a2ee079e18e9bea7f4d6d791730f060d1 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -1077,12 +1077,12 @@ read_info(SeqTable elm, Relation rel, Buffer *buf)
 	tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
 
 	/*
-	 * Previous releases of Postgres neglected to prevent SELECT FOR UPDATE
-	 * on a sequence, which would leave a non-frozen XID in the sequence
-	 * tuple's xmax, which eventually leads to clog access failures or worse.
-	 * If we see this has happened, clean up after it.  We treat this like a
-	 * hint bit update, ie, don't bother to WAL-log it, since we can certainly
-	 * do this again if the update gets lost.
+	 * Previous releases of Postgres neglected to prevent SELECT FOR UPDATE on
+	 * a sequence, which would leave a non-frozen XID in the sequence tuple's
+	 * xmax, which eventually leads to clog access failures or worse. If we
+	 * see this has happened, clean up after it.  We treat this like a hint
+	 * bit update, ie, don't bother to WAL-log it, since we can certainly do
+	 * this again if the update gets lost.
 	 */
 	if (HeapTupleHeaderGetXmax(tuple.t_data) != InvalidTransactionId)
 	{
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 6279f2bf9a5caf7e28308b345e2221923997fe45..2c9f855f531238c7015de9407265806852112d57 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -2679,7 +2679,8 @@ AlterTableGetLockLevel(List *cmds)
 				 * These subcommands affect implicit row type conversion. They
 				 * have affects similar to CREATE/DROP CAST on queries.  We
 				 * don't provide for invalidating parse trees as a result of
-				 * such changes.  Do avoid concurrent pg_class updates, though.
+				 * such changes.  Do avoid concurrent pg_class updates,
+				 * though.
 				 */
 			case AT_AddOf:
 			case AT_DropOf:
@@ -2946,7 +2947,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
 		case AT_DisableRule:
 		case AT_DropInherit:	/* NO INHERIT */
 		case AT_AddOf:			/* OF */
-		case AT_DropOf:			/* NOT OF */
+		case AT_DropOf: /* NOT OF */
 			ATSimplePermissions(rel, ATT_TABLE);
 			/* These commands never recurse */
 			/* No command-specific prep needed */
@@ -4067,7 +4068,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be
  *
  * Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF.  If it
  * isn't suitable, throw an error.  Currently, we require that the type
- * originated with CREATE TYPE AS.  We could support any row type, but doing so
+ * originated with CREATE TYPE AS.	We could support any row type, but doing so
  * would require handling a number of extra corner cases in the DDL commands.
  */
 void
@@ -4083,6 +4084,7 @@ check_of_type(HeapTuple typetuple)
 		Assert(OidIsValid(typ->typrelid));
 		typeRelation = relation_open(typ->typrelid, AccessShareLock);
 		typeOk = (typeRelation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE);
+
 		/*
 		 * Close the parent rel, but keep our AccessShareLock on it until xact
 		 * commit.	That will prevent someone else from deleting or ALTERing
@@ -7406,8 +7408,8 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock
 		default:
 			ereport(ERROR,
 					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-					 errmsg("\"%s\" is not a table, view, sequence, or foreign table",
-							NameStr(tuple_class->relname))));
+			errmsg("\"%s\" is not a table, view, sequence, or foreign table",
+				   NameStr(tuple_class->relname))));
 	}
 
 	/*
@@ -8603,7 +8605,7 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
  * Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE
  * INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or
  * heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will
- * be TypeRelationId).  There's no convenient way to do this, so go trawling
+ * be TypeRelationId).	There's no convenient way to do this, so go trawling
  * through pg_depend.
  */
 static void
@@ -8730,8 +8732,8 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
 		if (strncmp(table_attname, type_attname, NAMEDATALEN) != 0)
 			ereport(ERROR,
 					(errcode(ERRCODE_DATATYPE_MISMATCH),
-					 errmsg("table has column \"%s\" where type requires \"%s\"",
-							table_attname, type_attname)));
+				 errmsg("table has column \"%s\" where type requires \"%s\"",
+						table_attname, type_attname)));
 
 		/* Compare type. */
 		if (table_attr->atttypid != type_attr->atttypid ||
@@ -8739,8 +8741,8 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
 			table_attr->attcollation != type_attr->attcollation)
 			ereport(ERROR,
 					(errcode(ERRCODE_DATATYPE_MISMATCH),
-					 errmsg("table \"%s\" has different type for column \"%s\"",
-							RelationGetRelationName(rel), type_attname)));
+				  errmsg("table \"%s\" has different type for column \"%s\"",
+						 RelationGetRelationName(rel), type_attname)));
 	}
 	DecrTupleDescRefCount(typeTupleDesc);
 
@@ -8748,6 +8750,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
 	for (; table_attno <= tableTupleDesc->natts; table_attno++)
 	{
 		Form_pg_attribute table_attr = tableTupleDesc->attrs[table_attno - 1];
+
 		if (!table_attr->attisdropped)
 			ereport(ERROR,
 					(errcode(ERRCODE_DATATYPE_MISMATCH),
@@ -8785,7 +8788,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
 /*
  * ALTER TABLE NOT OF
  *
- * Detach a typed table from its originating type.  Just clear reloftype and
+ * Detach a typed table from its originating type.	Just clear reloftype and
  * remove the dependency.
  */
 static void
@@ -8802,8 +8805,8 @@ ATExecDropOf(Relation rel, LOCKMODE lockmode)
 						RelationGetRelationName(rel))));
 
 	/*
-	 * We don't bother to check ownership of the type --- ownership of the table
-	 * is presumed enough rights.  No lock required on the type, either.
+	 * We don't bother to check ownership of the type --- ownership of the
+	 * table is presumed enough rights.  No lock required on the type, either.
 	 */
 
 	drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype);
diff --git a/src/backend/commands/tsearchcmds.c b/src/backend/commands/tsearchcmds.c
index d08c9bbbc5ce4699d7437639dc2154a781850ca8..3355eaafda26a6186b3163204f528c5948e678a4 100644
--- a/src/backend/commands/tsearchcmds.c
+++ b/src/backend/commands/tsearchcmds.c
@@ -96,6 +96,7 @@ get_ts_parser_func(DefElem *defel, int attnum)
 			break;
 		case Anum_pg_ts_parser_prslextype:
 			nargs = 1;
+
 			/*
 			 * Note: because the lextype method returns type internal, it must
 			 * have an internal-type argument for security reasons.  The
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index f8eb5bc4a65cc83f0c6ff2e363444c3b38d3afca..66c11de6723ab95c9fe72e7655fb3df41db8727e 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -1069,7 +1069,7 @@ DefineDomain(CreateDomainStmt *stmt)
 				   basetypeMod, /* typeMod value */
 				   typNDims,	/* Array dimensions for base type */
 				   typNotNull,	/* Type NOT NULL */
-				   domaincoll);	/* type's collation */
+				   domaincoll); /* type's collation */
 
 	/*
 	 * Process constraints which refer to the domain ID returned by TypeCreate
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 224c34f6e7808f79e704264d8036d045c8a2aeb2..5cbf3a04f807f82b58f9acad745babea80db7d4f 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -459,7 +459,7 @@ vacuum_set_xid_limits(int freeze_min_age,
  *		If we scanned the whole relation then we should just use the count of
  *		live tuples seen; but if we did not, we should not trust the count
  *		unreservedly, especially not in VACUUM, which may have scanned a quite
- *		nonrandom subset of the table.  When we have only partial information,
+ *		nonrandom subset of the table.	When we have only partial information,
  *		we take the old value of pg_class.reltuples as a measurement of the
  *		tuple density in the unscanned pages.
  *
@@ -471,7 +471,7 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
 					   BlockNumber scanned_pages,
 					   double scanned_tuples)
 {
-	BlockNumber	old_rel_pages = relation->rd_rel->relpages;
+	BlockNumber old_rel_pages = relation->rd_rel->relpages;
 	double		old_rel_tuples = relation->rd_rel->reltuples;
 	double		old_density;
 	double		new_density;
@@ -483,8 +483,8 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
 		return scanned_tuples;
 
 	/*
-	 * If scanned_pages is zero but total_pages isn't, keep the existing
-	 * value of reltuples.
+	 * If scanned_pages is zero but total_pages isn't, keep the existing value
+	 * of reltuples.
 	 */
 	if (scanned_pages == 0)
 		return old_rel_tuples;
@@ -498,23 +498,23 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
 
 	/*
 	 * Okay, we've covered the corner cases.  The normal calculation is to
-	 * convert the old measurement to a density (tuples per page), then
-	 * update the density using an exponential-moving-average approach,
-	 * and finally compute reltuples as updated_density * total_pages.
+	 * convert the old measurement to a density (tuples per page), then update
+	 * the density using an exponential-moving-average approach, and finally
+	 * compute reltuples as updated_density * total_pages.
 	 *
-	 * For ANALYZE, the moving average multiplier is just the fraction of
-	 * the table's pages we scanned.  This is equivalent to assuming
-	 * that the tuple density in the unscanned pages didn't change.  Of
-	 * course, it probably did, if the new density measurement is different.
-	 * But over repeated cycles, the value of reltuples will converge towards
-	 * the correct value, if repeated measurements show the same new density.
+	 * For ANALYZE, the moving average multiplier is just the fraction of the
+	 * table's pages we scanned.  This is equivalent to assuming that the
+	 * tuple density in the unscanned pages didn't change.  Of course, it
+	 * probably did, if the new density measurement is different. But over
+	 * repeated cycles, the value of reltuples will converge towards the
+	 * correct value, if repeated measurements show the same new density.
 	 *
 	 * For VACUUM, the situation is a bit different: we have looked at a
 	 * nonrandom sample of pages, but we know for certain that the pages we
 	 * didn't look at are precisely the ones that haven't changed lately.
 	 * Thus, there is a reasonable argument for doing exactly the same thing
-	 * as for the ANALYZE case, that is use the old density measurement as
-	 * the value for the unscanned pages.
+	 * as for the ANALYZE case, that is use the old density measurement as the
+	 * value for the unscanned pages.
 	 *
 	 * This logic could probably use further refinement.
 	 */
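The reflowed comment above describes the normal reltuples update as an exponential moving average over tuple density, with the multiplier being the fraction of pages scanned. A minimal sketch of that arithmetic for the ANALYZE case, leaving out the corner cases the surrounding hunks handle first (the helper name, program structure, and the sample numbers are illustrative only):

#include <stdio.h>
#include <math.h>

/* Illustrative only: density-based moving-average estimate. */
static double
estimate_reltuples(double old_rel_pages, double old_rel_tuples,
				   double total_pages, double scanned_pages,
				   double scanned_tuples)
{
	double		old_density = old_rel_tuples / old_rel_pages;
	double		new_density = scanned_tuples / scanned_pages;
	double		multiplier = scanned_pages / total_pages;	/* fraction scanned */
	double		updated_density;

	updated_density = old_density + (new_density - old_density) * multiplier;
	return floor(updated_density * total_pages + 0.5);
}

int
main(void)
{
	/*
	 * Old estimate: 1000 pages, 100000 tuples (100 tuples/page).  We scanned
	 * 100 of 1000 pages and counted 12000 tuples (120 tuples/page), so the
	 * density moves 10% of the way from 100 toward 120, giving
	 * 102 tuples/page * 1000 pages = 102000.
	 */
	printf("%.0f\n",
		   estimate_reltuples(1000, 100000, 1000, 100, 12000));
	return 0;
}
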
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index ce5fa180662ed1474849b8e94817cff8ba149dd6..ccc586f12efa5ca40ac018de0864a57af467fba5 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -86,7 +86,7 @@ typedef struct LVRelStats
 	/* Overall statistics about rel */
 	BlockNumber rel_pages;		/* total number of pages */
 	BlockNumber scanned_pages;	/* number of pages we examined */
-	double		scanned_tuples;	/* counts only tuples on scanned pages */
+	double		scanned_tuples; /* counts only tuples on scanned pages */
 	double		old_rel_tuples; /* previous value of pg_class.reltuples */
 	double		new_rel_tuples; /* new estimated total # of tuples */
 	BlockNumber pages_removed;
@@ -211,7 +211,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
 	vac_update_relstats(onerel,
 						vacrelstats->rel_pages, vacrelstats->new_rel_tuples,
 						vacrelstats->hasindex,
-						(vacrelstats->scanned_pages < vacrelstats->rel_pages) ?
+					  (vacrelstats->scanned_pages < vacrelstats->rel_pages) ?
 						InvalidTransactionId :
 						FreezeLimit);
 
@@ -341,9 +341,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 	 * of pages.
 	 *
 	 * Before entering the main loop, establish the invariant that
-	 * next_not_all_visible_block is the next block number >= blkno that's
-	 * not all-visible according to the visibility map, or nblocks if there's
-	 * no such block.  Also, we set up the skipping_all_visible_blocks flag,
+	 * next_not_all_visible_block is the next block number >= blkno that's not
+	 * all-visible according to the visibility map, or nblocks if there's no
+	 * such block.	Also, we set up the skipping_all_visible_blocks flag,
 	 * which is needed because we need hysteresis in the decision: once we've
 	 * started skipping blocks, we may as well skip everything up to the next
 	 * not-all-visible block.
@@ -804,7 +804,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 	/* now we can compute the new value for pg_class.reltuples */
 	vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
 														 nblocks,
-														 vacrelstats->scanned_pages,
+												  vacrelstats->scanned_pages,
 														 num_tuples);
 
 	/* If any tuples need to be deleted, perform final vacuum cycle */
@@ -1082,11 +1082,11 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 	if (new_rel_pages != old_rel_pages)
 	{
 		/*
-		 * Note: we intentionally don't update vacrelstats->rel_pages with
-		 * the new rel size here.  If we did, it would amount to assuming that
-		 * the new pages are empty, which is unlikely.  Leaving the numbers
-		 * alone amounts to assuming that the new pages have the same tuple
-		 * density as existing ones, which is less unlikely.
+		 * Note: we intentionally don't update vacrelstats->rel_pages with the
+		 * new rel size here.  If we did, it would amount to assuming that the
+		 * new pages are empty, which is unlikely.	Leaving the numbers alone
+		 * amounts to assuming that the new pages have the same tuple density
+		 * as existing ones, which is less unlikely.
 		 */
 		UnlockRelation(onerel, AccessExclusiveLock);
 		return;
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 9efd20f2bcfd29e3006df8582d0a214087dec601..8550869db3d959f0d403e3c165740bc15b02ddd3 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -807,9 +807,9 @@ check_client_encoding(char **newval, void **extra, GucSource source)
 	 *
 	 * XXX Although canonicalizing seems like a good idea in the abstract, it
 	 * breaks pre-9.1 JDBC drivers, which expect that if they send "UNICODE"
-	 * as the client_encoding setting then it will read back the same way.
-	 * As a workaround, don't replace the string if it's "UNICODE".  Remove
-	 * that hack when pre-9.1 JDBC drivers are no longer in use.
+	 * as the client_encoding setting then it will read back the same way. As
+	 * a workaround, don't replace the string if it's "UNICODE".  Remove that
+	 * hack when pre-9.1 JDBC drivers are no longer in use.
 	 */
 	if (strcmp(*newval, canonical_name) != 0 &&
 		strcmp(*newval, "UNICODE") != 0)
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 0e7ca625cee4f9bc5eae71b1433f3af319af80c5..3a6698105f278398a2317555cf0731a24cf9b106 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -265,8 +265,8 @@ ExecHashJoin(HashJoinState *node)
 
 				/*
 				 * We check for interrupts here because this corresponds to
-				 * where we'd fetch a row from a child plan node in other
-				 * join types.
+				 * where we'd fetch a row from a child plan node in other join
+				 * types.
 				 */
 				CHECK_FOR_INTERRUPTS();
 
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index f4099ac23458787d6af8aa2ff82da8130fc7ddcb..778849e4347f13c7fd334b923682df15ad4e09dd 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -929,7 +929,7 @@ pg_GSS_error(int severity, char *errmsg, OM_uint32 maj_stat, OM_uint32 min_stat)
 	/* Fetch major status message */
 	msg_ctx = 0;
 	gss_display_status(&lmin_s, maj_stat, GSS_C_GSS_CODE,
-								GSS_C_NO_OID, &msg_ctx, &gmsg);
+					   GSS_C_NO_OID, &msg_ctx, &gmsg);
 	strlcpy(msg_major, gmsg.value, sizeof(msg_major));
 	gss_release_buffer(&lmin_s, &gmsg);
 
@@ -945,7 +945,7 @@ pg_GSS_error(int severity, char *errmsg, OM_uint32 maj_stat, OM_uint32 min_stat)
 	/* Fetch mechanism minor status message */
 	msg_ctx = 0;
 	gss_display_status(&lmin_s, min_stat, GSS_C_MECH_CODE,
-								GSS_C_NO_OID, &msg_ctx, &gmsg);
+					   GSS_C_NO_OID, &msg_ctx, &gmsg);
 	strlcpy(msg_minor, gmsg.value, sizeof(msg_minor));
 	gss_release_buffer(&lmin_s, &gmsg);
 
@@ -1761,7 +1761,7 @@ auth_peer(hbaPort *port)
 		if (errno == ENOSYS)
 			ereport(LOG,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-					 errmsg("peer authentication is not supported on this platform")));
+			errmsg("peer authentication is not supported on this platform")));
 		else
 			ereport(LOG,
 					(errcode_for_socket_access(),
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index f3a3b6e2cc1c1368c2626e294f937193e8f761e5..fe86dc62193de0871930e3aadd8cacc63d8af924 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -494,8 +494,8 @@ check_role(const char *role, Oid roleid, char *param_str)
 				return true;
 		}
 		else if (strcmp(tok, role) == 0 ||
-				 (strcmp(tok, "replication\n") == 0 && 
-				  strcmp(role,"replication") ==0) ||
+				 (strcmp(tok, "replication\n") == 0 &&
+				  strcmp(role, "replication") == 0) ||
 				 strcmp(tok, "all\n") == 0)
 			return true;
 	}
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 51959371ce64c5db7a1d63f9ef0711428d9474ef..8d9cb9428df2e4300f57ac4e40982a0b346ed669 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -392,7 +392,7 @@ get_current_username(const char *progname)
 	/* Allocate new memory because later getpwuid() calls can overwrite it. */
 	return strdup(pw->pw_name);
 #else
-	unsigned long	namesize = 256 /* UNLEN */ + 1;
+	unsigned long namesize = 256 /* UNLEN */ + 1;
 	char	   *name;
 
 	name = malloc(namesize);
diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c
index e2a8bbcc1ac10fa985a9fcfe6c3e3b26054fa560..e521bf7e3cb8ec7102e17beca6c2daa6dbb59e01 100644
--- a/src/backend/optimizer/geqo/geqo_main.c
+++ b/src/backend/optimizer/geqo/geqo_main.c
@@ -74,6 +74,7 @@ geqo(PlannerInfo *root, int number_of_rels, List *initial_rels)
 	Pool	   *pool;
 	int			pool_size,
 				number_generations;
+
 #ifdef GEQO_DEBUG
 	int			status_interval;
 #endif
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index c4404b1bd1712548071d1d7b80b72e65dd45d77b..bb38768bd4358f72896e2d2c549bbd64dedcd24d 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -2687,7 +2687,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
 	 * evaluation of AND/OR?  Probably *not*, because that would make the
 	 * results depend on the clause ordering, and we are not in any position
 	 * to expect that the current ordering of the clauses is the one that's
-	 * going to end up being used.  The above per-RestrictInfo caching would
+	 * going to end up being used.	The above per-RestrictInfo caching would
 	 * not mix well with trying to re-order clauses anyway.
 	 */
 	if (IsA(node, FuncExpr))
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 42427d3aca40e570b75ef0f35085de8dbd88c2fe..24e4e59ea6a5b249d3820474468aee9047091830 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -953,7 +953,7 @@ is_dummy_rel(RelOptInfo *rel)
  * dummy.
  *
  * Also, when called during GEQO join planning, we are in a short-lived
- * memory context.  We must make sure that the dummy path attached to a
+ * memory context.	We must make sure that the dummy path attached to a
  * baserel survives the GEQO cycle, else the baserel is trashed for future
  * GEQO cycles.  On the other hand, when we are marking a joinrel during GEQO,
  * we don't want the dummy path to clutter the main planning context.  Upshot
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index ac8051147852345a54702ff31b4ef9017c30679b..e4ccf5ce79167d4045ffadad1b1fdd1d57684d4d 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -3383,9 +3383,9 @@ add_sort_column(AttrNumber colIdx, Oid sortOp, Oid coll, bool nulls_first,
 		 * opposite nulls direction is redundant.
 		 *
 		 * We could probably consider sort keys with the same sortop and
-		 * different collations to be redundant too, but for the moment
-		 * treat them as not redundant.  This will be needed if we ever
-		 * support collations with different notions of equality.
+		 * different collations to be redundant too, but for the moment treat
+		 * them as not redundant.  This will be needed if we ever support
+		 * collations with different notions of equality.
 		 */
 		if (sortColIdx[i] == colIdx &&
 			sortOperators[numCols] == sortOp &&
@@ -3419,7 +3419,7 @@ add_sort_column(AttrNumber colIdx, Oid sortOp, Oid coll, bool nulls_first,
  *
  * We must convert the pathkey information into arrays of sort key column
  * numbers, sort operator OIDs, collation OIDs, and nulls-first flags,
- * which is the representation the executor wants.  These are returned into
+ * which is the representation the executor wants.	These are returned into
  * the output parameters *p_numsortkeys etc.
  *
  * If the pathkeys include expressions that aren't simple Vars, we will
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 7b2b40f62984fdf93d23ab01b70cfdf23a3a9f61..9aafc8adcc6bd5884925c6280c1b9d19904ea58e 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -1034,8 +1034,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 		if (parse->hasAggs)
 		{
 			/*
-			 * Collect statistics about aggregates for estimating costs.
-			 * Note: we do not attempt to detect duplicate aggregates here; a
+			 * Collect statistics about aggregates for estimating costs. Note:
+			 * we do not attempt to detect duplicate aggregates here; a
 			 * somewhat-overestimated cost is okay for our present purposes.
 			 */
 			count_agg_clauses(root, (Node *) tlist, &agg_costs);
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index fcff015cdffc1d3833a85d8279790fbd102391bd..f82ab27b9aae43bfc1fd81ec73da1c6c58165014 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -933,10 +933,10 @@ generate_setop_tlist(List *colTypes, List *colCollations,
 		}
 
 		/*
-		 * Ensure the tlist entry's exposed collation matches the set-op.
-		 * This is necessary because plan_set_operations() reports the result
+		 * Ensure the tlist entry's exposed collation matches the set-op. This
+		 * is necessary because plan_set_operations() reports the result
 		 * ordering as a list of SortGroupClauses, which don't carry collation
-		 * themselves but just refer to tlist entries.  If we don't show the
+		 * themselves but just refer to tlist entries.	If we don't show the
 		 * right collation then planner.c might do the wrong thing in
 		 * higher-level queries.
 		 *
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 8b0d8623db6d775000f65da7579a70075456662d..2914c398186f6525979618f7442d1b444f8ed66e 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -86,7 +86,7 @@ typedef struct
 static bool contain_agg_clause_walker(Node *node, void *context);
 static bool pull_agg_clause_walker(Node *node, List **context);
 static bool count_agg_clauses_walker(Node *node,
-									 count_agg_clauses_context *context);
+						 count_agg_clauses_context *context);
 static bool find_window_functions_walker(Node *node, WindowFuncLists *lists);
 static bool expression_returns_set_rows_walker(Node *node, double *count);
 static bool contain_subplans_walker(Node *node, void *context);
@@ -2884,9 +2884,9 @@ eval_const_expressions_mutator(Node *node,
 			/*
 			 * We can remove null constants from the list. For a non-null
 			 * constant, if it has not been preceded by any other
-			 * non-null-constant expressions then it is the result.
-			 * Otherwise, it's the next argument, but we can drop following
-			 * arguments since they will never be reached.
+			 * non-null-constant expressions then it is the result. Otherwise,
+			 * it's the next argument, but we can drop following arguments
+			 * since they will never be reached.
 			 */
 			if (IsA(e, Const))
 			{
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 4867685c7f9bc2b2b910ef1743e69400bd13918a..e4a4e3a5e48005a62b02e79f83277ce758c43b28 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -575,7 +575,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
 			 * We must assign collations now because assign_query_collations
 			 * doesn't process rangetable entries.  We just assign all the
 			 * collations independently in each row, and don't worry about
-			 * whether they are consistent vertically.  The outer INSERT query
+			 * whether they are consistent vertically.	The outer INSERT query
 			 * isn't going to care about the collations of the VALUES columns,
 			 * so it's not worth the effort to identify a common collation for
 			 * each one here.  (But note this does have one user-visible
@@ -1100,16 +1100,16 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
 	 * doesn't process rangetable entries, and (2) we need to label the VALUES
 	 * RTE with column collations for use in the outer query.  We don't
 	 * consider conflict of implicit collations to be an error here; instead
-	 * the column will just show InvalidOid as its collation, and you'll get
-	 * a failure later if that results in failure to resolve a collation.
+	 * the column will just show InvalidOid as its collation, and you'll get a
+	 * failure later if that results in failure to resolve a collation.
 	 *
 	 * Note we modify the per-column expression lists in-place.
 	 */
 	collations = NIL;
 	for (i = 0; i < sublist_length; i++)
 	{
-		Oid		coltype;
-		Oid		colcoll;
+		Oid			coltype;
+		Oid			colcoll;
 
 		coltype = select_common_type(pstate, colexprs[i], "VALUES", NULL);
 
@@ -1210,7 +1210,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 				 errmsg("VALUES must not contain table references"),
 				 parser_errposition(pstate,
-						   locate_var_of_level((Node *) exprsLists, 0))));
+							  locate_var_of_level((Node *) exprsLists, 0))));
 
 	/*
 	 * Another thing we can't currently support is NEW/OLD references in rules
@@ -1225,7 +1225,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
 				 errmsg("VALUES must not contain OLD or NEW references"),
 				 errhint("Use SELECT ... UNION ALL ... instead."),
 				 parser_errposition(pstate,
-						   locate_var_of_level((Node *) exprsLists, 0))));
+							  locate_var_of_level((Node *) exprsLists, 0))));
 
 	qry->rtable = pstate->p_rtable;
 	qry->jointree = makeFromExpr(pstate->p_joinlist, NULL);
@@ -1237,13 +1237,13 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
 				(errcode(ERRCODE_GROUPING_ERROR),
 				 errmsg("cannot use aggregate function in VALUES"),
 				 parser_errposition(pstate,
-						   locate_agg_of_level((Node *) exprsLists, 0))));
+							  locate_agg_of_level((Node *) exprsLists, 0))));
 	if (pstate->p_hasWindowFuncs)
 		ereport(ERROR,
 				(errcode(ERRCODE_WINDOWING_ERROR),
 				 errmsg("cannot use window function in VALUES"),
 				 parser_errposition(pstate,
-								locate_windowfunc((Node *) exprsLists))));
+									locate_windowfunc((Node *) exprsLists))));
 
 	assign_query_collations(pstate, qry);
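
The re-wrapped comment in transformValuesClause() above notes that conflicting implicit collations are not reported as an error at this point; the column simply ends up with InvalidOid as its collation, and any failure surfaces later only if something needs a resolved collation. A minimal sketch of that per-column behavior, using a toy Oid typedef where 0 stands in for InvalidOid:

```c
#include <stdio.h>

typedef unsigned int Oid;		/* stand-in; 0 plays the role of InvalidOid */

/*
 * Resolve a common implicit collation for one VALUES column.  Disagreeing
 * non-zero labels are not an error here; the column just gets no collation.
 */
static Oid
common_implicit_collation(const Oid *colls, int nrows)
{
	Oid			result = 0;

	for (int i = 0; i < nrows; i++)
	{
		if (colls[i] == 0)
			continue;
		if (result == 0)
			result = colls[i];
		else if (colls[i] != result)
			return 0;			/* conflict: leave the column uncollated */
	}
	return result;
}

int
main(void)
{
	Oid			rows[] = {100, 101};	/* two rows label the column differently */

	printf("%u\n", common_implicit_collation(rows, 2));	/* prints 0 */
	return 0;
}
```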
 
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 3c5be6bc8887c19dd0b59504700680b8731dece1..f26c69abddd83f2687df6029379441644070a8a9 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -167,7 +167,7 @@ coerce_type(ParseState *pstate, Node *node,
 		 *
 		 * These cases are unlike the ones above because the exposed type of
 		 * the argument must be an actual array or enum type.  In particular
-		 * the argument must *not* be an UNKNOWN constant.  If it is, we just
+		 * the argument must *not* be an UNKNOWN constant.	If it is, we just
 		 * fall through; below, we'll call anyarray_in or anyenum_in, which
 		 * will produce an error.  Also, if what we have is a domain over
 		 * array or enum, we have to relabel it to its base type.
@@ -1290,7 +1290,7 @@ coerce_to_common_type(ParseState *pstate, Node *node,
  *
  * Domains over arrays match ANYARRAY, and are immediately flattened to their
  * base type.  (Thus, for example, we will consider it a match if one ANYARRAY
- * argument is a domain over int4[] while another one is just int4[].)  Also
+ * argument is a domain over int4[] while another one is just int4[].)	Also
  * notice that such a domain does *not* match ANYNONARRAY.
  *
  * If we have UNKNOWN input (ie, an untyped literal) for any polymorphic
@@ -1444,7 +1444,7 @@ check_generic_type_consistency(Oid *actual_arg_types,
  *	  is an extra restriction if not.)
  *
  * Domains over arrays match ANYARRAY arguments, and are immediately flattened
- * to their base type.  (In particular, if the return type is also ANYARRAY,
+ * to their base type.	(In particular, if the return type is also ANYARRAY,
  * we'll set it to the base type not the domain type.)
  *
  * When allow_poly is false, we are not expecting any of the actual_arg_types
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 5588cfac0bd54a8a8d45ec49569060fa5570a359..efb8fddbc39b4095ae91d6e0e39769135845382b 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -157,7 +157,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
 	stmt = (CreateStmt *) copyObject(stmt);
 
 	/*
-	 * Look up the creation namespace.  This also checks permissions on the
+	 * Look up the creation namespace.	This also checks permissions on the
 	 * target namespace, so that we throw any permissions error as early as
 	 * possible.
 	 */
@@ -169,7 +169,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
 	 */
 	if (stmt->if_not_exists)
 	{
-		Oid		existing_relid;
+		Oid			existing_relid;
 
 		existing_relid = get_relname_relid(stmt->relation->relname,
 										   namespaceid);
@@ -178,7 +178,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
 			ereport(NOTICE,
 					(errcode(ERRCODE_DUPLICATE_TABLE),
 					 errmsg("relation \"%s\" already exists, skipping",
-						 stmt->relation->relname)));
+							stmt->relation->relname)));
 			return NIL;
 		}
 	}
@@ -2544,8 +2544,8 @@ transformColumnType(CreateStmtContext *cxt, ColumnDef *column)
 		Form_pg_type typtup = (Form_pg_type) GETSTRUCT(ctype);
 
 		LookupCollation(cxt->pstate,
-								  column->collClause->collname,
-								  column->collClause->location);
+						column->collClause->collname,
+						column->collClause->location);
 		/* Complain if COLLATE is applied to an uncollatable type */
 		if (!OidIsValid(typtup->typcollation))
 			ereport(ERROR,
diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c
index c64f5114c27336cf5ef7d8ecbeed434dc92a9e6e..2bc4b496116c3ac8e3874e3aa2bcfd28244a2163 100644
--- a/src/backend/port/sysv_sema.c
+++ b/src/backend/port/sysv_sema.c
@@ -121,7 +121,7 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems)
 			 "semaphore sets (SEMMNI), or the system wide maximum number of "
 			"semaphores (SEMMNS), would be exceeded.  You need to raise the "
 		  "respective kernel parameter.  Alternatively, reduce PostgreSQL's "
-		"consumption of semaphores by reducing its max_connections parameter.\n"
+						 "consumption of semaphores by reducing its max_connections parameter.\n"
 			  "The PostgreSQL documentation contains more information about "
 						 "configuring your system for PostgreSQL.") : 0));
 	}
diff --git a/src/backend/port/win32/crashdump.c b/src/backend/port/win32/crashdump.c
index 89509990c523b7ce968f531ea112497f680deb4d..119dd076cae7af5870b88a89f674177d583eca0d 100644
--- a/src/backend/port/win32/crashdump.c
+++ b/src/backend/port/win32/crashdump.c
@@ -135,7 +135,7 @@ crashDumpHandler(struct _EXCEPTION_POINTERS * pExceptionInfo)
 
 		systemTicks = GetTickCount();
 		snprintf(dumpPath, _MAX_PATH,
-				 "crashdumps\\postgres-pid%0i-%0i.mdmp", 
+				 "crashdumps\\postgres-pid%0i-%0i.mdmp",
 				 (int) selfPid, (int) systemTicks);
 		dumpPath[_MAX_PATH - 1] = '\0';
 
diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c
index e2ae0f8e4f55d190ab75522e4731b26818db55b3..c918da981b283a6ee0b4d14b6a0c457ae78ea94e 100644
--- a/src/backend/port/win32/socket.c
+++ b/src/backend/port/win32/socket.c
@@ -373,7 +373,7 @@ pgwin32_recv(SOCKET s, char *buf, int len, int f)
  * The second argument to send() is defined by SUS to be a "const void *"
  * and so we use the same signature here to keep compilers happy when
  * handling callers.
- * 
+ *
  * But the buf member of a WSABUF struct is defined as "char *", so we cast
  * the second argument to that here when assigning it, also to keep compilers
  * happy.
diff --git a/src/backend/port/win32_latch.c b/src/backend/port/win32_latch.c
index 3509302aaae7c8d21ab6037d756e4267c30c9ea0..4bcf7b7a8f34b380e22793750deb8ad44762b0c5 100644
--- a/src/backend/port/win32_latch.c
+++ b/src/backend/port/win32_latch.c
@@ -94,7 +94,7 @@ WaitLatchOrSocket(volatile Latch *latch, SOCKET sock, bool forRead,
 	DWORD		rc;
 	HANDLE		events[3];
 	HANDLE		latchevent;
-	HANDLE		sockevent = WSA_INVALID_EVENT; /* silence compiler */
+	HANDLE		sockevent = WSA_INVALID_EVENT;	/* silence compiler */
 	int			numevents;
 	int			result = 0;
 
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 1e2aa9f0925da6343b8f34346f1e627131dfb1c3..1f0d4e63fb0ce0fdbb3ba0797aff65e28a420c4c 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -1486,10 +1486,10 @@ ServerLoop(void)
 			WalWriterPID = StartWalWriter();
 
 		/*
-		 *	If we have lost the autovacuum launcher, try to start a new one.
-		 *	We don't want autovacuum to run in binary upgrade mode because
-		 *	autovacuum might update relfrozenxid for empty tables before
-		 *	the physical files are put in place.
+		 * If we have lost the autovacuum launcher, try to start a new one. We
+		 * don't want autovacuum to run in binary upgrade mode because
+		 * autovacuum might update relfrozenxid for empty tables before the
+		 * physical files are put in place.
 		 */
 		if (!IsBinaryUpgrade && AutoVacPID == 0 &&
 			(AutoVacuumingActive() || start_autovac_launcher) &&
diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c
index 27d1193d706e4c66c513e420873f26912219c469..bc5ad3157a9d8083961e190154eb059340cfa1ee 100644
--- a/src/backend/regex/regc_pg_locale.c
+++ b/src/backend/regex/regc_pg_locale.c
@@ -23,7 +23,7 @@
  * several implementation strategies depending on the situation:
  *
  * 1. In C/POSIX collations, we use hard-wired code.  We can't depend on
- * the <ctype.h> functions since those will obey LC_CTYPE.  Note that these
+ * the <ctype.h> functions since those will obey LC_CTYPE.	Note that these
  * collations don't give a fig about multibyte characters.
  *
  * 2. In the "default" collation (which is supposed to obey LC_CTYPE):
@@ -35,10 +35,10 @@
  *
  * 2b. In all other encodings, or on machines that lack <wctype.h>, we use
  * the <ctype.h> functions for pg_wchar values up to 255, and punt for values
- * above that.  This is only 100% correct in single-byte encodings such as
- * LATINn.  However, non-Unicode multibyte encodings are mostly Far Eastern
+ * above that.	This is only 100% correct in single-byte encodings such as
+ * LATINn.	However, non-Unicode multibyte encodings are mostly Far Eastern
  * character sets for which the properties being tested here aren't very
- * relevant for higher code values anyway.  The difficulty with using the
+ * relevant for higher code values anyway.	The difficulty with using the
  * <wctype.h> functions with non-Unicode multibyte encodings is that we can
  * have no certainty that the platform's wchar_t representation matches
  * what we do in pg_wchar conversions.
@@ -87,134 +87,134 @@ static pg_locale_t pg_regex_locale;
 #define PG_ISSPACE	0x80
 
 static const unsigned char pg_char_properties[128] = {
-	/* NUL */	0,
-	/* ^A */	0,
-	/* ^B */	0,
-	/* ^C */	0,
-	/* ^D */	0,
-	/* ^E */	0,
-	/* ^F */	0,
-	/* ^G */	0,
-	/* ^H */	0,
-	/* ^I */	PG_ISSPACE,
-	/* ^J */	PG_ISSPACE,
-	/* ^K */	PG_ISSPACE,
-	/* ^L */	PG_ISSPACE,
-	/* ^M */	PG_ISSPACE,
-	/* ^N */	0,
-	/* ^O */	0,
-	/* ^P */	0,
-	/* ^Q */	0,
-	/* ^R */	0,
-	/* ^S */	0,
-	/* ^T */	0,
-	/* ^U */	0,
-	/* ^V */	0,
-	/* ^W */	0,
-	/* ^X */	0,
-	/* ^Y */	0,
-	/* ^Z */	0,
-	/* ^[ */	0,
-	/* ^\ */	0,
-	/* ^] */	0,
-	/* ^^ */	0,
-	/* ^_ */	0,
-	/*    */	PG_ISPRINT | PG_ISSPACE,
-	/* !  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* "  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* #  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* $  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* %  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* &  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* '  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* (  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* )  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* *  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* +  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* ,  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* -  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* .  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* /  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* 0  */	PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
-	/* 1  */	PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
-	/* 2  */	PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
-	/* 3  */	PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
-	/* 4  */	PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
-	/* 5  */	PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
-	/* 6  */	PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
-	/* 7  */	PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
-	/* 8  */	PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
-	/* 9  */	PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
-	/* :  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* ;  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* <  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* =  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* >  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* ?  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* @  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* A  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* B  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* C  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* D  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* E  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* F  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* G  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* H  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* I  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* J  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* K  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* L  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* M  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* N  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* O  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* P  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* Q  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* R  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* S  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* T  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* U  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* V  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* W  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* X  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* Y  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* Z  */	PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
-	/* [  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* \  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* ]  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* ^  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* _  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* `  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* a  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* b  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* c  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* d  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* e  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* f  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* g  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* h  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* i  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* j  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* k  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* l  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* m  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* n  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* o  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* p  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* q  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* r  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* s  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* t  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* u  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* v  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* w  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* x  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* y  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* z  */	PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
-	/* {  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* |  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* }  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* ~  */	PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
-	/* DEL */	0
+	 /* NUL */ 0,
+	 /* ^A */ 0,
+	 /* ^B */ 0,
+	 /* ^C */ 0,
+	 /* ^D */ 0,
+	 /* ^E */ 0,
+	 /* ^F */ 0,
+	 /* ^G */ 0,
+	 /* ^H */ 0,
+	 /* ^I */ PG_ISSPACE,
+	 /* ^J */ PG_ISSPACE,
+	 /* ^K */ PG_ISSPACE,
+	 /* ^L */ PG_ISSPACE,
+	 /* ^M */ PG_ISSPACE,
+	 /* ^N */ 0,
+	 /* ^O */ 0,
+	 /* ^P */ 0,
+	 /* ^Q */ 0,
+	 /* ^R */ 0,
+	 /* ^S */ 0,
+	 /* ^T */ 0,
+	 /* ^U */ 0,
+	 /* ^V */ 0,
+	 /* ^W */ 0,
+	 /* ^X */ 0,
+	 /* ^Y */ 0,
+	 /* ^Z */ 0,
+	 /* ^[ */ 0,
+	 /* ^\ */ 0,
+	 /* ^] */ 0,
+	 /* ^^ */ 0,
+	 /* ^_ */ 0,
+	 /* */ PG_ISPRINT | PG_ISSPACE,
+	 /* !  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* "  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* #  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* $  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* %  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* &  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* '  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* (  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* )  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* *  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* +  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* ,  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* -  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* .  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* /  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* 0  */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+	 /* 1  */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+	 /* 2  */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+	 /* 3  */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+	 /* 4  */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+	 /* 5  */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+	 /* 6  */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+	 /* 7  */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+	 /* 8  */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+	 /* 9  */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
+	 /* :  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* ;  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* <  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* =  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* >  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* ?  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* @  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* A  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* B  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* C  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* D  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* E  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* F  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* G  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* H  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* I  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* J  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* K  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* L  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* M  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* N  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* O  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* P  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* Q  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* R  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* S  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* T  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* U  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* V  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* W  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* X  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* Y  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* Z  */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
+	 /* [  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* \  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* ]  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* ^  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* _  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* `  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* a  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* b  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* c  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* d  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* e  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* f  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* g  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* h  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* i  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* j  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* k  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* l  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* m  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* n  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* o  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* p  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* q  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* r  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* s  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* t  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* u  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* v  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* w  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* x  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* y  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* z  */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
+	 /* {  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* |  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* }  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* ~  */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
+	 /* DEL */ 0
 };
 
 
@@ -242,8 +242,8 @@ pg_set_regex_collation(Oid collation)
 		{
 			/*
 			 * NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T;
-			 * the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does
-			 * not have to be considered below.
+			 * the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not
+			 * have to be considered below.
 			 */
 			pg_regex_locale = pg_newlocale_from_collation(collation);
 		}
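
Most of this file's hunk reindents pg_char_properties, a 128-entry bitmask table backing the hard-wired C/POSIX classification strategy described in the header comment. A cut-down sketch of how such a table is consulted — the names and flag values below are invented, and anything at or above 128 deliberately reports false, matching the "ignore multibyte characters" stance of the real table:

```c
#include <stdio.h>

#define MY_ISDIGIT	0x01
#define MY_ISALPHA	0x02
#define MY_ISSPACE	0x04

static unsigned char props[128];	/* zero-initialized: no properties */

static void
init_props(void)
{
	for (int c = '0'; c <= '9'; c++)
		props[c] = MY_ISDIGIT;
	for (int c = 'A'; c <= 'Z'; c++)
		props[c] = MY_ISALPHA;
	for (int c = 'a'; c <= 'z'; c++)
		props[c] = MY_ISALPHA;
	props[' '] = props['\t'] = props['\n'] = MY_ISSPACE;
}

/* Classification is a single table lookup plus a mask test. */
static int
my_isdigit(unsigned int c)
{
	return c < 128 && (props[c] & MY_ISDIGIT) != 0;
}

int
main(void)
{
	init_props();
	/* '7' is a digit; a code point outside ASCII is not classified at all */
	printf("%d %d\n", my_isdigit('7'), my_isdigit(0x20AC));
	return 0;
}
```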
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 1d4df8a4488c8900e82ce5f586df174f1940e62a..08a40866f074dd3dd9a388f7fbffad9c369f97ce 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -236,8 +236,8 @@ SyncRepWaitForLSN(XLogRecPtr XactCommitLSN)
 
 		/*
 		 * If the postmaster dies, we'll probably never get an
-		 * acknowledgement, because all the wal sender processes will exit.
-		 * So just bail out.
+		 * acknowledgement, because all the wal sender processes will exit. So
+		 * just bail out.
 		 */
 		if (!PostmasterIsAlive(true))
 		{
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 7b303d9355e019cd2c5d9ea811d9e113c7146a9b..52c55def155e568bf2a42f65c782f2b83d3fee1b 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -488,13 +488,13 @@ DefineQueryRewrite(char *rulename,
 	if (action != NIL || is_instead)
 	{
 		InsertRule(rulename,
-							event_type,
-							event_relid,
-							event_attno,
-							is_instead,
-							event_qual,
-							action,
-							replace);
+				   event_type,
+				   event_relid,
+				   event_attno,
+				   is_instead,
+				   event_qual,
+				   action,
+				   replace);
 
 		/*
 		 * Set pg_class 'relhasrules' field TRUE for event relation. If
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index be9e7a4598954d835773f7d1aebe5ddd73f213c2..6ef20a5bb537c716602eb0cd03bf5cc187b5027a 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -455,17 +455,17 @@ rewriteRuleAction(Query *parsetree,
 	}
 
 	/*
-	 * If the original query has any CTEs, copy them into the rule action.
-	 * But we don't need them for a utility action.
+	 * If the original query has any CTEs, copy them into the rule action. But
+	 * we don't need them for a utility action.
 	 */
 	if (parsetree->cteList != NIL && sub_action->commandType != CMD_UTILITY)
 	{
 		ListCell   *lc;
 
 		/*
-		 * Annoying implementation restriction: because CTEs are identified
-		 * by name within a cteList, we can't merge a CTE from the original
-		 * query if it has the same name as any CTE in the rule action.
+		 * Annoying implementation restriction: because CTEs are identified by
+		 * name within a cteList, we can't merge a CTE from the original query
+		 * if it has the same name as any CTE in the rule action.
 		 *
 		 * This could possibly be fixed by using some sort of internally
 		 * generated ID, instead of names, to link CTE RTEs to their CTEs.
@@ -2116,15 +2116,15 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
 
 	/*
 	 * If the original query has a CTE list, and we generated more than one
-	 * non-utility result query, we have to fail because we'll have copied
-	 * the CTE list into each result query.  That would break the expectation
-	 * of single evaluation of CTEs.  This could possibly be fixed by
+	 * non-utility result query, we have to fail because we'll have copied the
+	 * CTE list into each result query.  That would break the expectation of
+	 * single evaluation of CTEs.  This could possibly be fixed by
 	 * restructuring so that a CTE list can be shared across multiple Query
 	 * and PlannableStatement nodes.
 	 */
 	if (parsetree->cteList != NIL)
 	{
-		int		qcount = 0;
+		int			qcount = 0;
 
 		foreach(lc1, rewritten)
 		{
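
As the re-wrapped comment above explains, CTEs are identified purely by name within a cteList, so a CTE from the original query cannot be merged when the rule action already defines one with the same name. A trivial illustration of that name-clash test, with the list flattened to an array of strings for the sketch:

```c
#include <stdio.h>
#include <string.h>

/* Returns 1 if "name" collides with a CTE already defined by the action. */
static int
cte_name_clashes(const char *name, const char *const *action_ctes, int n)
{
	for (int i = 0; i < n; i++)
		if (strcmp(name, action_ctes[i]) == 0)
			return 1;
	return 0;
}

int
main(void)
{
	const char *action_ctes[] = {"changed_rows", "totals"};

	/* merging a CTE named "totals" from the original query must fail */
	printf("%d\n", cte_name_clashes("totals", action_ctes, 2));	/* prints 1 */
	return 0;
}
```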
diff --git a/src/backend/tsearch/ts_locale.c b/src/backend/tsearch/ts_locale.c
index dcaf18b00c2b2e564b0454bdd16cf7cb23a8f198..93b550a1a50112721989e1b535e74f6a58b4de94 100644
--- a/src/backend/tsearch/ts_locale.c
+++ b/src/backend/tsearch/ts_locale.c
@@ -29,7 +29,7 @@ t_isdigit(const char *ptr)
 	int			clen = pg_mblen(ptr);
 	wchar_t		character[2];
 	Oid			collation = DEFAULT_COLLATION_OID;		/* TODO */
-	pg_locale_t	mylocale = 0;	/* TODO */
+	pg_locale_t mylocale = 0;	/* TODO */
 
 	if (clen == 1 || lc_ctype_is_c(collation))
 		return isdigit(TOUCHAR(ptr));
@@ -45,7 +45,7 @@ t_isspace(const char *ptr)
 	int			clen = pg_mblen(ptr);
 	wchar_t		character[2];
 	Oid			collation = DEFAULT_COLLATION_OID;		/* TODO */
-	pg_locale_t	mylocale = 0;	/* TODO */
+	pg_locale_t mylocale = 0;	/* TODO */
 
 	if (clen == 1 || lc_ctype_is_c(collation))
 		return isspace(TOUCHAR(ptr));
@@ -61,7 +61,7 @@ t_isalpha(const char *ptr)
 	int			clen = pg_mblen(ptr);
 	wchar_t		character[2];
 	Oid			collation = DEFAULT_COLLATION_OID;		/* TODO */
-	pg_locale_t	mylocale = 0;	/* TODO */
+	pg_locale_t mylocale = 0;	/* TODO */
 
 	if (clen == 1 || lc_ctype_is_c(collation))
 		return isalpha(TOUCHAR(ptr));
@@ -77,7 +77,7 @@ t_isprint(const char *ptr)
 	int			clen = pg_mblen(ptr);
 	wchar_t		character[2];
 	Oid			collation = DEFAULT_COLLATION_OID;		/* TODO */
-	pg_locale_t	mylocale = 0;	/* TODO */
+	pg_locale_t mylocale = 0;	/* TODO */
 
 	if (clen == 1 || lc_ctype_is_c(collation))
 		return isprint(TOUCHAR(ptr));
@@ -250,7 +250,7 @@ lowerstr_with_len(const char *str, int len)
 
 #ifdef USE_WIDE_UPPER_LOWER
 	Oid			collation = DEFAULT_COLLATION_OID;		/* TODO */
-	pg_locale_t	mylocale = 0;	/* TODO */
+	pg_locale_t mylocale = 0;	/* TODO */
 #endif
 
 	if (len == 0)
diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c
index 3176ddc696b6454567f3107bde65d8e613196fc2..7ba33145889c078f68e66165657be944ded26b09 100644
--- a/src/backend/tsearch/wparser_def.c
+++ b/src/backend/tsearch/wparser_def.c
@@ -300,7 +300,7 @@ TParserInit(char *str, int len)
 	if (prs->charmaxlen > 1)
 	{
 		Oid			collation = DEFAULT_COLLATION_OID;	/* TODO */
-		pg_locale_t	mylocale = 0;	/* TODO */
+		pg_locale_t mylocale = 0;		/* TODO */
 
 		prs->usewide = true;
 		if (lc_ctype_is_c(collation))
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 0289dbdf02fcb2e71024d8dbe5d6caf5a3901f4f..3d320ccdd58ee3784a5c2eef59163136ea0c8c5a 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -4049,10 +4049,11 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
 			/* Compatible with postgresql < 8.4 when DateStyle = 'iso' */
 		case INTSTYLE_POSTGRES:
 			cp = AddPostgresIntPart(cp, year, "year", &is_zero, &is_before);
+
 			/*
-			 *	Ideally we should spell out "month" like we do for "year"
-			 *	and "day".  However, for backward compatibility, we can't
-			 *	easily fix this.  bjm 2011-05-24
+			 * Ideally we should spell out "month" like we do for "year" and
+			 * "day".  However, for backward compatibility, we can't easily
+			 * fix this.  bjm 2011-05-24
 			 */
 			cp = AddPostgresIntPart(cp, mon, "mon", &is_zero, &is_before);
 			cp = AddPostgresIntPart(cp, mday, "day", &is_zero, &is_before);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 7c941dd991f9b1b877bbbcc9ceb46110f0cd3d51..cf663466c354bcbbcf8c066bbbc1ffae23cbf86d 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -564,9 +564,9 @@ strftime_win32(char *dst, size_t dstlen, const wchar_t *format, const struct tm
 	dst[len] = '\0';
 	if (encoding != PG_UTF8)
 	{
-		char	   *convstr = 
-			(char *) pg_do_encoding_conversion((unsigned char *) dst, 
-											   len, PG_UTF8, encoding);
+		char	   *convstr =
+		(char *) pg_do_encoding_conversion((unsigned char *) dst,
+										   len, PG_UTF8, encoding);
 
 		if (dst != convstr)
 		{
@@ -1099,19 +1099,19 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
 #ifdef HAVE_WCSTOMBS_L
 		/* Use wcstombs_l for nondefault locales */
 		result = wcstombs_l(to, from, tolen, locale);
-#else /* !HAVE_WCSTOMBS_L */
+#else							/* !HAVE_WCSTOMBS_L */
 		/* We have to temporarily set the locale as current ... ugh */
 		locale_t	save_locale = uselocale(locale);
 
 		result = wcstombs(to, from, tolen);
 
 		uselocale(save_locale);
-#endif /* HAVE_WCSTOMBS_L */
-#else /* !HAVE_LOCALE_T */
+#endif   /* HAVE_WCSTOMBS_L */
+#else							/* !HAVE_LOCALE_T */
 		/* Can't have locale != 0 without HAVE_LOCALE_T */
 		elog(ERROR, "wcstombs_l is not available");
 		result = 0;				/* keep compiler quiet */
-#endif /* HAVE_LOCALE_T */
+#endif   /* HAVE_LOCALE_T */
 	}
 
 	return result;
@@ -1174,19 +1174,19 @@ char2wchar(wchar_t *to, size_t tolen, const char *from, size_t fromlen,
 #ifdef HAVE_WCSTOMBS_L
 			/* Use mbstowcs_l for nondefault locales */
 			result = mbstowcs_l(to, str, tolen, locale);
-#else /* !HAVE_WCSTOMBS_L */
+#else							/* !HAVE_WCSTOMBS_L */
 			/* We have to temporarily set the locale as current ... ugh */
 			locale_t	save_locale = uselocale(locale);
 
 			result = mbstowcs(to, str, tolen);
 
 			uselocale(save_locale);
-#endif /* HAVE_WCSTOMBS_L */
-#else /* !HAVE_LOCALE_T */
+#endif   /* HAVE_WCSTOMBS_L */
+#else							/* !HAVE_LOCALE_T */
 			/* Can't have locale != 0 without HAVE_LOCALE_T */
 			elog(ERROR, "mbstowcs_l is not available");
-			result = 0;				/* keep compiler quiet */
-#endif /* HAVE_LOCALE_T */
+			result = 0;			/* keep compiler quiet */
+#endif   /* HAVE_LOCALE_T */
 		}
 
 		pfree(str);
@@ -1213,4 +1213,4 @@ char2wchar(wchar_t *to, size_t tolen, const char *from, size_t fromlen,
 	return result;
 }
 
-#endif /* USE_WIDE_UPPER_LOWER */
+#endif   /* USE_WIDE_UPPER_LOWER */
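
The hunks above only re-align the #else/#endif comments, but the fallback they guard is worth spelling out: when wcstombs_l()/mbstowcs_l() are unavailable, the conversion runs under a temporarily installed thread locale and the previous one is restored afterwards. A self-contained sketch of that save/restore pattern, assuming a POSIX.1-2008 platform that provides newlocale()/uselocale():

```c
#define _POSIX_C_SOURCE 200809L

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Convert a wide string under a specific locale_t when no wcstombs_l() is
 * available: temporarily make "loc" the thread's current locale ... ugh.
 */
static size_t
wcs2mbs_with_locale(char *to, const wchar_t *from, size_t tolen, locale_t loc)
{
	locale_t	save_locale = uselocale(loc);
	size_t		result = wcstombs(to, from, tolen);

	uselocale(save_locale);		/* put back whatever was active before */
	return result;
}

int
main(void)
{
	locale_t	loc = newlocale(LC_ALL_MASK, "C", (locale_t) 0);
	char		buf[16];

	if (loc == (locale_t) 0)
		return 1;
	if (wcs2mbs_with_locale(buf, L"pg", sizeof(buf), loc) != (size_t) -1)
		printf("%s\n", buf);	/* prints "pg" */
	freelocale(loc);
	return 0;
}
```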
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index fde01a0f5719aa8e3fbd90997a2e803523d80591..fa7b8e6e81a6dea5e580ac503e13c88f0f6c78bd 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -83,7 +83,7 @@
 
 #define RIAttName(rel, attnum)	NameStr(*attnumAttName(rel, attnum))
 #define RIAttType(rel, attnum)	attnumTypeId(rel, attnum)
-#define RIAttCollation(rel, attnum)	attnumCollationId(rel, attnum)
+#define RIAttCollation(rel, attnum) attnumCollationId(rel, attnum)
 
 #define RI_TRIGTYPE_INSERT 1
 #define RI_TRIGTYPE_UPDATE 2
@@ -3024,8 +3024,8 @@ ri_GenerateQualCollation(StringInfo buf, Oid collation)
 	collname = NameStr(colltup->collname);
 
 	/*
-	 * We qualify the name always, for simplicity and to ensure the query
-	 * is not search-path-dependent.
+	 * We qualify the name always, for simplicity and to ensure the query is
+	 * not search-path-dependent.
 	 */
 	quoteOneName(onename, get_namespace_name(colltup->collnamespace));
 	appendStringInfo(buf, " COLLATE %s", onename);
@@ -3964,8 +3964,8 @@ ri_AttributesEqual(Oid eq_opr, Oid typeid,
 	}
 
 	/*
-	 * Apply the comparison operator.  We assume it doesn't
-	 * care about collations.
+	 * Apply the comparison operator.  We assume it doesn't care about
+	 * collations.
 	 */
 	return DatumGetBool(FunctionCall2(&entry->eq_opr_finfo,
 									  oldvalue, newvalue));
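
ri_GenerateQualCollation(), touched above, always schema-qualifies and quotes the collation name so the generated query does not depend on search_path. A standalone sketch of that quoting style, where each identifier part is double-quoted and embedded quotes are doubled; the helper name and fixed buffer sizes here are for illustration only:

```c
#include <stdio.h>

/* Quote one identifier part, doubling any embedded double quote. */
static void
quote_identifier_part(char *dst, const char *src)
{
	*dst++ = '"';
	for (; *src; src++)
	{
		if (*src == '"')
			*dst++ = '"';
		*dst++ = *src;
	}
	*dst++ = '"';
	*dst = '\0';
}

int
main(void)
{
	char		nsp[64],
				coll[64],
				clause[160];

	quote_identifier_part(nsp, "pg_catalog");
	quote_identifier_part(coll, "en_US");
	snprintf(clause, sizeof(clause), "COLLATE %s.%s", nsp, coll);
	printf("%s\n", clause);		/* prints COLLATE "pg_catalog"."en_US" */
	return 0;
}
```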
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index a67f7986a14f82ff1e923c6e111411c618e4c172..06cf6fa4f7992507fd5b636d5496f74623aa7f68 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -5193,8 +5193,8 @@ get_rule_expr(Node *node, deparse_context *context,
 					if (caseexpr->arg)
 					{
 						/*
-						 * The parser should have produced WHEN clauses of
-						 * the form "CaseTestExpr = RHS", possibly with an
+						 * The parser should have produced WHEN clauses of the
+						 * form "CaseTestExpr = RHS", possibly with an
 						 * implicit coercion inserted above the CaseTestExpr.
 						 * For accurate decompilation of rules it's essential
 						 * that we show just the RHS.  However in an
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index fa8cecafcbe5b4bef44d12549a35f7e27c838c3c..00ba19ec6c9d88e6d65bebb8941154c6394224d5 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -291,12 +291,12 @@ var_eq_const(VariableStatData *vardata, Oid operator,
 				/* be careful to apply operator right way 'round */
 				if (varonleft)
 					match = DatumGetBool(FunctionCall2Coll(&eqproc,
-														   DEFAULT_COLLATION_OID,
+													   DEFAULT_COLLATION_OID,
 														   values[i],
 														   constval));
 				else
 					match = DatumGetBool(FunctionCall2Coll(&eqproc,
-														   DEFAULT_COLLATION_OID,
+													   DEFAULT_COLLATION_OID,
 														   constval,
 														   values[i]));
 				if (match)
@@ -1185,7 +1185,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
 	}
 
 	/*
-	 * Divide pattern into fixed prefix and remainder.  XXX we have to assume
+	 * Divide pattern into fixed prefix and remainder.	XXX we have to assume
 	 * default collation here, because we don't have access to the actual
 	 * input collation for the operator.  FIXME ...
 	 */
@@ -2403,9 +2403,9 @@ eqjoinsel_semi(Oid operator,
 		 * before doing the division.
 		 *
 		 * Crude as the above is, it's completely useless if we don't have
-		 * reliable ndistinct values for both sides.  Hence, if either nd1
-		 * or nd2 is default, punt and assume half of the uncertain rows
-		 * have join partners.
+		 * reliable ndistinct values for both sides.  Hence, if either nd1 or
+		 * nd2 is default, punt and assume half of the uncertain rows have
+		 * join partners.
 		 */
 		if (nd1 != DEFAULT_NUM_DISTINCT && nd2 != DEFAULT_NUM_DISTINCT)
 		{
@@ -4779,7 +4779,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
  * Check whether char is a letter (and, hence, subject to case-folding)
  *
  * In multibyte character sets, we can't use isalpha, and it does not seem
- * worth trying to convert to wchar_t to use iswalpha.  Instead, just assume
+ * worth trying to convert to wchar_t to use iswalpha.	Instead, just assume
  * any multibyte char is potentially case-varying.
  */
 static int
@@ -4823,7 +4823,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
 	int			pos,
 				match_pos;
 	bool		is_multibyte = (pg_database_encoding_max_length() > 1);
-	pg_locale_t	locale = 0;
+	pg_locale_t locale = 0;
 	bool		locale_is_c = false;
 
 	/* the right-hand const is type text or bytea */
@@ -4834,7 +4834,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
 		if (typeid == BYTEAOID)
 			ereport(ERROR,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-		   errmsg("case insensitive matching not supported on type bytea")));
+			errmsg("case insensitive matching not supported on type bytea")));
 
 		/* If case-insensitive, we need locale info */
 		if (lc_ctype_is_c(collation))
@@ -4891,7 +4891,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
 
 		/* Stop if case-varying character (it's sort of a wildcard) */
 		if (case_insensitive &&
-			pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
+		  pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
 			break;
 
 		match[match_pos++] = patt[pos];
@@ -4938,7 +4938,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
 	char	   *rest;
 	Oid			typeid = patt_const->consttype;
 	bool		is_multibyte = (pg_database_encoding_max_length() > 1);
-	pg_locale_t	locale = 0;
+	pg_locale_t locale = 0;
 	bool		locale_is_c = false;
 
 	/*
@@ -5050,7 +5050,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
 
 		/* Stop if case-varying character (it's sort of a wildcard) */
 		if (case_insensitive &&
-			pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
+		  pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
 			break;
 
 		/*
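
like_fixed_prefix() and regex_fixed_prefix(), reindented above, stop the extracted prefix at the first wildcard and, for case-insensitive matching, at the first letter, since a case-varying character is effectively a wildcard. A simplified single-byte sketch of the LIKE case (multibyte and locale handling omitted, names invented):

```c
#include <ctype.h>
#include <stdio.h>

/* Copy the fixed prefix of a LIKE pattern into "prefix". */
static void
like_prefix(const char *patt, int case_insensitive, char *prefix, size_t buflen)
{
	size_t		out = 0;

	for (const char *p = patt; *p && out + 1 < buflen; p++)
	{
		if (*p == '%' || *p == '_')
			break;				/* wildcard ends the fixed prefix */
		if (*p == '\\')
		{
			if (*++p == '\0')
				break;			/* trailing escape: nothing follows */
		}
		if (case_insensitive && isalpha((unsigned char) *p))
			break;				/* case-varying char acts like a wildcard */
		prefix[out++] = *p;
	}
	prefix[out] = '\0';
}

int
main(void)
{
	char		buf[64];

	like_prefix("2011\\%_report%", 0, buf, sizeof(buf));
	printf("[%s]\n", buf);		/* prints "[2011%]" */
	return 0;
}
```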
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 6005d685764720abf0c8c89288eac9bee3e54474..9acbc2d4e51485004f0a01574bac588edc44cf4b 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -3829,7 +3829,7 @@ text_format(PG_FUNCTION_ARGS)
 		if (*cp < '0' || *cp > '9')
 		{
 			++arg;
-			if (arg <= 0)						/* overflow? */
+			if (arg <= 0)		/* overflow? */
 			{
 				/*
 				 * Should not happen, as you can't pass billions of arguments
@@ -3848,9 +3848,9 @@ text_format(PG_FUNCTION_ARGS)
 			arg = 0;
 			do
 			{
-				int		newarg = arg * 10 + (*cp - '0');
+				int			newarg = arg * 10 + (*cp - '0');
 
-				if (newarg / 10 != arg)			/* overflow? */
+				if (newarg / 10 != arg) /* overflow? */
 					ereport(ERROR,
 							(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
 							 errmsg("argument number is out of range")));
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 14d9cf7a445d44a2dd3a939182f99ca7a2d26aaa..b16ed6a208965becd2c28c550fc02bb8236cffb1 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -11,7 +11,7 @@
  *
  * Several seemingly-odd choices have been made to support use of the type
  * cache by generic array and record handling routines, such as array_eq(),
- * record_cmp(), and hash_array().  Because those routines are used as index
+ * record_cmp(), and hash_array().	Because those routines are used as index
  * support operations, they cannot leak memory.  To allow them to execute
  * efficiently, all information that they would like to re-use across calls
  * is kept in the type cache.
@@ -276,7 +276,7 @@ lookup_type_cache(Oid type_id, int flags)
 	if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
 		typentry->eq_opr == InvalidOid)
 	{
-		Oid		eq_opr = InvalidOid;
+		Oid			eq_opr = InvalidOid;
 
 		if (typentry->btree_opf != InvalidOid)
 			eq_opr = get_opfamily_member(typentry->btree_opf,
@@ -291,10 +291,10 @@ lookup_type_cache(Oid type_id, int flags)
 										 HTEqualStrategyNumber);
 
 		/*
-		 * If the proposed equality operator is array_eq or record_eq,
-		 * check to see if the element type or column types support equality.
-		 * If not, array_eq or record_eq would fail at runtime, so we don't
-		 * want to report that the type has equality.
+		 * If the proposed equality operator is array_eq or record_eq, check
+		 * to see if the element type or column types support equality. If
+		 * not, array_eq or record_eq would fail at runtime, so we don't want
+		 * to report that the type has equality.
 		 */
 		if (eq_opr == ARRAY_EQ_OP &&
 			!array_element_has_equality(typentry))
@@ -315,7 +315,7 @@ lookup_type_cache(Oid type_id, int flags)
 	}
 	if ((flags & TYPECACHE_LT_OPR) && typentry->lt_opr == InvalidOid)
 	{
-		Oid		lt_opr = InvalidOid;
+		Oid			lt_opr = InvalidOid;
 
 		if (typentry->btree_opf != InvalidOid)
 			lt_opr = get_opfamily_member(typentry->btree_opf,
@@ -335,7 +335,7 @@ lookup_type_cache(Oid type_id, int flags)
 	}
 	if ((flags & TYPECACHE_GT_OPR) && typentry->gt_opr == InvalidOid)
 	{
-		Oid		gt_opr = InvalidOid;
+		Oid			gt_opr = InvalidOid;
 
 		if (typentry->btree_opf != InvalidOid)
 			gt_opr = get_opfamily_member(typentry->btree_opf,
@@ -356,7 +356,7 @@ lookup_type_cache(Oid type_id, int flags)
 	if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
 		typentry->cmp_proc == InvalidOid)
 	{
-		Oid		cmp_proc = InvalidOid;
+		Oid			cmp_proc = InvalidOid;
 
 		if (typentry->btree_opf != InvalidOid)
 			cmp_proc = get_opfamily_proc(typentry->btree_opf,
@@ -377,7 +377,7 @@ lookup_type_cache(Oid type_id, int flags)
 	if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
 		typentry->hash_proc == InvalidOid)
 	{
-		Oid		hash_proc = InvalidOid;
+		Oid			hash_proc = InvalidOid;
 
 		/*
 		 * We insist that the eq_opr, if one has been determined, match the
@@ -460,7 +460,7 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)
 {
 	Relation	rel;
 
-	if (!OidIsValid(typentry->typrelid))	/* should not happen */
+	if (!OidIsValid(typentry->typrelid))		/* should not happen */
 		elog(ERROR, "invalid typrelid for composite type %u",
 			 typentry->type_id);
 	rel = relation_open(typentry->typrelid, AccessShareLock);
@@ -468,9 +468,9 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)
 
 	/*
 	 * Link to the tupdesc and increment its refcount (we assert it's a
-	 * refcounted descriptor).	We don't use IncrTupleDescRefCount() for
-	 * this, because the reference mustn't be entered in the current
-	 * resource owner; it can outlive the current query.
+	 * refcounted descriptor).	We don't use IncrTupleDescRefCount() for this,
+	 * because the reference mustn't be entered in the current resource owner;
+	 * it can outlive the current query.
 	 */
 	typentry->tupDesc = RelationGetDescr(rel);
 
@@ -520,7 +520,7 @@ array_element_has_hashing(TypeCacheEntry *typentry)
 static void
 cache_array_element_properties(TypeCacheEntry *typentry)
 {
-	Oid		elem_type = get_base_element_type(typentry->type_id);
+	Oid			elem_type = get_base_element_type(typentry->type_id);
 
 	if (OidIsValid(elem_type))
 	{
@@ -571,7 +571,7 @@ cache_record_field_properties(TypeCacheEntry *typentry)
 	{
 		TupleDesc	tupdesc;
 		int			newflags;
-		int 		i;
+		int			i;
 
 		/* Fetch composite type's tupdesc if we don't have it already */
 		if (typentry->tupDesc == NULL)
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index ffa19d5a2d26efce4fd4b121a0cab536b48d3aae..21fb5ade7ac759d29c9793db78d903316195e3a1 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -1053,7 +1053,7 @@ DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2)
 
 Datum
 DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
-					Datum arg3)
+						Datum arg3)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1078,7 +1078,7 @@ DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4)
+						Datum arg3, Datum arg4)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1105,7 +1105,7 @@ DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 DirectFunctionCall5Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4, Datum arg5)
+						Datum arg3, Datum arg4, Datum arg5)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1134,8 +1134,8 @@ DirectFunctionCall5Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 DirectFunctionCall6Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4, Datum arg5,
-					Datum arg6)
+						Datum arg3, Datum arg4, Datum arg5,
+						Datum arg6)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1166,8 +1166,8 @@ DirectFunctionCall6Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 DirectFunctionCall7Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4, Datum arg5,
-					Datum arg6, Datum arg7)
+						Datum arg3, Datum arg4, Datum arg5,
+						Datum arg6, Datum arg7)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1200,8 +1200,8 @@ DirectFunctionCall7Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 DirectFunctionCall8Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4, Datum arg5,
-					Datum arg6, Datum arg7, Datum arg8)
+						Datum arg3, Datum arg4, Datum arg5,
+						Datum arg6, Datum arg7, Datum arg8)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1236,9 +1236,9 @@ DirectFunctionCall8Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 DirectFunctionCall9Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4, Datum arg5,
-					Datum arg6, Datum arg7, Datum arg8,
-					Datum arg9)
+						Datum arg3, Datum arg4, Datum arg5,
+						Datum arg6, Datum arg7, Datum arg8,
+						Datum arg9)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1327,7 +1327,7 @@ FunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2)
 
 Datum
 FunctionCall3Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
-			  Datum arg3)
+				  Datum arg3)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1352,7 +1352,7 @@ FunctionCall3Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 FunctionCall4Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4)
+				  Datum arg3, Datum arg4)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1379,7 +1379,7 @@ FunctionCall4Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 FunctionCall5Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4, Datum arg5)
+				  Datum arg3, Datum arg4, Datum arg5)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1408,8 +1408,8 @@ FunctionCall5Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 FunctionCall6Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4, Datum arg5,
-			  Datum arg6)
+				  Datum arg3, Datum arg4, Datum arg5,
+				  Datum arg6)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1440,8 +1440,8 @@ FunctionCall6Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 FunctionCall7Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4, Datum arg5,
-			  Datum arg6, Datum arg7)
+				  Datum arg3, Datum arg4, Datum arg5,
+				  Datum arg6, Datum arg7)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1474,8 +1474,8 @@ FunctionCall7Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 FunctionCall8Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4, Datum arg5,
-			  Datum arg6, Datum arg7, Datum arg8)
+				  Datum arg3, Datum arg4, Datum arg5,
+				  Datum arg6, Datum arg7, Datum arg8)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1510,9 +1510,9 @@ FunctionCall8Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 FunctionCall9Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4, Datum arg5,
-			  Datum arg6, Datum arg7, Datum arg8,
-			  Datum arg9)
+				  Datum arg3, Datum arg4, Datum arg5,
+				  Datum arg6, Datum arg7, Datum arg8,
+				  Datum arg9)
 {
 	FunctionCallInfoData fcinfo;
 	Datum		result;
@@ -1625,7 +1625,7 @@ OidFunctionCall2Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2)
 
 Datum
 OidFunctionCall3Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
-				 Datum arg3)
+					 Datum arg3)
 {
 	FmgrInfo	flinfo;
 	FunctionCallInfoData fcinfo;
@@ -1653,7 +1653,7 @@ OidFunctionCall3Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 OidFunctionCall4Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4)
+					 Datum arg3, Datum arg4)
 {
 	FmgrInfo	flinfo;
 	FunctionCallInfoData fcinfo;
@@ -1683,7 +1683,7 @@ OidFunctionCall4Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 OidFunctionCall5Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4, Datum arg5)
+					 Datum arg3, Datum arg4, Datum arg5)
 {
 	FmgrInfo	flinfo;
 	FunctionCallInfoData fcinfo;
@@ -1715,8 +1715,8 @@ OidFunctionCall5Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 OidFunctionCall6Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4, Datum arg5,
-				 Datum arg6)
+					 Datum arg3, Datum arg4, Datum arg5,
+					 Datum arg6)
 {
 	FmgrInfo	flinfo;
 	FunctionCallInfoData fcinfo;
@@ -1750,8 +1750,8 @@ OidFunctionCall6Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 OidFunctionCall7Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4, Datum arg5,
-				 Datum arg6, Datum arg7)
+					 Datum arg3, Datum arg4, Datum arg5,
+					 Datum arg6, Datum arg7)
 {
 	FmgrInfo	flinfo;
 	FunctionCallInfoData fcinfo;
@@ -1787,8 +1787,8 @@ OidFunctionCall7Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 OidFunctionCall8Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4, Datum arg5,
-				 Datum arg6, Datum arg7, Datum arg8)
+					 Datum arg3, Datum arg4, Datum arg5,
+					 Datum arg6, Datum arg7, Datum arg8)
 {
 	FmgrInfo	flinfo;
 	FunctionCallInfoData fcinfo;
@@ -1826,9 +1826,9 @@ OidFunctionCall8Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
 
 Datum
 OidFunctionCall9Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4, Datum arg5,
-				 Datum arg6, Datum arg7, Datum arg8,
-				 Datum arg9)
+					 Datum arg3, Datum arg4, Datum arg5,
+					 Datum arg6, Datum arg7, Datum arg8,
+					 Datum arg9)
 {
 	FmgrInfo	flinfo;
 	FunctionCallInfoData fcinfo;
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 3ac3254afb4cb6d3ed35646579fb895ee7257941..8347f52ca8d5f1725416e5f5372ea2cdce217992 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -630,9 +630,9 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 	 */
 	if (IsBinaryUpgrade && !am_superuser)
 	{
-			ereport(FATAL,
-					(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-			errmsg("must be superuser to connect in binary upgrade mode")));
+		ereport(FATAL,
+				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+			 errmsg("must be superuser to connect in binary upgrade mode")));
 	}
 
 	/*
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index 4949af965727193ec77608461966dd08f305ad6d..399c734bc955e1f59bae8907944d95b3d60343d1 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -1086,7 +1086,7 @@ setup_config(void)
 							  "@authcomment@",
 					   strcmp(authmethod, "trust") ? "" : AUTHTRUST_WARNING);
 
-    /* Replace username for replication */
+	/* Replace username for replication */
 	conflines = replace_token(conflines,
 							  "@default_username@",
 							  username);
@@ -1663,7 +1663,7 @@ setup_collation(void)
 		 */
 		if (normalize_locale_name(alias, localebuf))
 			PG_CMD_PRINTF3("INSERT INTO tmp_pg_collation VALUES (E'%s', E'%s', %d);\n",
-						escape_quotes(alias), quoted_locale, enc);
+						   escape_quotes(alias), quoted_locale, enc);
 	}
 
 	/* Add an SQL-standard name */
@@ -1688,7 +1688,7 @@ setup_collation(void)
 				"   encoding, locale, locale "
 				"  FROM tmp_pg_collation"
 				"  WHERE NOT EXISTS (SELECT 1 FROM pg_collation WHERE collname = tmp_pg_collation.collname)"
-				"  ORDER BY collname, encoding, (collname = locale) DESC, locale;\n");
+	   "  ORDER BY collname, encoding, (collname = locale) DESC, locale;\n");
 
 	pclose(locale_a_handle);
 	PG_CMD_CLOSE;
@@ -1702,7 +1702,7 @@ setup_collation(void)
 #else							/* not HAVE_LOCALE_T && not WIN32 */
 	printf(_("not supported on this platform\n"));
 	fflush(stdout);
-#endif   /* not HAVE_LOCALE_T  && not WIN32*/
+#endif   /* not HAVE_LOCALE_T  && not WIN32 */
 }
 
 /*
@@ -2272,20 +2272,19 @@ check_locale_encoding(const char *locale, int user_enc)
 static void
 strreplace(char *str, char *needle, char *replacement)
 {
-	char *s;
+	char	   *s;
 
 	s = strstr(str, needle);
 	if (s != NULL)
 	{
-		int replacementlen = strlen(replacement);
-		char *rest = s + strlen(needle);
+		int			replacementlen = strlen(replacement);
+		char	   *rest = s + strlen(needle);
 
 		memcpy(s, replacement, replacementlen);
 		memmove(s + replacementlen, rest, strlen(rest) + 1);
 	}
 }
-
-#endif /* WIN32 */
+#endif   /* WIN32 */
 
 /*
  * Windows has a problem with locale names that have a dot in the country
@@ -2306,6 +2305,7 @@ localemap(char *locale)
 	locale = xstrdup(locale);
 
 #ifdef WIN32
+
 	/*
 	 * Map the full country name to an abbreviation that setlocale() accepts.
 	 *
@@ -2321,14 +2321,14 @@ localemap(char *locale)
 
 	/*
 	 * The ISO-3166 country code for Macau S.A.R. is MAC, but Windows doesn't
-	 * seem to recognize that. And Macau isn't listed in the table of
-	 * accepted abbreviations linked above.
+	 * seem to recognize that. And Macau isn't listed in the table of accepted
+	 * abbreviations linked above.
 	 *
-	 * Fortunately, "ZHM" seems to be accepted as an alias for
-	 * "Chinese (Traditional)_Macau S.A.R..950", so we use that. Note that
-	 * it's unlike HKG and ARE, ZHM is an alias for the whole locale name,
-	 * not just the country part. I'm not sure where that "ZHM" comes from,
-	 * must be some legacy naming scheme. But hey, it works.
+	 * Fortunately, "ZHM" seems to be accepted as an alias for "Chinese
+	 * (Traditional)_Macau S.A.R..950", so we use that. Note that it's unlike
+	 * HKG and ARE, ZHM is an alias for the whole locale name, not just the
+	 * country part. I'm not sure where that "ZHM" comes from, must be some
+	 * legacy naming scheme. But hey, it works.
 	 *
 	 * Some versions of Windows spell it "Macau", others "Macao".
 	 */
@@ -2336,7 +2336,7 @@ localemap(char *locale)
 	strreplace(locale, "Chinese_Macau S.A.R..950", "ZHM");
 	strreplace(locale, "Chinese (Traditional)_Macao S.A.R..950", "ZHM");
 	strreplace(locale, "Chinese_Macao S.A.R..950", "ZHM");
-#endif /* WIN32 */
+#endif   /* WIN32 */
 
 	return locale;
 }
@@ -3000,13 +3000,13 @@ main(int argc, char *argv[])
 		else if (!pg_valid_server_encoding_id(ctype_enc))
 		{
 			/*
-			 * We recognized it, but it's not a legal server encoding.
-			 * On Windows, UTF-8 works with any locale, so we can fall back
-			 * to UTF-8.
+			 * We recognized it, but it's not a legal server encoding. On
+			 * Windows, UTF-8 works with any locale, so we can fall back to
+			 * UTF-8.
 			 */
 #ifdef WIN32
 			printf(_("Encoding %s implied by locale is not allowed as a server-side encoding.\n"
-					 "The default database encoding will be set to %s instead.\n"),
+			   "The default database encoding will be set to %s instead.\n"),
 				   pg_encoding_to_char(ctype_enc),
 				   pg_encoding_to_char(PG_UTF8));
 			ctype_enc = PG_UTF8;
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 17cff8dd5b92896af37ed130aa0e9bd14d3fa1e6..9bf1fcdc4b5f63b7c2f0178c65a4e05296d898e4 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -130,7 +130,7 @@ usage(void)
 	printf(_("  -Z, --compress=0-9       compress tar output with given compression level\n"));
 	printf(_("\nGeneral options:\n"));
 	printf(_("  -c, --checkpoint=fast|spread\n"
-			 "                           set fast or spread checkpointing\n"));
+		   "                           set fast or spread checkpointing\n"));
 	printf(_("  -l, --label=LABEL        set backup label\n"));
 	printf(_("  -P, --progress           show progress information\n"));
 	printf(_("  -v, --verbose            output verbose messages\n"));
@@ -1006,7 +1006,7 @@ main(int argc, char **argv)
 #ifdef HAVE_LIBZ
 				compresslevel = Z_DEFAULT_COMPRESSION;
 #else
-				compresslevel = 1; /* will be rejected below */
+				compresslevel = 1;		/* will be rejected below */
 #endif
 				break;
 			case 'Z':
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index 8172d076cfbed57bee1ee879a5cac8c775c2d899..a3933abaa696ae89ce7d781117e989df355373d7 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -370,9 +370,9 @@ start_postmaster(void)
 	 * Since there might be quotes to handle here, it is easier simply to pass
 	 * everything to a shell to process them.
 	 *
-	 * XXX it would be better to fork and exec so that we would know the
-	 * child postmaster's PID directly; then test_postmaster_connection could
-	 * use the PID without having to rely on reading it back from the pidfile.
+	 * XXX it would be better to fork and exec so that we would know the child
+	 * postmaster's PID directly; then test_postmaster_connection could use
+	 * the PID without having to rely on reading it back from the pidfile.
 	 */
 	if (log_file != NULL)
 		snprintf(cmd, MAXPGPATH, SYSTEMQUOTE "\"%s\" %s%s < \"%s\" >> \"%s\" 2>&1 &" SYSTEMQUOTE,
@@ -479,7 +479,7 @@ test_postmaster_connection(bool do_checkpoint)
 					time_t		pmstart;
 
 					/*
-					 * Make sanity checks.  If it's for a standalone backend
+					 * Make sanity checks.	If it's for a standalone backend
 					 * (negative PID), or the recorded start time is before
 					 * pg_ctl started, then either we are looking at the wrong
 					 * data directory, or this is a pre-existing pidfile that
@@ -492,8 +492,8 @@ test_postmaster_connection(bool do_checkpoint)
 					if (pmpid <= 0 || pmstart < start_time - 2)
 					{
 						/*
-						 * Set flag to report stale pidfile if it doesn't
-						 * get overwritten before we give up waiting.
+						 * Set flag to report stale pidfile if it doesn't get
+						 * overwritten before we give up waiting.
 						 */
 						found_stale_pidfile = true;
 					}
@@ -552,7 +552,7 @@ test_postmaster_connection(bool do_checkpoint)
 						 * timeout first.
 						 */
 						snprintf(connstr, sizeof(connstr),
-								 "dbname=postgres port=%d host='%s' connect_timeout=5",
+						"dbname=postgres port=%d host='%s' connect_timeout=5",
 								 portnum, host_str);
 					}
 				}
@@ -570,11 +570,11 @@ test_postmaster_connection(bool do_checkpoint)
 		/*
 		 * The postmaster should create postmaster.pid very soon after being
 		 * started.  If it's not there after we've waited 5 or more seconds,
-		 * assume startup failed and give up waiting.  (Note this covers
-		 * both cases where the pidfile was never created, and where it was
-		 * created and then removed during postmaster exit.)  Also, if there
-		 * *is* a file there but it appears stale, issue a suitable warning
-		 * and give up waiting.
+		 * assume startup failed and give up waiting.  (Note this covers both
+		 * cases where the pidfile was never created, and where it was created
+		 * and then removed during postmaster exit.)  Also, if there *is* a
+		 * file there but it appears stale, issue a suitable warning and give
+		 * up waiting.
 		 */
 		if (i >= 5)
 		{
@@ -593,7 +593,7 @@ test_postmaster_connection(bool do_checkpoint)
 
 		/*
 		 * If we've been able to identify the child postmaster's PID, check
-		 * the process is still alive.  This covers cases where the postmaster
+		 * the process is still alive.	This covers cases where the postmaster
 		 * successfully created the pidfile but then crashed without removing
 		 * it.
 		 */
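
/*
 * Rough sketch of the pidfile checks described in the comments above, not
 * pg_ctl's actual code: the first line of postmaster.pid is read as a PID, a
 * non-positive value (standalone backend) is treated as unusable, and
 * kill(pid, 0) is used to test whether the recorded process is still alive
 * without delivering any signal.  The start-time comparison and the 5-second
 * wait loop are omitted here.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

static int
postmaster_seems_alive(const char *pidfile_path)
{
	FILE	   *fp = fopen(pidfile_path, "r");
	long		pid = 0;

	if (fp == NULL)
		return 0;				/* no pidfile (yet) */
	if (fscanf(fp, "%ld", &pid) != 1)
		pid = 0;
	fclose(fp);

	if (pid <= 0)
		return 0;				/* standalone backend or garbage */

	/* signal 0: existence/permission check only, nothing is delivered */
	if (kill((pid_t) pid, 0) == 0 || errno == EPERM)
		return 1;
	return 0;
}

int
main(int argc, char **argv)
{
	if (argc < 2)
		return 1;
	printf("%s\n", postmaster_seems_alive(argv[1]) ? "alive" : "not running");
	return 0;
}
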
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 28175674c00cd0206737f4b3c2eec5d8dae843ca..ce12a41ce3ed98b7d795404d06be483b2c7353d8 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -104,7 +104,7 @@ typedef struct _restoreOptions
 										 * restore */
 	int			use_setsessauth;/* Use SET SESSION AUTHORIZATION commands
 								 * instead of OWNER TO */
-	int			no_security_labels;	/* Skip security label entries */
+	int			no_security_labels;		/* Skip security label entries */
 	char	   *superuser;		/* Username to use as superuser */
 	char	   *use_role;		/* Issue SET ROLE to this */
 	int			dataOnly;
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 3781e599c4fd5c569c69cd57560448c808e9c1a0..9e69b0fc5241e0ceedb304a9f62b9f56095f76da 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -7968,14 +7968,14 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
 		 * collation does not matter for those.
 		 */
 		appendPQExpBuffer(query, "SELECT a.attname, "
-						  "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
+			"pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
 						  "a.attlen, a.attalign, a.attisdropped, "
 						  "CASE WHEN a.attcollation <> at.typcollation "
 						  "THEN a.attcollation ELSE 0 END AS attcollation, "
 						  "ct.typrelid "
 						  "FROM pg_catalog.pg_type ct "
-						  "JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
-						  "LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
+				"JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
+					"LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
 						  "WHERE ct.oid = '%u'::pg_catalog.oid "
 						  "ORDER BY a.attnum ",
 						  tyinfo->dobj.catId.oid);
@@ -7988,11 +7988,11 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
 		 * always be false.
 		 */
 		appendPQExpBuffer(query, "SELECT a.attname, "
-						  "pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
+			"pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
 						  "a.attlen, a.attalign, a.attisdropped, "
 						  "0 AS attcollation, "
 						  "ct.typrelid "
-						  "FROM pg_catalog.pg_type ct, pg_catalog.pg_attribute a "
+					 "FROM pg_catalog.pg_type ct, pg_catalog.pg_attribute a "
 						  "WHERE ct.oid = '%u'::pg_catalog.oid "
 						  "AND a.attrelid = ct.typrelid "
 						  "ORDER BY a.attnum ",
@@ -8072,15 +8072,15 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
 		{
 			/*
 			 * This is a dropped attribute and we're in binary_upgrade mode.
-			 * Insert a placeholder for it in the CREATE TYPE command, and
-			 * set length and alignment with direct UPDATE to the catalogs
+			 * Insert a placeholder for it in the CREATE TYPE command, and set
+			 * length and alignment with direct UPDATE to the catalogs
 			 * afterwards. See similar code in dumpTableSchema().
 			 */
 			appendPQExpBuffer(q, "%s INTEGER /* dummy */", fmtId(attname));
 
 			/* stash separately for insertion after the CREATE TYPE */
 			appendPQExpBuffer(dropped,
-							  "\n-- For binary upgrade, recreate dropped column.\n");
+					  "\n-- For binary upgrade, recreate dropped column.\n");
 			appendPQExpBuffer(dropped, "UPDATE pg_catalog.pg_attribute\n"
 							  "SET attlen = %s, "
 							  "attalign = '%s', attbyval = false\n"
@@ -8380,8 +8380,8 @@ dumpProcLang(Archive *fout, ProcLangInfo *plang)
 	 * However, for a language that belongs to an extension, we must not use
 	 * the shouldDumpProcLangs heuristic, but just dump the language iff we're
 	 * told to (via dobj.dump).  Generally the support functions will belong
-	 * to the same extension and so have the same dump flags ... if they don't,
-	 * this might not work terribly nicely.
+	 * to the same extension and so have the same dump flags ... if they
+	 * don't, this might not work terribly nicely.
 	 */
 	useParams = (funcInfo != NULL &&
 				 (inlineInfo != NULL || !OidIsValid(plang->laninline)) &&
@@ -11181,8 +11181,8 @@ dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo)
 		return;
 
 	/*
-	 * FDWs that belong to an extension are dumped based on their "dump" field.
-	 * Otherwise omit them if we are only dumping some specific object.
+	 * FDWs that belong to an extension are dumped based on their "dump"
+	 * field. Otherwise omit them if we are only dumping some specific object.
 	 */
 	if (!fdwinfo->dobj.ext_member)
 		if (!include_everything)
@@ -11963,7 +11963,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 
 	if (binary_upgrade)
 		binary_upgrade_set_type_oids_by_rel_oid(q,
-													 tbinfo->dobj.catId.oid);
+												tbinfo->dobj.catId.oid);
 
 	/* Is it a table or a view? */
 	if (tbinfo->relkind == RELKIND_VIEW)
@@ -12085,6 +12085,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
 						  "UNLOGGED " : "",
 						  reltypename,
 						  fmtId(tbinfo->dobj.name));
+
 		/*
 		 * In case of a binary upgrade, we dump the table normally and attach
 		 * it to the type afterward.
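
/*
 * Sketch of the dropped-column trick described in dumpCompositeType() above:
 * the composite type is recreated with a dummy INTEGER member standing in
 * for the dropped attribute, and its real length/alignment are then forced
 * with a direct UPDATE on pg_catalog.pg_attribute.  The type name, column
 * name, and attlen/attalign values below are made-up placeholders; the real
 * code derives them from the old catalogs and quotes them with fmtId().
 */
#include <stdio.h>

int
main(void)
{
	char		create_sql[256];
	char		fixup_sql[512];

	snprintf(create_sql, sizeof(create_sql),
			 "CREATE TYPE mytype AS (ok text, %s INTEGER /* dummy */);",
			 "zapped");
	snprintf(fixup_sql, sizeof(fixup_sql),
			 "UPDATE pg_catalog.pg_attribute\n"
			 "SET attlen = %s, attalign = '%s', attbyval = false\n"
			 "WHERE attname = '%s'\n"
			 "  AND attrelid = 'mytype'::pg_catalog.regclass;",
			 "-1", "i", "zapped");

	puts(create_sql);
	puts(fixup_sql);
	return 0;
}
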
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 6f603d4512a24236499d866c4220f7b34e1ac7d2..b2c54b5f929c8c32ed2c8c015cb1981cf5116866 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -1746,7 +1746,7 @@ describeOneTableDetails(const char *schemaname,
 		{
 			printfPQExpBuffer(&buf,
 							  "SELECT conname,\n"
-			   "  pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n"
+				 "  pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n"
 							  "FROM pg_catalog.pg_constraint r\n"
 					"WHERE r.conrelid = '%s' AND r.contype = 'f' ORDER BY 1",
 							  oid);
@@ -2693,7 +2693,7 @@ listDomains(const char *pattern, bool showSystem)
 	printfPQExpBuffer(&buf,
 					  "SELECT n.nspname as \"%s\",\n"
 					  "       t.typname as \"%s\",\n"
-					  "       pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n"
+	 "       pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n"
 					  "       TRIM(LEADING\n",
 					  gettext_noop("Schema"),
 					  gettext_noop("Name"),
@@ -2703,7 +2703,7 @@ listDomains(const char *pattern, bool showSystem)
 						  "            COALESCE((SELECT ' collate ' || c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt\n"
 						  "                      WHERE c.oid = t.typcollation AND bt.oid = t.typbasetype AND t.typcollation <> bt.typcollation), '') ||\n");
 	appendPQExpBuffer(&buf,
-					  "            CASE WHEN t.typnotnull THEN ' not null' ELSE '' END ||\n"
+	   "            CASE WHEN t.typnotnull THEN ' not null' ELSE '' END ||\n"
 					  "            CASE WHEN t.typdefault IS NOT NULL THEN ' default ' || t.typdefault ELSE '' END\n"
 					  "       ) as \"%s\",\n",
 					  gettext_noop("Modifier"));
diff --git a/src/bin/scripts/createdb.c b/src/bin/scripts/createdb.c
index 544f2f64b3db5bb9a706387222af27e5059ff144..d7c3928eb6e23135653d4905150a842378895e77 100644
--- a/src/bin/scripts/createdb.c
+++ b/src/bin/scripts/createdb.c
@@ -192,11 +192,11 @@ main(int argc, char *argv[])
 
 	appendPQExpBuffer(&sql, ";\n");
 
-    /*
-     * Connect to the 'postgres' database by default, except have
-     * the 'postgres' user use 'template1' so he can create the
-     * 'postgres' database.
-     */
+	/*
+	 * Connect to the 'postgres' database by default, except have the
+	 * 'postgres' user use 'template1' so he can create the 'postgres'
+	 * database.
+	 */
 	conn = connectDatabase(strcmp(dbname, "postgres") == 0 ? "template1" : "postgres",
 						   host, port, username, prompt_password, progname);
 
diff --git a/src/bin/scripts/dropdb.c b/src/bin/scripts/dropdb.c
index 48f73ae25e85e60f6d35a217c5878a0d96d370ca..4cec63ec8e2985eb5f22f2719d16ab5f96955151 100644
--- a/src/bin/scripts/dropdb.c
+++ b/src/bin/scripts/dropdb.c
@@ -113,11 +113,10 @@ main(int argc, char *argv[])
 	appendPQExpBuffer(&sql, "DROP DATABASE %s;\n",
 					  fmtId(dbname));
 
-    /*
-     * Connect to the 'postgres' database by default, except have
-     * the 'postgres' user use 'template1' so he can drop the
-     * 'postgres' database.
-     */
+	/*
+	 * Connect to the 'postgres' database by default, except have the
+	 * 'postgres' user use 'template1' so he can drop the 'postgres' database.
+	 */
 	conn = connectDatabase(strcmp(dbname, "postgres") == 0 ? "template1" : "postgres",
 						   host, port, username, prompt_password, progname);
 
diff --git a/src/include/fmgr.h b/src/include/fmgr.h
index 60d47d97b9d6e4033c73a57cc0da07794bb60471..4a412f8049c49fada4b1fa679bb8d454b213d410 100644
--- a/src/include/fmgr.h
+++ b/src/include/fmgr.h
@@ -433,70 +433,70 @@ extern int no_such_variable
  * are allowed to be NULL.
  */
 extern Datum DirectFunctionCall1Coll(PGFunction func, Oid collation,
-									 Datum arg1);
+						Datum arg1);
 extern Datum DirectFunctionCall2Coll(PGFunction func, Oid collation,
-									 Datum arg1, Datum arg2);
+						Datum arg1, Datum arg2);
 extern Datum DirectFunctionCall3Coll(PGFunction func, Oid collation,
-									 Datum arg1, Datum arg2,
-					Datum arg3);
+						Datum arg1, Datum arg2,
+						Datum arg3);
 extern Datum DirectFunctionCall4Coll(PGFunction func, Oid collation,
-									 Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4);
+						Datum arg1, Datum arg2,
+						Datum arg3, Datum arg4);
 extern Datum DirectFunctionCall5Coll(PGFunction func, Oid collation,
-									 Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4, Datum arg5);
+						Datum arg1, Datum arg2,
+						Datum arg3, Datum arg4, Datum arg5);
 extern Datum DirectFunctionCall6Coll(PGFunction func, Oid collation,
-									 Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4, Datum arg5,
-					Datum arg6);
+						Datum arg1, Datum arg2,
+						Datum arg3, Datum arg4, Datum arg5,
+						Datum arg6);
 extern Datum DirectFunctionCall7Coll(PGFunction func, Oid collation,
-									 Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4, Datum arg5,
-					Datum arg6, Datum arg7);
+						Datum arg1, Datum arg2,
+						Datum arg3, Datum arg4, Datum arg5,
+						Datum arg6, Datum arg7);
 extern Datum DirectFunctionCall8Coll(PGFunction func, Oid collation,
-									 Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4, Datum arg5,
-					Datum arg6, Datum arg7, Datum arg8);
+						Datum arg1, Datum arg2,
+						Datum arg3, Datum arg4, Datum arg5,
+						Datum arg6, Datum arg7, Datum arg8);
 extern Datum DirectFunctionCall9Coll(PGFunction func, Oid collation,
-									 Datum arg1, Datum arg2,
-					Datum arg3, Datum arg4, Datum arg5,
-					Datum arg6, Datum arg7, Datum arg8,
-					Datum arg9);
+						Datum arg1, Datum arg2,
+						Datum arg3, Datum arg4, Datum arg5,
+						Datum arg6, Datum arg7, Datum arg8,
+						Datum arg9);
 
 /* These are for invocation of a previously-looked-up function with a
  * directly-computed parameter list.  Note that neither arguments nor result
  * are allowed to be NULL.
  */
 extern Datum FunctionCall1Coll(FmgrInfo *flinfo, Oid collation,
-							   Datum arg1);
+				  Datum arg1);
 extern Datum FunctionCall2Coll(FmgrInfo *flinfo, Oid collation,
-							   Datum arg1, Datum arg2);
+				  Datum arg1, Datum arg2);
 extern Datum FunctionCall3Coll(FmgrInfo *flinfo, Oid collation,
-							   Datum arg1, Datum arg2,
-			  Datum arg3);
+				  Datum arg1, Datum arg2,
+				  Datum arg3);
 extern Datum FunctionCall4Coll(FmgrInfo *flinfo, Oid collation,
-							   Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4);
+				  Datum arg1, Datum arg2,
+				  Datum arg3, Datum arg4);
 extern Datum FunctionCall5Coll(FmgrInfo *flinfo, Oid collation,
-							   Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4, Datum arg5);
+				  Datum arg1, Datum arg2,
+				  Datum arg3, Datum arg4, Datum arg5);
 extern Datum FunctionCall6Coll(FmgrInfo *flinfo, Oid collation,
-							   Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4, Datum arg5,
-			  Datum arg6);
+				  Datum arg1, Datum arg2,
+				  Datum arg3, Datum arg4, Datum arg5,
+				  Datum arg6);
 extern Datum FunctionCall7Coll(FmgrInfo *flinfo, Oid collation,
-							   Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4, Datum arg5,
-			  Datum arg6, Datum arg7);
+				  Datum arg1, Datum arg2,
+				  Datum arg3, Datum arg4, Datum arg5,
+				  Datum arg6, Datum arg7);
 extern Datum FunctionCall8Coll(FmgrInfo *flinfo, Oid collation,
-							   Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4, Datum arg5,
-			  Datum arg6, Datum arg7, Datum arg8);
+				  Datum arg1, Datum arg2,
+				  Datum arg3, Datum arg4, Datum arg5,
+				  Datum arg6, Datum arg7, Datum arg8);
 extern Datum FunctionCall9Coll(FmgrInfo *flinfo, Oid collation,
-							   Datum arg1, Datum arg2,
-			  Datum arg3, Datum arg4, Datum arg5,
-			  Datum arg6, Datum arg7, Datum arg8,
-			  Datum arg9);
+				  Datum arg1, Datum arg2,
+				  Datum arg3, Datum arg4, Datum arg5,
+				  Datum arg6, Datum arg7, Datum arg8,
+				  Datum arg9);
 
 /* These are for invocation of a function identified by OID with a
  * directly-computed parameter list.  Note that neither arguments nor result
@@ -506,35 +506,35 @@ extern Datum FunctionCall9Coll(FmgrInfo *flinfo, Oid collation,
  */
 extern Datum OidFunctionCall0Coll(Oid functionId, Oid collation);
 extern Datum OidFunctionCall1Coll(Oid functionId, Oid collation,
-								  Datum arg1);
+					 Datum arg1);
 extern Datum OidFunctionCall2Coll(Oid functionId, Oid collation,
-								  Datum arg1, Datum arg2);
+					 Datum arg1, Datum arg2);
 extern Datum OidFunctionCall3Coll(Oid functionId, Oid collation,
-								  Datum arg1, Datum arg2,
-				 Datum arg3);
+					 Datum arg1, Datum arg2,
+					 Datum arg3);
 extern Datum OidFunctionCall4Coll(Oid functionId, Oid collation,
-								  Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4);
+					 Datum arg1, Datum arg2,
+					 Datum arg3, Datum arg4);
 extern Datum OidFunctionCall5Coll(Oid functionId, Oid collation,
-								  Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4, Datum arg5);
+					 Datum arg1, Datum arg2,
+					 Datum arg3, Datum arg4, Datum arg5);
 extern Datum OidFunctionCall6Coll(Oid functionId, Oid collation,
-								  Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4, Datum arg5,
-				 Datum arg6);
+					 Datum arg1, Datum arg2,
+					 Datum arg3, Datum arg4, Datum arg5,
+					 Datum arg6);
 extern Datum OidFunctionCall7Coll(Oid functionId, Oid collation,
-								  Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4, Datum arg5,
-				 Datum arg6, Datum arg7);
+					 Datum arg1, Datum arg2,
+					 Datum arg3, Datum arg4, Datum arg5,
+					 Datum arg6, Datum arg7);
 extern Datum OidFunctionCall8Coll(Oid functionId, Oid collation,
-								  Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4, Datum arg5,
-				 Datum arg6, Datum arg7, Datum arg8);
+					 Datum arg1, Datum arg2,
+					 Datum arg3, Datum arg4, Datum arg5,
+					 Datum arg6, Datum arg7, Datum arg8);
 extern Datum OidFunctionCall9Coll(Oid functionId, Oid collation,
-								  Datum arg1, Datum arg2,
-				 Datum arg3, Datum arg4, Datum arg5,
-				 Datum arg6, Datum arg7, Datum arg8,
-				 Datum arg9);
+					 Datum arg1, Datum arg2,
+					 Datum arg3, Datum arg4, Datum arg5,
+					 Datum arg6, Datum arg7, Datum arg8,
+					 Datum arg9);
 
 /* These macros allow the collation argument to be omitted (with a default of
  * InvalidOid, ie, no collation).  They exist mostly for backwards
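
/*
 * Sketch of how the *Coll variants declared above are used from extension C
 * code: the collation is passed explicitly alongside the Datum arguments,
 * and neither arguments nor result may be NULL.  texteq and
 * DEFAULT_COLLATION_OID are existing backend symbols; the wrapper function
 * itself is hypothetical and would be exposed with the usual
 * CREATE FUNCTION ... LANGUAGE C boilerplate.
 */
#include "postgres.h"
#include "fmgr.h"
#include "catalog/pg_collation.h"
#include "utils/builtins.h"

PG_MODULE_MAGIC;

Datum		same_text(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(same_text);

Datum
same_text(PG_FUNCTION_ARGS)
{
	Datum		a = PG_GETARG_DATUM(0);
	Datum		b = PG_GETARG_DATUM(1);

	/* direct call: no FmgrInfo lookup needed for a known builtin */
	bool		eq = DatumGetBool(DirectFunctionCall2Coll(texteq,
														  DEFAULT_COLLATION_OID,
														  a, b));

	PG_RETURN_BOOL(eq);
}
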
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index ee1881b630fbe5f60dd1dbd0ee16d42af71ec7b5..14937d4363ee79f7c876ca202129f756231f0446 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -725,7 +725,7 @@ typedef struct RangeTblEntry
 	 *
 	 * If the function returns RECORD, funccoltypes lists the column types
 	 * declared in the RTE's column type specification, funccoltypmods lists
-	 * their declared typmods, funccolcollations their collations.  Otherwise,
+	 * their declared typmods, funccolcollations their collations.	Otherwise,
 	 * those fields are NIL.
 	 */
 	Node	   *funcexpr;		/* expression tree for func call */
diff --git a/src/include/optimizer/clauses.h b/src/include/optimizer/clauses.h
index 4af772d255cbc7b178814d3ecf980a6e7223d017..dde6d82db4d645e57ce45c6655d4802d9e4a6444 100644
--- a/src/include/optimizer/clauses.h
+++ b/src/include/optimizer/clauses.h
@@ -50,7 +50,7 @@ extern List *make_ands_implicit(Expr *clause);
 extern bool contain_agg_clause(Node *clause);
 extern List *pull_agg_clause(Node *clause);
 extern void count_agg_clauses(PlannerInfo *root, Node *clause,
-							  AggClauseCosts *costs);
+				  AggClauseCosts *costs);
 
 extern bool contain_window_function(Node *clause);
 extern WindowFuncLists *find_window_functions(Node *clause, Index maxWinRef);
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index ce54c0a21b17c4e5bd5b966a5456e8932b14aa67..c53d93e3969977051b17953e4ccdf5e4d6d8cf99 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -92,7 +92,7 @@ typedef struct HASHCTL
 #define HASH_CONTEXT	0x200	/* Set memory allocation context */
 #define HASH_COMPARE	0x400	/* Set user defined comparison function */
 #define HASH_KEYCOPY	0x800	/* Set user defined key-copying function */
-#define HASH_FIXED_SIZE	0x1000	/* Initial size is a hard limit */
+#define HASH_FIXED_SIZE 0x1000	/* Initial size is a hard limit */
 
 
 /* max_dsize value to indicate expansible directory */
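
/*
 * Sketch of how the flag bits above are combined when creating a dynahash
 * table: HASH_ELEM supplies keysize/entrysize, HASH_FUNCTION the hash
 * function, and HASH_FIXED_SIZE turns the initial size into a hard limit.
 * The entry layout and table name are invented for illustration; only the
 * hash_create() call pattern is the point here.
 */
#include "postgres.h"
#include "utils/hsearch.h"

typedef struct MyCacheEntry
{
	Oid			relid;			/* hash key; must be first field */
	int			hits;
} MyCacheEntry;

static HTAB *my_cache = NULL;

static void
init_my_cache(void)
{
	HASHCTL		ctl;

	MemSet(&ctl, 0, sizeof(ctl));
	ctl.keysize = sizeof(Oid);
	ctl.entrysize = sizeof(MyCacheEntry);
	ctl.hash = oid_hash;

	my_cache = hash_create("my cache", 128, &ctl,
						   HASH_ELEM | HASH_FUNCTION | HASH_FIXED_SIZE);
}
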
diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h
index dd38a0292fc763c6189f5133f8db681532da5347..4208588c2eb9b931a568643b5f1339176aac2b61 100644
--- a/src/include/utils/selfuncs.h
+++ b/src/include/utils/selfuncs.h
@@ -136,7 +136,7 @@ extern Pattern_Prefix_Status pattern_fixed_prefix(Const *patt,
 					 Const **prefix,
 					 Const **rest);
 extern Const *make_greater_string(const Const *str_const, FmgrInfo *ltproc,
-								  Oid collation);
+					Oid collation);
 
 extern Datum eqsel(PG_FUNCTION_ARGS);
 extern Datum neqsel(PG_FUNCTION_ARGS);
diff --git a/src/interfaces/ecpg/ecpglib/sqlda.c b/src/interfaces/ecpg/ecpglib/sqlda.c
index c08c61b5df15ae46748184bc5742f9944b8fb2ef..33b4d2bce3d3e397123c46d66ca61bbee2cf4f1f 100644
--- a/src/interfaces/ecpg/ecpglib/sqlda.c
+++ b/src/interfaces/ecpg/ecpglib/sqlda.c
@@ -228,8 +228,12 @@ ecpg_build_compat_sqlda(int line, PGresult *res, int row, enum COMPAT_MODE compa
 		strcpy(fname, PQfname(res, i));
 		sqlda->sqlvar[i].sqlname = fname;
 		fname += strlen(sqlda->sqlvar[i].sqlname) + 1;
-		/* this is reserved for future use, so we leave it empty for the time being */
-		/* sqlda->sqlvar[i].sqlformat = (char *) (long) PQfformat(res, i);*/
+
+		/*
+		 * this is reserved for future use, so we leave it empty for the time
+		 * being
+		 */
+		/* sqlda->sqlvar[i].sqlformat = (char *) (long) PQfformat(res, i); */
 		sqlda->sqlvar[i].sqlxid = PQftype(res, i);
 		sqlda->sqlvar[i].sqltypelen = PQfsize(res, i);
 	}
diff --git a/src/interfaces/ecpg/pgtypeslib/timestamp.c b/src/interfaces/ecpg/pgtypeslib/timestamp.c
index 3db57ad3cb0141ac92741dc92a5c36476b56f89c..8354e460d02f1ddb8c6d7cfe96ab69f7a0808231 100644
--- a/src/interfaces/ecpg/pgtypeslib/timestamp.c
+++ b/src/interfaces/ecpg/pgtypeslib/timestamp.c
@@ -503,7 +503,7 @@ dttofmtasc_replace(timestamp * ts, date dDate, int dow, struct tm * tm,
 				case 'G':
 					{
 						/* Keep compiler quiet - Don't use a literal format */
-						const char *fmt = "%G"; 
+						const char *fmt = "%G";
 
 						tm->tm_mon -= 1;
 						i = strftime(q, *pstr_len, fmt, tm);
@@ -689,7 +689,7 @@ dttofmtasc_replace(timestamp * ts, date dDate, int dow, struct tm * tm,
 				case 'V':
 					{
 						/* Keep compiler quiet - Don't use a literal format */
-						const char *fmt = "%V"; 
+						const char *fmt = "%V";
 
 						i = strftime(q, *pstr_len, fmt, tm);
 						if (i == 0)
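
/*
 * Sketch of the "don't use a literal format" trick used above: routing the
 * conversion through a variable keeps compiler format-string checks from
 * complaining about %G/%V, while strftime() behaves exactly as it would with
 * a literal.  This prints the ISO 8601 week-based year and week number for
 * the current time.
 */
#include <stdio.h>
#include <time.h>

int
main(void)
{
	char		buf[64];
	time_t		now = time(NULL);
	struct tm  *tm = localtime(&now);
	const char *fmt = "%G-W%V";	/* ISO year and week, via a variable */

	if (tm == NULL || strftime(buf, sizeof(buf), fmt, tm) == 0)
		return 1;
	printf("%s\n", buf);
	return 0;
}
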
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
index 9a0317ba4af13ef9ca237c74ef0c11e3bb01f9c1..e72826cddc30c30738c3adda170580d077cf6a25 100644
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -325,7 +325,7 @@ pg_GSS_error_int(PQExpBuffer str, const char *mprefix,
 	do
 	{
 		gss_display_status(&lmin_s, stat, type,
-									GSS_C_NO_OID, &msg_ctx, &lmsg);
+						   GSS_C_NO_OID, &msg_ctx, &lmsg);
 		appendPQExpBuffer(str, "%s: %s\n", mprefix, (char *) lmsg.value);
 		gss_release_buffer(&lmin_s, &lmsg);
 	} while (msg_ctx);
@@ -693,9 +693,9 @@ pg_local_sendauth(PGconn *conn)
 	struct cmsghdr *cmsg;
 	union
 	{
-		struct cmsghdr	hdr;
-		unsigned char	buf[CMSG_SPACE(sizeof(struct cmsgcred))];
-	} cmsgbuf;
+		struct cmsghdr hdr;
+		unsigned char buf[CMSG_SPACE(sizeof(struct cmsgcred))];
+	}			cmsgbuf;
 
 	/*
 	 * The backend doesn't care what we send here, but it wants exactly one
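
/*
 * Sketch of the ancillary-data buffer pattern used in pg_local_sendauth()
 * above: a union of struct cmsghdr and a raw byte array sized with
 * CMSG_SPACE() guarantees the alignment the CMSG_* macros expect.  fe-auth.c
 * uses it for BSD credential passing (struct cmsgcred/SCM_CREDS); the same
 * shape is shown here with the more widely available SCM_RIGHTS, sending one
 * file descriptor across a socketpair.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static int
send_fd(int sock, int fd_to_send)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	char		dummy = 'x';
	union
	{
		struct cmsghdr hdr;
		unsigned char buf[CMSG_SPACE(sizeof(int))];
	}			cmsgbuf;

	memset(&msg, 0, sizeof(msg));
	memset(&cmsgbuf, 0, sizeof(cmsgbuf));

	/* at least one byte of ordinary data must accompany the control message */
	iov.iov_base = &dummy;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf.buf;
	msg.msg_controllen = sizeof(cmsgbuf.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

	return (sendmsg(sock, &msg, 0) == 1) ? 0 : -1;
}

int
main(void)
{
	int			sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0)
		return 1;
	if (send_fd(sv[0], STDOUT_FILENO) != 0)
	{
		perror("sendmsg");
		return 1;
	}
	printf("descriptor sent\n");
	close(sv[0]);
	close(sv[1]);
	return 0;
}
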
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 9aa6ca01eb2fb5c325e50963e495789635754556..9e4807e8a96cac13c40e38a24bdb845c4824f1e6 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -1054,18 +1054,18 @@ connectFailureMessage(PGconn *conn, int errorno)
 		if ((conn->pghostaddr == NULL) &&
 			(conn->pghost == NULL || strcmp(conn->pghost, host_addr) != 0))
 			appendPQExpBuffer(&conn->errorMessage,
-							  libpq_gettext("could not connect to server: %s\n"
-											"\tIs the server running on host \"%s\" (%s) and accepting\n"
-											"\tTCP/IP connections on port %s?\n"),
+							libpq_gettext("could not connect to server: %s\n"
+				"\tIs the server running on host \"%s\" (%s) and accepting\n"
+									   "\tTCP/IP connections on port %s?\n"),
 							  SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)),
 							  displayed_host,
 							  host_addr,
 							  conn->pgport);
 		else
 			appendPQExpBuffer(&conn->errorMessage,
-							  libpq_gettext("could not connect to server: %s\n"
-											"\tIs the server running on host \"%s\" and accepting\n"
-											"\tTCP/IP connections on port %s?\n"),
+							libpq_gettext("could not connect to server: %s\n"
+					 "\tIs the server running on host \"%s\" and accepting\n"
+									   "\tTCP/IP connections on port %s?\n"),
 							  SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)),
 							  displayed_host,
 							  conn->pgport);
@@ -1854,6 +1854,7 @@ keep_going:						/* We will come back to here until there is
 				int			packetlen;
 
 #ifdef HAVE_UNIX_SOCKETS
+
 				/*
 				 * Implement requirepeer check, if requested and it's a
 				 * Unix-domain socket.
@@ -1870,14 +1871,17 @@ keep_going:						/* We will come back to here until there is
 					errno = 0;
 					if (getpeereid(conn->sock, &uid, &gid) != 0)
 					{
-						/* Provide special error message if getpeereid is a stub */
+						/*
+						 * Provide special error message if getpeereid is a
+						 * stub
+						 */
 						if (errno == ENOSYS)
 							appendPQExpBuffer(&conn->errorMessage,
 											  libpq_gettext("requirepeer parameter is not supported on this platform\n"));
 						else
 							appendPQExpBuffer(&conn->errorMessage,
 											  libpq_gettext("could not get peer credentials: %s\n"),
-											  pqStrerror(errno, sebuf, sizeof(sebuf)));
+									pqStrerror(errno, sebuf, sizeof(sebuf)));
 						goto error_return;
 					}
 
@@ -1899,7 +1903,7 @@ keep_going:						/* We will come back to here until there is
 						goto error_return;
 					}
 				}
-#endif /* HAVE_UNIX_SOCKETS */
+#endif   /* HAVE_UNIX_SOCKETS */
 
 #ifdef USE_SSL
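
/*
 * Sketch of the requirepeer-style check performed above, using getpeereid()
 * where the platform provides it natively: the peer's effective uid is
 * mapped to a user name with getpwuid() and compared against the expected
 * name.  The real libpq code additionally distinguishes the ENOSYS case
 * (getpeereid compiled as a stub) to produce a clearer error message, and
 * uses a thread-safe variant of getpwuid().
 */
#include <pwd.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>

/* "sock" is assumed to be a connected Unix-domain socket descriptor */
static int
peer_is_user(int sock, const char *required_user)
{
	uid_t		uid;
	gid_t		gid;
	struct passwd *pw;

	if (getpeereid(sock, &uid, &gid) != 0)
		return -1;				/* could not get peer credentials */

	pw = getpwuid(uid);
	if (pw == NULL)
		return -1;				/* uid has no entry in the user database */

	return strcmp(pw->pw_name, required_user) == 0;
}
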
 
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index d2c672c7bb35f1801d7aaa60636e6130ea5aaf18..61252308cd871bd49823ec8621fcfdd1f15ef0ae 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -1986,7 +1986,7 @@ plperl_call_perl_trigger_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo,
 	if (!TDsv)
 		elog(ERROR, "couldn't fetch $_TD");
 
-	save_item(TDsv);				/* local $_TD */
+	save_item(TDsv);			/* local $_TD */
 	sv_setsv(TDsv, td);
 
 	PUSHMARK(sp);
@@ -3564,7 +3564,7 @@ hv_store_string(HV *hv, const char *key, SV *val)
 	 * does not appear that hashes track UTF-8-ness of keys at all in Perl
 	 * 5.6.
 	 */
-	hlen = - (int) strlen(hkey);
+	hlen = -(int) strlen(hkey);
 	ret = hv_store(hv, hkey, hlen, val, 0);
 
 	if (hkey != key)
@@ -3589,7 +3589,7 @@ hv_fetch_string(HV *hv, const char *key)
 								  GetDatabaseEncoding(), PG_UTF8);
 
 	/* See notes in hv_store_string */
-	hlen = - (int) strlen(hkey);
+	hlen = -(int) strlen(hkey);
 	ret = hv_fetch(hv, hkey, hlen, 0);
 
 	if (hkey != key)
diff --git a/src/pl/plperl/plperl.h b/src/pl/plperl/plperl.h
index c1236b7efc4e558b43499008a32e76043c5d59ee..c4810cbcdb15fbfb01e1ab9902a077782042db70 100644
--- a/src/pl/plperl/plperl.h
+++ b/src/pl/plperl/plperl.h
@@ -59,13 +59,13 @@
 #undef vsnprintf
 #endif
 #ifdef __GNUC__
-#define vsnprintf(...)  pg_vsnprintf(__VA_ARGS__)
-#define snprintf(...)   pg_snprintf(__VA_ARGS__)
+#define vsnprintf(...)	pg_vsnprintf(__VA_ARGS__)
+#define snprintf(...)	pg_snprintf(__VA_ARGS__)
 #else
-#define vsnprintf       pg_vsnprintf
-#define snprintf        pg_snprintf
-#endif /* __GNUC__ */
-#endif /*  USE_REPL_SNPRINTF */
+#define vsnprintf		pg_vsnprintf
+#define snprintf		pg_snprintf
+#endif   /* __GNUC__ */
+#endif   /* USE_REPL_SNPRINTF */
 
 /* perl version and platform portability */
 #define NEED_eval_pv
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 4331a5c8df4f004831f0b7363a2d9cca1ca01801..906a485853f1232c181a2fdabb3a25b3dbe04c36 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -4400,7 +4400,7 @@ exec_get_datum_type_info(PLpgSQL_execstate *estate,
 
 		default:
 			elog(ERROR, "unrecognized dtype: %d", datum->dtype);
-			*typeid = InvalidOid;	/* keep compiler quiet */
+			*typeid = InvalidOid;		/* keep compiler quiet */
 			*typmod = -1;
 			*collation = InvalidOid;
 			break;
diff --git a/src/pl/plpython/plpython.c b/src/pl/plpython/plpython.c
index e81f62542ff579528f7342a519193c228fa6937b..9081cffb498bedfc411afecab374441c8a58bda6 100644
--- a/src/pl/plpython/plpython.c
+++ b/src/pl/plpython/plpython.c
@@ -4512,8 +4512,8 @@ get_source_line(const char *src, int lineno)
 
 	/*
 	 * Sanity check, next < s if the line was all-whitespace, which should
-	 * never happen if Python reported a frame created on that line, but
-	 * check anyway.
+	 * never happen if Python reported a frame created on that line, but check
+	 * anyway.
 	 */
 	if (next < s)
 		return NULL;
@@ -4680,7 +4680,10 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
 					&tbstr, "\n  PL/Python function \"%s\", line %ld, in %s",
 								 proname, plain_lineno - 1, fname);
 
-			/* function code object was compiled with "<string>" as the filename */
+			/*
+			 * function code object was compiled with "<string>" as the
+			 * filename
+			 */
 			if (PLy_curr_procedure && plain_filename != NULL &&
 				strcmp(plain_filename, "<string>") == 0)
 			{
diff --git a/src/port/getopt.c b/src/port/getopt.c
index e901bf7db50f0d1efc7690ac51a1b160217b4e41..eb50cba1ca440d0de9f9d194dc0c792b0bfb293b 100644
--- a/src/port/getopt.c
+++ b/src/port/getopt.c
@@ -61,7 +61,7 @@ extern char *optarg;
 #define BADARG	(int)':'
 #define EMSG	""
 
-int getopt(int nargc, char *const * nargv, const char * ostr);
+int			getopt(int nargc, char *const * nargv, const char *ostr);
 
 /*
  * getopt
@@ -74,7 +74,7 @@ int getopt(int nargc, char *const * nargv, const char * ostr);
  * returning -1.)
  */
 int
-getopt(int nargc, char *const * nargv, const char * ostr)
+getopt(int nargc, char *const * nargv, const char *ostr)
 {
 	static char *place = EMSG;	/* option letter processing */
 	char	   *oli;			/* option letter list index */
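
/*
 * Sketch of how the getopt() implemented above is consumed by callers: the
 * option string lists the accepted letters, a trailing ':' marks an option
 * that takes an argument (delivered via optarg), and '?' is returned for an
 * unrecognized option.  Option names here are arbitrary.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	int			c;
	int			verbose = 0;
	const char *outfile = "out.txt";

	while ((c = getopt(argc, argv, "vo:")) != -1)
	{
		switch (c)
		{
			case 'v':
				verbose = 1;
				break;
			case 'o':
				outfile = optarg;
				break;
			default:			/* '?' */
				fprintf(stderr, "usage: %s [-v] [-o file]\n", argv[0]);
				exit(EXIT_FAILURE);
		}
	}
	printf("verbose=%d outfile=%s first non-option at argv[%d]\n",
		   verbose, outfile, optind);
	return 0;
}
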
diff --git a/src/port/getpeereid.c b/src/port/getpeereid.c
index e10a140624f7c3035f0311384f8614da89e0cffe..5f989ff78e4ae735494323c4994e2f82d15dd7d0 100644
--- a/src/port/getpeereid.c
+++ b/src/port/getpeereid.c
@@ -69,7 +69,7 @@ getpeereid(int sock, uid_t *uid, gid_t *gid)
 	*gid = ucred_getegid(ucred);
 	ucred_free(ucred);
 
-	if (*uid == (uid_t)(-1) || *gid == (gid_t)(-1))
+	if (*uid == (uid_t) (-1) || *gid == (gid_t) (-1))
 		return -1;
 	return 0;
 #else
diff --git a/src/port/inet_net_ntop.c b/src/port/inet_net_ntop.c
index 9c3c93b8ecca9509e5fc759acef9d6d12d4a9a29..047895e4f4f03b988c556c3ff9c61b08eb7283d0 100644
--- a/src/port/inet_net_ntop.c
+++ b/src/port/inet_net_ntop.c
@@ -81,8 +81,8 @@ inet_net_ntop(int af, const void *src, int bits, char *dst, size_t size)
 	 * We need to cover both the address family constants used by the PG inet
 	 * type (PGSQL_AF_INET and PGSQL_AF_INET6) and those used by the system
 	 * libraries (AF_INET and AF_INET6).  We can safely assume PGSQL_AF_INET
-	 * == AF_INET, but the INET6 constants are very likely to be different.
-	 * If AF_INET6 isn't defined, silently ignore it.
+	 * == AF_INET, but the INET6 constants are very likely to be different. If
+	 * AF_INET6 isn't defined, silently ignore it.
 	 */
 	switch (af)
 	{
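
/*
 * Sketch of the system-library side of the address-family juggling discussed
 * above: the standard inet_ntop() is keyed on AF_INET/AF_INET6, whereas the
 * PG inet type carries its own PGSQL_AF_* constants, which is why the
 * function above must accept both sets.
 */
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int
main(void)
{
	struct in_addr v4;
	struct in6_addr v6 = IN6ADDR_LOOPBACK_INIT;
	char		buf[INET6_ADDRSTRLEN];

	v4.s_addr = htonl(INADDR_LOOPBACK);

	if (inet_ntop(AF_INET, &v4, buf, sizeof(buf)) != NULL)
		printf("AF_INET:  %s\n", buf);	/* 127.0.0.1 */
	if (inet_ntop(AF_INET6, &v6, buf, sizeof(buf)) != NULL)
		printf("AF_INET6: %s\n", buf);	/* ::1 */
	return 0;
}
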
diff --git a/src/port/noblock.c b/src/port/noblock.c
index 883697535dcf19867ead73d904c5ce1289ceffc9..93a88ec7c69a82b5974975f2533b88b96bef310f 100644
--- a/src/port/noblock.c
+++ b/src/port/noblock.c
@@ -23,7 +23,7 @@ pg_set_noblock(pgsocket sock)
 #if !defined(WIN32)
 	return (fcntl(sock, F_SETFL, O_NONBLOCK) != -1);
 #else
-	unsigned long		ioctlsocket_ret = 1;
+	unsigned long ioctlsocket_ret = 1;
 
 	/* Returns non-0 on failure, while fcntl() returns -1 on failure */
 	return (ioctlsocket(sock, FIONBIO, &ioctlsocket_ret) == 0);
@@ -42,7 +42,7 @@ pg_set_block(pgsocket sock)
 		return false;
 	return true;
 #else
-	unsigned long		ioctlsocket_ret = 0;
+	unsigned long ioctlsocket_ret = 0;
 
 	/* Returns non-0 on failure, while fcntl() returns -1 on failure */
 	return (ioctlsocket(sock, FIONBIO, &ioctlsocket_ret) == 0);
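
/*
 * Sketch of the Unix branch above written in the flag-preserving idiom
 * (F_GETFL, then F_SETFL with the bit added or cleared).  pg_set_noblock()
 * itself simply sets O_NONBLOCK outright, which is sufficient for the
 * sockets it is used on; this variant also supports switching back without
 * disturbing any other status flags.
 */
#include <fcntl.h>
#include <stdbool.h>

static bool
set_socket_nonblocking(int sock, bool nonblock)
{
	int			flags = fcntl(sock, F_GETFL, 0);

	if (flags == -1)
		return false;
	if (nonblock)
		flags |= O_NONBLOCK;
	else
		flags &= ~O_NONBLOCK;

	return fcntl(sock, F_SETFL, flags) != -1;
}
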
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index 28b052847534102c06e4aa6b9236b14a41e6a4da..9fc9c1088ffb0abaf800da158f168d4b4b01f51c 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -2140,7 +2140,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 #ifndef WIN32_ONLY_COMPILER
 			snprintf(buf, sizeof(buf),
 					 SYSTEMQUOTE "\"%s\" -C \"%s/%s\" DESTDIR=\"%s/install\" install >> \"%s/log/install.log\" 2>&1" SYSTEMQUOTE,
-					 makeprog, top_builddir, sl->str, temp_install, outputdir);
+				   makeprog, top_builddir, sl->str, temp_install, outputdir);
 #else
 			fprintf(stderr, _("\n%s: --extra-install option not supported on this platform\n"), progname);
 			exit_nicely(2);
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index 622cf94c5ad84daf14a02dc0cd162d6fde7681dd..ce6e7d1add96399ecdc02a2469b4a7f10d5aedc5 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -1480,9 +1480,9 @@ pg_timezone_initialize(void)
 	 * postgresql.conf, this code will not do what you might expect, namely
 	 * call select_default_timezone() and install that value as the setting.
 	 * Rather, the previously active setting --- typically the one from
-	 * postgresql.conf --- will be reinstalled, relabeled as PGC_S_ENV_VAR.
-	 * If we did try to install the "correct" default value, the effect would
-	 * be that each postmaster child would independently run an extremely
+	 * postgresql.conf --- will be reinstalled, relabeled as PGC_S_ENV_VAR. If
+	 * we did try to install the "correct" default value, the effect would be
+	 * that each postmaster child would independently run an extremely
 	 * expensive search of the timezone database, bringing the database to its
 	 * knees for possibly multiple seconds.  This is so unpleasant, and could
 	 * so easily be triggered quite unintentionally, that it seems better to