diff --git a/configure b/configure
index 9a83f198216e14b5180740a43430272a6578141a..8468417f69b5011bcf98214915b1a917ee76489b 100755
--- a/configure
+++ b/configure
@@ -7088,7 +7088,7 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
 test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
 
 # When Autoconf chooses install-sh as install program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $INSTALL in
   *install-sh*) install_bin='';;
@@ -7232,7 +7232,7 @@ fi
 $as_echo "$MKDIR_P" >&6; }
 
 # When Autoconf chooses install-sh as mkdir -p program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $MKDIR_P in
   *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;
diff --git a/configure.in b/configure.in
index 52e4e7847106d37690825f4c46e031f1ae4d198e..01b618c931ddd44e7d40460d86377ef0e27fb394 100644
--- a/configure.in
+++ b/configure.in
@@ -887,7 +887,7 @@ fi
 
 AC_PROG_INSTALL
 # When Autoconf chooses install-sh as install program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $INSTALL in
   *install-sh*) install_bin='';;
@@ -900,7 +900,7 @@ AC_PROG_LN_S
 AC_PROG_AWK
 AC_PROG_MKDIR_P
 # When Autoconf chooses install-sh as mkdir -p program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $MKDIR_P in
   *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;
diff --git a/contrib/bloom/blvacuum.c b/contrib/bloom/blvacuum.c
index 807da9254e07b01352133e96607e22b48d0c205c..04abd0f6b6cd98e9470ddc62b917eed02a711829 100644
--- a/contrib/bloom/blvacuum.c
+++ b/contrib/bloom/blvacuum.c
@@ -51,7 +51,7 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	initBloomState(&state, index);
 
 	/*
-	 * Interate over the pages. We don't care about concurrently added pages,
+	 * Iterate over the pages. We don't care about concurrently added pages,
 	 * they can't contain tuples to delete.
 	 */
 	npages = RelationGetNumberOfBlocks(index);
diff --git a/contrib/cube/expected/cube.out b/contrib/cube/expected/cube.out
index ada54b2885fd0b8101f220ca8f601db8c8caecd3..328b3b5f5de42954d007ab104557cf1c3f658690 100644
--- a/contrib/cube/expected/cube.out
+++ b/contrib/cube/expected/cube.out
@@ -1056,7 +1056,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
         5
 (1 row)
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
  cube_ll_coord 
@@ -1112,7 +1112,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3);
              0
 (1 row)
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
  cube_ur_coord 
diff --git a/contrib/cube/expected/cube_2.out b/contrib/cube/expected/cube_2.out
index c58614ef05f0e0e306f2d4eb52e3d59974e65cb3..1aa5cf2f983cf112791df9b3976520aca0189d93 100644
--- a/contrib/cube/expected/cube_2.out
+++ b/contrib/cube/expected/cube_2.out
@@ -1056,7 +1056,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
         5
 (1 row)
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
  cube_ll_coord 
@@ -1112,7 +1112,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3);
              0
 (1 row)
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
  cube_ur_coord 
diff --git a/contrib/cube/sql/cube.sql b/contrib/cube/sql/cube.sql
index a61fba1ea8182b499a3c0f8becc5c5064abbd74f..58ea3ad81139ce11c23fe0a38cefd5f7c0724de0 100644
--- a/contrib/cube/sql/cube.sql
+++ b/contrib/cube/sql/cube.sql
@@ -256,7 +256,7 @@ SELECT cube_dim('(0,0,0)'::cube);
 SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
 SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2);
@@ -268,7 +268,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 1);
 SELECT cube_ll_coord('(42,137)'::cube, 2);
 SELECT cube_ll_coord('(42,137)'::cube, 3);
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2);
diff --git a/contrib/earthdistance/earthdistance--1.1.sql b/contrib/earthdistance/earthdistance--1.1.sql
index 657d328ebbbe64dba1920890bca50f58a2bf2527..9136a54a7b348b79a72f140d7691c0840b10ee85 100644
--- a/contrib/earthdistance/earthdistance--1.1.sql
+++ b/contrib/earthdistance/earthdistance--1.1.sql
@@ -11,7 +11,7 @@ CREATE FUNCTION earth() RETURNS float8
 LANGUAGE SQL IMMUTABLE PARALLEL SAFE
 AS 'SELECT ''6378168''::float8';
 
--- Astromers may want to change the earth function so that distances will be
+-- Astronomers may want to change the earth function so that distances will be
 -- returned in degrees. To do this comment out the above definition and
 -- uncomment the one below. Note that doing this will break the regression
 -- tests.
diff --git a/contrib/isn/ISSN.h b/contrib/isn/ISSN.h
index 082efcff7c67f866edf4ec48d4ef8312370c5e8f..585f0e2674156cd560850026fd87a2263115175b 100644
--- a/contrib/isn/ISSN.h
+++ b/contrib/isn/ISSN.h
@@ -23,7 +23,7 @@
  * Product		9 + 21 + 7 + 3 + 1 + 12 + 4 + 24 + 7 + 15 + 0 + 0 = 103
  *				103 / 10 = 10 remainder 3
  * Check digit	10 - 3 = 7
- * => 977-1144875-00-7 ??  <- suplemental number (number of the week, month, etc.)
+ * => 977-1144875-00-7 ??  <- supplemental number (number of the week, month, etc.)
  *				  ^^ 00 for non-daily publications (01=Monday, 02=Tuesday, ...)
  *
  * The hyphenation is always in after the four digits of the ISSN code.
diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c
index 9e125b83d79ca94fb7c88bd9b15e9b44c5c31f54..c3c10e14bca3e31987a5151f8efa7c5720b93fd9 100644
--- a/contrib/isn/isn.c
+++ b/contrib/isn/isn.c
@@ -160,7 +160,7 @@ dehyphenate(char *bufO, char *bufI)
  *				  into bufO using the given hyphenation range TABLE.
  *				  Assumes the input string to be used is of only digits.
  *
- * Returns the number of characters acctually hyphenated.
+ * Returns the number of characters actually hyphenated.
  */
 static unsigned
 hyphenate(char *bufO, char *bufI, const char *(*TABLE)[2], const unsigned TABLE_index[10][2])
@@ -748,7 +748,7 @@ string2ean(const char *str, bool errorOK, ean13 *result,
 		}
 		else if (*aux2 == '!' && *(aux2 + 1) == '\0')
 		{
-			/* the invalid check digit sufix was found, set it */
+			/* the invalid check digit suffix was found, set it */
 			if (!magic)
 				valid = false;
 			magic = true;
diff --git a/contrib/ltree/expected/ltree.out b/contrib/ltree/expected/ltree.out
index db52069c266b961316f3a3705b785f121e740c45..3d5737d41b191287eb615704324fa734951f30e5 100644
--- a/contrib/ltree/expected/ltree.out
+++ b/contrib/ltree/expected/ltree.out
@@ -1113,7 +1113,7 @@ SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}';
  t
 (1 row)
 
---exractors
+--extractors
 SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
  ?column? 
 ----------
diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c
index befda1344d5e94f650545340643d490551e07e85..32d90462581d9ea10bf36ec7e338785faba8d8cf 100644
--- a/contrib/ltree/ltxtquery_io.c
+++ b/contrib/ltree/ltxtquery_io.c
@@ -197,7 +197,7 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
 
 #define STACKDEPTH		32
 /*
- * make polish notaion of query
+ * make Polish notation of query
  */
 static int32
 makepol(QPRS_STATE *state)
diff --git a/contrib/ltree/sql/ltree.sql b/contrib/ltree/sql/ltree.sql
index b4f62e3febbd103e0466590e62d37bbf56eaeb03..e9f74909a64b92259bfa03ec5b9b29e27b97154f 100644
--- a/contrib/ltree/sql/ltree.sql
+++ b/contrib/ltree/sql/ltree.sql
@@ -209,7 +209,7 @@ SELECT 'a.b.c.d.e'::ltree ? '{A.b.c.d.e, a.*}';
 SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e}';
 SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}';
 
---exractors
+--extractors
 SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
 SELECT '{3456,1.2.3}'::ltree[] ?@> '1.2.3.4';
 SELECT '{3456,1.2.3.4}'::ltree[] ?<@ '1.2.3';
diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c
index e4136f9149dcc6ada53112c08b2472876d89a083..e4d057e18e21e98ed3f31be4c20935d1da631522 100644
--- a/contrib/pg_standby/pg_standby.c
+++ b/contrib/pg_standby/pg_standby.c
@@ -779,7 +779,7 @@ main(int argc, char **argv)
 		{
 			/*
 			 * Once we have restored this file successfully we can remove some
-			 * prior WAL files. If this restore fails we musn't remove any
+			 * prior WAL files. If this restore fails we mustn't remove any
 			 * file because some of them will be requested again immediately
 			 * after the failed restore, or when we restart recovery.
 			 */
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index a65b52968a59c4c0b69b2f9ab09694635169962c..62dec8768a5c30a6e191d4bbde14511019f7a721 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -139,7 +139,7 @@ typedef struct Counters
 {
 	int64		calls;			/* # of times executed */
 	double		total_time;		/* total execution time, in msec */
-	double		min_time;		/* minimim execution time in msec */
+	double		min_time;		/* minimum execution time in msec */
 	double		max_time;		/* maximum execution time in msec */
 	double		mean_time;		/* mean execution time in msec */
 	double		sum_var_time;	/* sum of variances in execution time in msec */
diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c
index dd0f492cfab74c6f51cc76f2d193ae088e69c76b..368e7c8941d08252169dd616a0274ca02b78fe88 100644
--- a/contrib/pg_trgm/trgm_op.c
+++ b/contrib/pg_trgm/trgm_op.c
@@ -413,7 +413,7 @@ comp_ptrgm(const void *v1, const void *v2)
  * ulen1: count of unique trigrams of array "trg1".
  * len2: length of array "trg2" and array "trg2indexes".
  * len: length of the array "found".
- * check_only: if true then only check existaince of similar search pattern in
+ * check_only: if true then only check existence of similar search pattern in
  *			   text.
  *
  * Returns word similarity.
@@ -456,7 +456,7 @@ iterate_word_similarity(int *trg2indexes,
 			lastpos[trgindex] = i;
 		}
 
-		/* Adjust lower bound if this trigram is present in required substing */
+		/* Adjust lower bound if this trigram is present in required substring */
 		if (found[trgindex])
 		{
 			int			prev_lower,
@@ -547,7 +547,7 @@ iterate_word_similarity(int *trg2indexes,
  *
  * str1: search pattern string, of length slen1 bytes.
  * str2: text in which we are looking for a word, of length slen2 bytes.
- * check_only: if true then only check existaince of similar search pattern in
+ * check_only: if true then only check existence of similar search pattern in
  *			   text.
  *
  * Returns word similarity.
diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c
index 44d9adcd2ab9519697415b0f927dc4a893f3408c..73dbfbd08fc65db6f9d6e7e706e0218003b93a03 100644
--- a/contrib/pgcrypto/mbuf.c
+++ b/contrib/pgcrypto/mbuf.c
@@ -311,7 +311,7 @@ pullf_read_max(PullFilter *pf, int len, uint8 **data_p, uint8 *tmpbuf)
 }
 
 /*
- * caller wants exatly len bytes and dont bother with references
+ * caller wants exactly len bytes and doesn't bother with references
  */
 int
 pullf_read_fixed(PullFilter *src, int len, uint8 *dst)
diff --git a/contrib/pgcrypto/pgp-mpi-internal.c b/contrib/pgcrypto/pgp-mpi-internal.c
index cb70fcba6cc13d0d5ee08c5a99d6ed7abf1852c3..545009ce199900b5817eedee816a6284a63207a6 100644
--- a/contrib/pgcrypto/pgp-mpi-internal.c
+++ b/contrib/pgcrypto/pgp-mpi-internal.c
@@ -141,7 +141,7 @@ bn_to_mpi(mpz_t *bn)
 }
 
 /*
- * Decide the number of bits in the random componont k
+ * Decide the number of bits in the random component k
  *
  * It should be in the same range as p for signing (which
  * is deprecated), but can be much smaller for encrypting.
@@ -149,8 +149,8 @@ bn_to_mpi(mpz_t *bn)
  * Until I research it further, I just mimic gpg behaviour.
  * It has a special mapping table, for values <= 5120,
  * above that it uses 'arbitrary high number'.  Following
- * algorihm hovers 10-70 bits above gpg values.  And for
- * larger p, it uses gpg's algorihm.
+ * algorithm hovers 10-70 bits above gpg values.  And for
+ * larger p, it uses gpg's algorithm.
  *
  * The point is - if k gets large, encryption will be
  * really slow.  It does not matter for decryption.
diff --git a/contrib/pgcrypto/pgp-mpi-openssl.c b/contrib/pgcrypto/pgp-mpi-openssl.c
index 24484a6c54e24c44ca3d71af70f8b6e10fa91380..afece26918653142ab0111d90519623a0b8da41e 100644
--- a/contrib/pgcrypto/pgp-mpi-openssl.c
+++ b/contrib/pgcrypto/pgp-mpi-openssl.c
@@ -74,7 +74,7 @@ bn_to_mpi(BIGNUM *bn)
 }
 
 /*
- * Decide the number of bits in the random componont k
+ * Decide the number of bits in the random component k
  *
  * It should be in the same range as p for signing (which
  * is deprecated), but can be much smaller for encrypting.
@@ -82,8 +82,8 @@ bn_to_mpi(BIGNUM *bn)
  * Until I research it further, I just mimic gpg behaviour.
  * It has a special mapping table, for values <= 5120,
  * above that it uses 'arbitrary high number'.  Following
- * algorihm hovers 10-70 bits above gpg values.  And for
- * larger p, it uses gpg's algorihm.
+ * algorithm hovers 10-70 bits above gpg values.  And for
+ * larger p, it uses gpg's algorithm.
  *
  * The point is - if k gets large, encryption will be
  * really slow.  It does not matter for decryption.
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 3a092804a2d16eac1fd7d8d8a1947a81e222cc40..0b9e3e45379df2ec7d2c626d61153e9a9e24e479 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -2057,7 +2057,7 @@ SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM
    1
 (10 rows)
 
--- non-Var items in targelist of the nullable rel of a join preventing
+-- non-Var items in targetlist of the nullable rel of a join preventing
 -- push-down in some cases
 -- unable to push {ft1, ft2}
 EXPLAIN (VERBOSE, COSTS OFF)
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index e19a3ef398ca11180d5de7553a6645e7a856373b..56b01d049050600ade601aabf305f3f8a9efd9ed 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -493,7 +493,7 @@ EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
 
--- non-Var items in targelist of the nullable rel of a join preventing
+-- non-Var items in targetlist of the nullable rel of a join preventing
 -- push-down in some cases
 -- unable to push {ft1, ft2}
 EXPLAIN (VERBOSE, COSTS OFF)
diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c
index c6c082b8ea6d0d04098bc7224f4229c66e5e2746..895d8794982540ab5f1b45bd953ec43664e939c5 100644
--- a/contrib/seg/seg.c
+++ b/contrib/seg/seg.c
@@ -888,7 +888,7 @@ restore(char *result, float val, int n)
 		if (Abs(exp) <= 4)
 		{
 			/*
-			 * remove the decimal point from the mantyssa and write the digits
+			 * remove the decimal point from the mantissa and write the digits
 			 * to the buf array
 			 */
 			for (p = result + sign, i = 10, dp = 0; *p != 'e'; p++, i++)
diff --git a/contrib/sepgsql/selinux.c b/contrib/sepgsql/selinux.c
index 3e2cfab81980c89e752ad5d7b8cf82c9f10d70b6..7728a183338576295aaf5698bf59519e4b65fe97 100644
--- a/contrib/sepgsql/selinux.c
+++ b/contrib/sepgsql/selinux.c
@@ -23,7 +23,7 @@
  * When we ask SELinux whether the required privileges are allowed or not,
  * we use security_compute_av(3). It needs us to represent object classes
  * and access vectors using 'external' codes defined in the security policy.
- * It is determinded in the runtime, not build time. So, it needs an internal
+ * It is determined at runtime, not build time. So, it needs an internal
  * service to translate object class/access vectors which we want to check
  * into the code which kernel want to be given.
  */
diff --git a/contrib/sepgsql/sql/label.sql b/contrib/sepgsql/sql/label.sql
index 04085e57a4dcf306851767db7535b2936d97bff6..49780b2697b52cb1c595612892e6ccc54935ac38 100644
--- a/contrib/sepgsql/sql/label.sql
+++ b/contrib/sepgsql/sql/label.sql
@@ -206,7 +206,7 @@ SELECT * FROM auth_tbl;	-- failed
 SELECT sepgsql_setcon(NULL);	-- end of session
 SELECT sepgsql_getcon();
 
--- the pooler cannot touch these tables directry
+-- the pooler cannot touch these tables directly
 SELECT * FROM foo_tbl;	-- failed
 
 SELECT * FROM var_tbl;	-- failed
diff --git a/contrib/spi/refint.c b/contrib/spi/refint.c
index 78cfedf219f03965b7baf6b3dfc445fab2d3f1d7..208ff6103def4ec2e849183d0f4ef1d0033d96dc 100644
--- a/contrib/spi/refint.c
+++ b/contrib/spi/refint.c
@@ -89,7 +89,7 @@ check_primary_key(PG_FUNCTION_ARGS)
 		/* internal error */
 		elog(ERROR, "check_primary_key: cannot process DELETE events");
 
-	/* If UPDATion the must check new Tuple, not old one */
+	/* If UPDATE, then must check new Tuple, not old one */
 	else
 		tuple = trigdata->tg_newtuple;
 
diff --git a/contrib/start-scripts/osx/PostgreSQL b/contrib/start-scripts/osx/PostgreSQL
index 9735c8c57f38bf9bc3f33ed4544d47c3bd82f4a5..7ff1d0e377fe4b76376439a352996b30efdd8afd 100755
--- a/contrib/start-scripts/osx/PostgreSQL
+++ b/contrib/start-scripts/osx/PostgreSQL
@@ -29,7 +29,7 @@
 # modified by Ray Aspeitia 12-03-2003 :
 # added log rotation script to db startup
 # modified StartupParameters.plist "Provides" parameter to make it easier to
-# start and stop with the SystemStarter utitlity
+# start and stop with the SystemStarter utility
 
 # use the below command in order to correctly start/stop/restart PG with log rotation script:
 # SystemStarter [start|stop|restart] PostgreSQL
diff --git a/contrib/tsearch2/tsearch2--1.0.sql b/contrib/tsearch2/tsearch2--1.0.sql
index a32c5fe85b55601aba1b983083fd129eac857e30..68bb43fd7cdbadcc412b7235dcee9d20e7ed400b 100644
--- a/contrib/tsearch2/tsearch2--1.0.sql
+++ b/contrib/tsearch2/tsearch2--1.0.sql
@@ -414,7 +414,7 @@ CREATE FUNCTION stat(text,text)
 	LANGUAGE INTERNAL
 	RETURNS NULL ON NULL INPUT;
 
---reset - just for debuging
+--reset - just for debugging
 CREATE FUNCTION reset_tsearch()
         RETURNS void
         as 'MODULE_PATHNAME', 'tsa_reset_tsearch'
diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c
index ac28996867b36aec60ca77ba5ba477cad5c005c1..73b74c875e915c86eb36376833542fdb6eee2463 100644
--- a/contrib/xml2/xpath.c
+++ b/contrib/xml2/xpath.c
@@ -610,7 +610,7 @@ xpath_table(PG_FUNCTION_ARGS)
 
 	/*
 	 * At the moment we assume that the returned attributes make sense for the
-	 * XPath specififed (i.e. we trust the caller). It's not fatal if they get
+	 * XPath specified (i.e. we trust the caller). It's not fatal if they get
 	 * it wrong - the input function for the column type will raise an error
 	 * if the path result can't be converted into the correct binary
 	 * representation.
diff --git a/src/Makefile.shlib b/src/Makefile.shlib
index c293a34d1aa8403f776eefddc14b57d65a180824..35e2dd86904ee5d7dd5085fd546a2b9ee861447f 100644
--- a/src/Makefile.shlib
+++ b/src/Makefile.shlib
@@ -377,7 +377,7 @@ $(shlib): $(OBJS) $(DLL_DEFFILE) | $(SHLIB_PREREQS)
 	$(CC) $(CFLAGS)  -shared -static-libgcc -o $@  $(OBJS) $(DLL_DEFFILE) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) $(LIBS) -Wl,--out-implib=$(stlib)
 endif
 
-endif # PORTNAME == cgywin
+endif # PORTNAME == cygwin
 endif # PORTNAME == cygwin || PORTNAME == win32
 
 
diff --git a/src/backend/access/gist/README b/src/backend/access/gist/README
index dd4c9fa70a028a894c73eac1917c2cc9c3d4f97e..02228662b81248f26c72725997f159236416c499 100644
--- a/src/backend/access/gist/README
+++ b/src/backend/access/gist/README
@@ -28,7 +28,7 @@ The current implementation of GiST supports:
 
 The support for concurrency implemented in PostgreSQL was developed based on
 the paper "Access Methods for Next-Generation Database Systems" by
-Marcel Kornaker:
+Marcel Kornacker:
 
     http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz
 
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 943079420773173fbd12ab80abc793612b7af780..69676eba95389675562753575b300b1071741fe4 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1077,7 +1077,7 @@ _hash_splitbucket_guts(Relation rel,
  * already moved before the split operation was previously interrupted.
  *
  * The caller must hold a pin, but no lock, on the metapage and old bucket's
- * primay page buffer.  The buffers are returned in the same state.  (The
+ * primary page buffer.  The buffers are returned in the same state.  (The
  * metapage is only touched if it becomes necessary to add or remove overflow
  * pages.)
  */
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index 90ab6f2421546b116147ad22102f41e8c126cdce..c7b283c1986f903e1abe0c169d4e36838590dc62 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -209,7 +209,7 @@ typedef struct RewriteMappingFile
 } RewriteMappingFile;
 
 /*
- * A single In-Memeory logical rewrite mapping, hanging of
+ * A single In-Memory logical rewrite mapping, hanging off
  * RewriteMappingFile->mappings.
  */
 typedef struct RewriteMappingDataEntry
diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c
index 18a5f5602c744df26dfbf560a329fb5269eb5d33..20f60bc02366106267a4e18d01324ff18f2aba0d 100644
--- a/src/backend/access/transam/commit_ts.c
+++ b/src/backend/access/transam/commit_ts.c
@@ -615,7 +615,7 @@ CommitTsParameterChange(bool newvalue, bool oldvalue)
 
 /*
  * Activate this module whenever necessary.
- *		This must happen during postmaster or standalong-backend startup,
+ *		This must happen during postmaster or standalone-backend startup,
  *		or during WAL replay anytime the track_commit_timestamp setting is
  *		changed in the master.
  *
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index f6f136da3ab3fff5a296f2f41d8c23487fb68ab9..82f9a3c5c6c0c632be4ee1602240ecf85730a983 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -2752,7 +2752,7 @@ CommitTransactionCommand(void)
 			 * These shouldn't happen.  TBLOCK_DEFAULT means the previous
 			 * StartTransactionCommand didn't set the STARTED state
 			 * appropriately, while TBLOCK_PARALLEL_INPROGRESS should be ended
-			 * by EndParallelWorkerTranaction(), not this function.
+			 * by EndParallelWorkerTransaction(), not this function.
 			 */
 		case TBLOCK_DEFAULT:
 		case TBLOCK_PARALLEL_INPROGRESS:
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index 2a38792ed6f3f1c7df695e23dbd29f19b0e32830..a3bb2f1c0ef1174369d3f8552ac0d073fc32027e 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -770,7 +770,7 @@ static void getRelationIdentity(StringInfo buffer, Oid relid, List **objname);
  *
  * Note: If the object is not found, we don't give any indication of the
  * reason.  (It might have been a missing schema if the name was qualified, or
- * an inexistant type name in case of a cast, function or operator; etc).
+ * a nonexistent type name in case of a cast, function or operator; etc).
  * Currently there is only one caller that might be interested in such info, so
  * we don't spend much effort here.  If more callers start to care, it might be
  * better to add some support for that in this function.
diff --git a/src/backend/commands/amcmds.c b/src/backend/commands/amcmds.c
index 225e6f636cadf6f1f5f030059e948a2ae125904d..7e0a9aa0fd9525ad638d03fe71a1c8b06582d1b8 100644
--- a/src/backend/commands/amcmds.c
+++ b/src/backend/commands/amcmds.c
@@ -34,7 +34,7 @@ static const char *get_am_type_string(char amtype);
 
 
 /*
- * CreateAcessMethod
+ * CreateAccessMethod
  *		Registers a new access method.
  */
 ObjectAddress
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 30000a1eeb8fe130b6a0b965fbea00567bb248a5..1ebacbc24fea893bb15b9daeffd9757115ee26ed 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -685,7 +685,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
 
 		/*
 		 * Force synchronous commit, thus minimizing the window between
-		 * creation of the database files and commital of the transaction. If
+		 * creation of the database files and committal of the transaction. If
 		 * we crash before committing, we'll have a DB that's taking up disk
 		 * space but is not in pg_database, which is not good.
 		 */
@@ -955,7 +955,7 @@ dropdb(const char *dbname, bool missing_ok)
 
 	/*
 	 * Force synchronous commit, thus minimizing the window between removal of
-	 * the database files and commital of the transaction. If we crash before
+	 * the database files and committal of the transaction. If we crash before
 	 * committing, we'll have a DB that's gone on disk but still there
 	 * according to pg_database, which is not good.
 	 */
@@ -1309,7 +1309,7 @@ movedb(const char *dbname, const char *tblspcname)
 
 		/*
 		 * Force synchronous commit, thus minimizing the window between
-		 * copying the database files and commital of the transaction. If we
+		 * copying the database files and committal of the transaction. If we
 		 * crash before committing, we'll leave an orphaned set of files on
 		 * disk, which is not fatal but not good either.
 		 */
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 0a67be031be8e9ef40d735cc702e8ccc83af3ff2..c9e0a3e42d20339143736833da92c117cd76aa16 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -3401,7 +3401,7 @@ ExplainYAMLLineStarting(ExplainState *es)
 }
 
 /*
- * YAML is a superset of JSON; unfortuantely, the YAML quoting rules are
+ * YAML is a superset of JSON; unfortunately, the YAML quoting rules are
  * ridiculously complicated -- as documented in sections 5.3 and 7.3.3 of
  * http://yaml.org/spec/1.2/spec.html -- so we chose to just quote everything.
  * Empty strings, strings with leading or trailing whitespace, and strings
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index dd83858b3ddb82d84c699546706f4778bc57a751..8b1285a54201a76aabef2f6166d2faa90c484374 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -1040,7 +1040,7 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt)
 	}
 	else
 	{
-		/* store SQL NULL instead of emtpy array */
+		/* store SQL NULL instead of empty array */
 		trftypes = NULL;
 	}
 
@@ -1441,7 +1441,7 @@ CreateCast(CreateCastStmt *stmt)
 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
 				 errmsg("cast will be ignored because the target data type is a domain")));
 
-	/* Detemine the cast method */
+	/* Determine the cast method */
 	if (stmt->func != NULL)
 		castmethod = COERCION_METHOD_FUNCTION;
 	else if (stmt->inout)
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index ed6136c153575267d09e73eff24aa78a31489ed6..f4814c095b59884191d3f7e4ad0516bbd35b5241 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -99,7 +99,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
  * Errors arising from the attribute list still apply.
  *
  * Most column type changes that can skip a table rewrite do not invalidate
- * indexes.  We ackowledge this when all operator classes, collations and
+ * indexes.  We acknowledge this when all operator classes, collations and
  * exclusion operators match.  Though we could further permit intra-opfamily
  * changes for btree and hash indexes, that adds subtle complexity with no
  * concrete benefit for core types.
@@ -965,7 +965,7 @@ CheckMutability(Expr *expr)
  * indxpath.c could do something with.  However, that seems overly
  * restrictive.  One useful application of partial indexes is to apply
  * a UNIQUE constraint across a subset of a table, and in that scenario
- * any evaluatable predicate will work.  So accept any predicate here
+ * any evaluable predicate will work.  So accept any predicate here
  * (except ones requiring a plan), and let indxpath.c fend for itself.
  */
 static void
diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c
index 3fe1d15052d8ae2ddcc1f402127d5feb59f99f18..04f83e0a2ea7ad87e1d45111ca8092c9deb22a89 100644
--- a/src/backend/commands/publicationcmds.c
+++ b/src/backend/commands/publicationcmds.c
@@ -525,7 +525,7 @@ OpenTableList(List *tables)
 		myrelid = RelationGetRelid(rel);
 		/*
 		 * filter out duplicates when user specifies "foo, foo"
-		 * Note that this algrithm is know to not be very effective (O(N^2))
+		 * Note that this algorithm is known to not be very effective (O(N^2))
 		 * but given that it only works on list of tables given to us by user
 		 * it's deemed acceptable.
 		 */
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index 4353e14e1bdf2807b7a7ddd0c93bb2595d8c7a41..ab21e64b48875455ff409ed588ddf207f0bdf1fb 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -474,7 +474,7 @@ DropSubscription(DropSubscriptionStmt *stmt)
 	InvokeObjectDropHook(SubscriptionRelationId, subid, 0);
 
 	/*
-	 * Lock the subscription so noboby else can do anything with it
+	 * Lock the subscription so nobody else can do anything with it
 	 * (including the replication workers).
 	 */
 	LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 878b48d39ef0b6fa0b91840dbe5e53ef1aa6b367..37a4c4a3d6abcd5638c5c09a6e31e15473a11f69 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -6630,7 +6630,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
 
 	/*
 	 * Check if ONLY was specified with ALTER TABLE.  If so, allow the
-	 * contraint creation only if there are no children currently.  Error out
+	 * constraint creation only if there are no children currently.  Error out
 	 * otherwise.
 	 */
 	if (!recurse && children != NIL)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index ce6600bde7006ee0d35b1499953ad10f533756bf..a66639178a5b5f4d9aa86de423a9bbf256e93415 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1261,7 +1261,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
 	resultRelInfo->ri_projectReturning = NULL;
 
 	/*
-	 * If partition_root has been specified, that means we are builiding the
+	 * If partition_root has been specified, that means we are building the
 	 * ResultRelationInfo for one of its leaf partitions.  In that case, we
 	 * need *not* initialize the leaf partition's constraint, but rather the
 	 * the partition_root's (if any).  We must do that explicitly like this,
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index e01fe6da96492f46d034c9522f88062646437d1c..fe87c9ae71deef2da21233653b778a712ed4c3ec 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -533,7 +533,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
 	int			plan_node_id = planstate->plan->plan_node_id;
 	MemoryContext oldcontext;
 
-	/* Find the instumentation for this node. */
+	/* Find the instrumentation for this node. */
 	for (i = 0; i < instrumentation->num_plan_nodes; ++i)
 		if (instrumentation->plan_node_id[i] == plan_node_id)
 			break;
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index a8bd5832c92058af5ce713ba40a9d3b1d891539e..ebf3f6b3c9b6df736ba5cb8092e41b4980bb6644 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -391,7 +391,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
 		if (rel->rd_att->constr)
 			ExecConstraints(resultRelInfo, slot, slot, estate);
 
-		/* Store the slot into tuple that we can insett. */
+		/* Store the slot into tuple that we can inspect. */
 		tuple = ExecMaterializeSlot(slot);
 
 		/* OK, store the tuple and create index entries for it */
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 5c6079af808487cf41637f617662c5cd9ccad118..aa081523506c7b4cb67af626a342c85908c7ab2d 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -304,7 +304,7 @@ typedef struct AggStatePerTransData
 	/*
 	 * Slots for holding the evaluated input arguments.  These are set up
 	 * during ExecInitAgg() and then used for each input row requiring
-	 * procesessing besides what's done in AggState->evalproj.
+	 * processing besides what's done in AggState->evalproj.
 	 */
 	TupleTableSlot *sortslot;	/* current input tuple */
 	TupleTableSlot *uniqslot;	/* used for multi-column DISTINCT */
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index 6ac6b83cddcb31ecd289d66a33e8937495c8050d..2a123e8452688c09afda0901a7be0e222a17e131 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -354,7 +354,7 @@ advance_windowaggregate(WindowAggState *winstate,
 
 	/*
 	 * We must track the number of rows included in transValue, since to
-	 * remove the last input, advance_windowaggregate_base() musn't call the
+	 * remove the last input, advance_windowaggregate_base() mustn't call the
 	 * inverse transition function, but simply reset transValue back to its
 	 * initial value.
 	 */
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index 95ded23791c7dfb90305e3528452ce63c4bd19c3..be63a4bc637f245686bf4450aa427cce309b6e13 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -109,7 +109,7 @@ static MemoryContext parsed_hba_context = NULL;
  *
  * NOTE: the IdentLine structs can contain pre-compiled regular expressions
  * that live outside the memory context. Before destroying or resetting the
- * memory context, they need to be expliticly free'd.
+ * memory context, they need to be explicitly free'd.
  */
 static List *parsed_ident_lines = NIL;
 static MemoryContext parsed_ident_context = NULL;
diff --git a/src/backend/optimizer/geqo/geqo_erx.c b/src/backend/optimizer/geqo/geqo_erx.c
index 1a43ab7288c1dcaa43dc341ae30acb5d6893496a..023abf70e21fe02ce8f95bf64f292fc3ee1b13ce 100644
--- a/src/backend/optimizer/geqo/geqo_erx.c
+++ b/src/backend/optimizer/geqo/geqo_erx.c
@@ -111,7 +111,7 @@ gimme_edge_table(PlannerInfo *root, Gene *tour1, Gene *tour2,
 	for (index1 = 0; index1 < num_gene; index1++)
 	{
 		/*
-		 * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton
+		 * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operation
 		 * maps n back to 1
 		 */
 
@@ -314,7 +314,7 @@ gimme_gene(PlannerInfo *root, Edge edge, Edge *edge_table)
 		/*
 		 * give priority to candidates with fewest remaining unused edges;
 		 * find out what the minimum number of unused edges is
-		 * (minimum_edges); if there is more than one cadidate with the
+		 * (minimum_edges); if there is more than one candidate with the
 		 * minimum number of unused edges keep count of this number
 		 * (minimum_count);
 		 */
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 7c30ec6fb9caee6d2abf26ab91c040067dbae4e1..28972458830bd49675159a48c8a341dff2d5c85e 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -1618,7 +1618,7 @@ select_mergejoin_clauses(PlannerInfo *root,
 		/*
 		 * Insist that each side have a non-redundant eclass.  This
 		 * restriction is needed because various bits of the planner expect
-		 * that each clause in a merge be associatable with some pathkey in a
+		 * that each clause in a merge be associable with some pathkey in a
 		 * canonical pathkey list, but redundant eclasses can't appear in
 		 * canonical sort orderings.  (XXX it might be worth relaxing this,
 		 * but not enough time to address it for 8.3.)
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index e8807591a0e22ab1d3bce0ec35778acd7c4d3c68..3c58d0596c69c9fdad079e9d75e2a1a32ad56468 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -195,7 +195,7 @@ query_planner(PlannerInfo *root, List *tlist,
 	/*
 	 * Now distribute "placeholders" to base rels as needed.  This has to be
 	 * done after join removal because removal could change whether a
-	 * placeholder is evaluatable at a base rel.
+	 * placeholder is evaluable at a base rel.
 	 */
 	add_placeholders_to_base_rels(root);
 
diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c
index 6d84477f8644fdbccdd9f10f709e2632178c9449..62629ee7d866e2b3a6e8565065399f22bbaa6938 100644
--- a/src/backend/optimizer/util/joininfo.c
+++ b/src/backend/optimizer/util/joininfo.c
@@ -24,7 +24,7 @@
  *		Detect whether there is a joinclause that involves
  *		the two given relations.
  *
- * Note: the joinclause does not have to be evaluatable with only these two
+ * Note: the joinclause does not have to be evaluable with only these two
  * relations.  This is intentional.  For example consider
  *		SELECT * FROM a, b, c WHERE a.x = (b.y + c.z)
  * If a is much larger than the other tables, it may be worthwhile to
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 8f10520f8132ef8b42d2569f0538e40154c034d2..045b5cf53920c7d8c3b7986437e8dc9e9eafef0e 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -515,7 +515,7 @@ join_clause_is_movable_into(RestrictInfo *rinfo,
 							Relids currentrelids,
 							Relids current_and_outer)
 {
-	/* Clause must be evaluatable given available context */
+	/* Clause must be evaluable given available context */
 	if (!bms_is_subset(rinfo->clause_relids, current_and_outer))
 		return false;
 
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index a4edea08a3e30fbc2378adcce21147a41cc77d3e..cf97be512d9170c6ee47d4418d14e45140847fb0 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -11312,7 +11312,7 @@ table_ref:	relation_expr opt_alias_clause
 					n->lateral = true;
 					n->subquery = $2;
 					n->alias = $3;
-					/* same coment as above */
+					/* same comment as above */
 					if ($3 == NULL)
 					{
 						if (IsA($2, SelectStmt) &&
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 0e4e7a8c803da1e9ca560ce86c38c09255f5d459..8d1939445b57b29077e080692164b29c034fd750 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -3050,7 +3050,7 @@ transformAttachPartition(CreateStmtContext *cxt, PartitionCmd *cmd)
 				 errmsg("\"%s\" is not partitioned",
 						RelationGetRelationName(parentRel))));
 
-	/* tranform the values */
+	/* transform the values */
 	Assert(RelationGetPartitionKey(parentRel) != NULL);
 	cxt->partbound = transformPartitionBound(cxt->pstate, parentRel,
 											 cmd->bound);
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index 40819824adb2aca1a2b19d6c3bc31bf56c624353..dcb4cf249c507a41bd54f79dcdfe714255066ba0 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -211,7 +211,7 @@ BackgroundWriterMain(void)
 		/* Flush any leaked data in the top-level context */
 		MemoryContextResetAndDeleteChildren(bgwriter_context);
 
-		/* re-initilialize to avoid repeated errors causing problems */
+		/* re-initialize to avoid repeated errors causing problems */
 		WritebackContextInit(&wb_context, &bgwriter_flush_after);
 
 		/* Now we can allow interrupts again */
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 91ccbe78c07a13c7f3c9508a820ff97f55c8c9cc..271c492000265447eb285dedd4b363ac5b0c1c0c 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -5156,7 +5156,7 @@ RandomCancelKey(int32 *cancel_key)
 }
 
 /*
- * Count up number of child processes of specified types (dead_end chidren
+ * Count up number of child processes of specified types (dead_end children
  * are always excluded).
  */
 static int
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index d222cff7085c072d8fa8bef5069a21e979fa0da2..e9ce061e83cab31498942863d37309f9fd1a2d6a 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -170,7 +170,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
 
 		/*
 		 * Worker started and attached to our shmem. This check is safe
-		 * because only laucher ever starts the workers, so nobody can steal
+		 * because only launcher ever starts the workers, so nobody can steal
 		 * the worker slot.
 		 */
 		if (status == BGWH_STARTED && worker->proc)
@@ -180,7 +180,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
 			return false;
 
 		/*
-		 * We need timeout because we generaly don't get notified via latch
+		 * We need a timeout because we generally don't get notified via latch
 		 * about the worker attach.
 		 */
 		rc = WaitLatch(MyLatch,
@@ -533,7 +533,7 @@ AtCommit_ApplyLauncher(void)
 /*
  * Request wakeup of the launcher on commit of the transaction.
  *
- * This is used to send launcher signal to stop sleeping and proccess the
+ * This is used to send the launcher a signal to stop sleeping and process the
  * subscriptions when current transaction commits. Should be used when new
  * tuple was added to the pg_subscription catalog.
 */
@@ -638,7 +638,7 @@ ApplyLauncherMain(Datum main_arg)
 		else
 		{
 			/*
-			 * The wait in previous cycle was interruped in less than
+			 * The wait in previous cycle was interrupted in less than
 			 * wal_retrieve_retry_interval since last worker was started,
 			 * this usually means crash of the worker, so we should retry
 			 * in wal_retrieve_retry_interval again.
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index ade80d407fea9074d693de85d8f1ad26e281eb60..bf84c68a0cdafb8a8cd3cebfa711f7b69e55d066 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -1250,7 +1250,7 @@ pg_replication_origin_session_is_setup(PG_FUNCTION_ARGS)
  * Return the replication progress for origin setup in the current session.
  *
  * If 'flush' is set to true it is ensured that the returned value corresponds
- * to a local transaction that has been flushed. this is useful if asychronous
+ * to a local transaction that has been flushed. this is useful if asynchronous
  * commits are used when replaying replicated transactions.
  */
 Datum
@@ -1336,7 +1336,7 @@ pg_replication_origin_advance(PG_FUNCTION_ARGS)
  * Return the replication progress for an individual replication origin.
  *
  * If 'flush' is set to true it is ensured that the returned value corresponds
- * to a local transaction that has been flushed. this is useful if asychronous
+ * to a local transaction that has been flushed. this is useful if asynchronous
  * commits are used when replaying replicated transactions.
  */
 Datum
diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c
index 1f30de606ae5c7ac8824b17693e7bc79fb3ffeee..142cd993cd858519e657030cee72c4f643bb4ab0 100644
--- a/src/backend/replication/logical/proto.c
+++ b/src/backend/replication/logical/proto.c
@@ -539,7 +539,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel)
 		if (att->attisdropped)
 			continue;
 
-		/* REPLICA IDENTITY FULL means all colums are sent as part of key. */
+		/* REPLICA IDENTITY FULL means all columns are sent as part of key. */
 		if (replidentfull ||
 			bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
 						  idattrs))
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index d805ef4fb7e6d7a266c07cee645c86f9e40a5653..7dc97fa79674f0b2909b30a27b4fa1c3595ca013 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -1714,7 +1714,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
  *
  * NB: Transactions handled here have to have actively aborted (i.e. have
  * produced an abort record). Implicitly aborted transactions are handled via
- * ReorderBufferAbortOld(); transactions we're just not interesteded in, but
+ * ReorderBufferAbortOld(); transactions we're just not interested in, but
  * which have committed are handled in ReorderBufferForget().
  *
  * This function purges this transaction and its contents from memory and
@@ -1782,7 +1782,7 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid)
  * toplevel xid.
  *
  * This is significantly different to ReorderBufferAbort() because
- * transactions that have committed need to be treated differenly from aborted
+ * transactions that have committed need to be treated differently from aborted
  * ones since they may have modified the catalog.
  *
  * Note that this is only allowed to be called in the moment a transaction
@@ -2660,7 +2660,7 @@ StartupReorderBuffer(void)
 
 		/*
 		 * ok, has to be a surviving logical slot, iterate and delete
-		 * everythign starting with xid-*
+		 * everything starting with xid-*
 		 */
 		sprintf(path, "pg_replslot/%s", logical_de->d_name);
 
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 1e02aa9bd860c945fbf0d6781e2559e847a17aa3..62020b6ed03329d8ca404126ee31cc22a586cdb2 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -614,7 +614,7 @@ SnapBuildGetOrBuildSnapshot(SnapBuild *builder, TransactionId xid)
 	if (builder->snapshot == NULL)
 	{
 		builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
-		/* inrease refcount for the snapshot builder */
+		/* increase refcount for the snapshot builder */
 		SnapBuildSnapIncRefcount(builder->snapshot);
 	}
 
@@ -678,7 +678,7 @@ SnapBuildProcessChange(SnapBuild *builder, TransactionId xid, XLogRecPtr lsn)
 		if (builder->snapshot == NULL)
 		{
 			builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
-			/* inrease refcount for the snapshot builder */
+			/* increase refcount for the snapshot builder */
 			SnapBuildSnapIncRefcount(builder->snapshot);
 		}
 
@@ -911,7 +911,7 @@ SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid)
 		{
 			/*
 			 * None of the originally running transaction is running anymore,
-			 * so our incrementaly built snapshot now is consistent.
+			 * so our incrementally built snapshot now is consistent.
 			 */
 			ereport(LOG,
 				  (errmsg("logical decoding found consistent point at %X/%X",
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 9383960da75eeda735b053d883b8e708b3ed4fa0..0b19feca401fb3137b86db0a5302932fe8dc17dd 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -327,7 +327,7 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel,
 /*
  * Modify slot with user data provided as C strigs.
  * This is somewhat similar to heap_modify_tuple but also calls the type
- * input fuction on the user data as the input is the text representation
+ * input function on the user data as the input is the text representation
  * of the types.
  */
 static void
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 08c30af88a1d34d4d0ed6428926892b5ee4218a1..0ceb4be375e1cab604dda746944b765d88f0c643 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -172,7 +172,7 @@ pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt,
 								&data->protocol_version,
 								&data->publication_names);
 
-		/* Check if we support requested protol */
+		/* Check if we support requested protocol */
 		if (data->protocol_version != LOGICALREP_PROTO_VERSION_NUM)
 			ereport(ERROR,
 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -424,7 +424,7 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue)
 /*
  * Initialize the relation schema sync cache for a decoding session.
  *
- * The hash table is destoyed at the end of a decoding session. While
+ * The hash table is destroyed at the end of a decoding session. While
  * relcache invalidations still exist and will still be invoked, they
  * will just see the null hash table global and take no action.
  */
@@ -540,7 +540,7 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 
 	/*
 	 * We can get here if the plugin was used in SQL interface as the
-	 * RelSchemaSyncCache is detroyed when the decoding finishes, but there
+	 * RelSchemaSyncCache is destroyed when the decoding finishes, but there
 	 * is no way to unregister the relcache invalidation callback.
 	 */
 	if (RelationSyncCache == NULL)
@@ -580,7 +580,7 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue)
 
 	/*
 	 * We can get here if the plugin was used in SQL interface as the
-	 * RelSchemaSyncCache is detroyed when the decoding finishes, but there
+	 * RelSchemaSyncCache is destroyed when the decoding finishes, but there
 	 * is no way to unregister the relcache invalidation callback.
 	 */
 	if (RelationSyncCache == NULL)
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
index d45a41d8633759da6ae7bf75b809f71c6f167acd..0079ba567f62b588cb83987eccf8462324911993 100644
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -860,7 +860,7 @@ WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
  * reached.  At most nevents occurred events are returned.
  *
  * If timeout = -1, block until an event occurs; if 0, check sockets for
- * readiness, but don't block; if > 0, block for at most timeout miliseconds.
+ * readiness, but don't block; if > 0, block for at most timeout milliseconds.
  *
  * Returns the number of events occurred, or 0 if the timeout was reached.
  *
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
index 32b4d3d5d4f000dd95f04950c391a6f0f6aba591..f5bf807cd63e308ea469841f52465935bc2ec4b3 100644
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -501,7 +501,7 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
  * it will point to a temporary buffer.  This mostly avoids data copying in
  * the hoped-for case where messages are short compared to the buffer size,
  * while still allowing longer messages.  In either case, the return value
- * remains valid until the next receive operation is perfomed on the queue.
+ * remains valid until the next receive operation is performed on the queue.
  *
  * When nowait = false, we'll wait on our process latch when the ring buffer
  * is empty and we have not yet received a full message.  The sender will
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 6532240dd199219df6a8c1404f9baedbc74af403..62590707229884a39347fe846fd30b61e782bb95 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -967,7 +967,7 @@ LogStandbySnapshot(void)
  * similar. We keep them separate because xl_xact_running_xacts is a
  * contiguous chunk of memory and never exists fully until it is assembled in
  * WAL. The inserted records are marked as not being important for durability,
- * to avoid triggering superflous checkpoint / archiving activity.
+ * to avoid triggering superfluous checkpoint / archiving activity.
  */
 static XLogRecPtr
 LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index e9703f1866e2562bf0ed9016b0e54f28850dbabe..ad64a79fa1daebd566fbcffc73b467bff1a38dbd 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -2778,7 +2778,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
 		vxids = (VirtualTransactionId *)
 			palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
 
-	/* Compute hash code and partiton lock, and look up conflicting modes. */
+	/* Compute hash code and partition lock, and look up conflicting modes. */
 	hashcode = LockTagHashCode(locktag);
 	partitionLock = LockHashPartitionLock(hashcode);
 	conflictMask = lockMethodTable->conflictTab[lockmode];
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index c196bb8205350611524887c7f00daf405ee88813..ab81d94b51043d7ce263717da644a464ff9fe817 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -781,7 +781,7 @@ LWLockAttemptLock(LWLock *lock, LWLockMode mode)
 				return false;
 			}
 			else
-				return true;	/* someobdy else has the lock */
+				return true;	/* somebody else has the lock */
 		}
 	}
 	pg_unreachable();
@@ -953,7 +953,7 @@ LWLockWakeup(LWLock *lock)
 		 * that happens before the list unlink happens, the list would end up
 		 * being corrupted.
 		 *
-		 * The barrier pairs with the LWLockWaitListLock() when enqueueing for
+		 * The barrier pairs with the LWLockWaitListLock() when enqueuing for
 		 * another lock.
 		 */
 		pg_write_barrier();
@@ -1029,7 +1029,7 @@ LWLockDequeueSelf(LWLock *lock)
 
 	/*
 	 * Can't just remove ourselves from the list, but we need to iterate over
-	 * all entries as somebody else could have unqueued us.
+	 * all entries as somebody else could have dequeued us.
 	 */
 	proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
 	{
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 9183764ca76d7364e0d3150f7be34e623693d188..7aa719d6123554dcc7393bb1230a0107989b2244 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -3193,7 +3193,7 @@ ReleasePredicateLocks(bool isCommit)
 	/*
 	 * We can't trust XactReadOnly here, because a transaction which started
 	 * as READ WRITE can show as READ ONLY later, e.g., within
-	 * substransactions.  We want to flag a transaction as READ ONLY if it
+	 * subtransactions.  We want to flag a transaction as READ ONLY if it
 	 * commits without writing so that de facto READ ONLY transactions get the
 	 * benefit of some RO optimizations, so we will use this local variable to
 	 * get some cleanup logic right which is based on whether the transaction
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 1d9384ef91e1785a63d0d70eb0f49f58e37d79c2..6c17b54f0d942cbdaa45ae4550ae9d4995811a26 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -1728,7 +1728,7 @@ _fdvec_resize(SMgrRelation reln,
 	else
 	{
 		/*
-		 * It doesn't seem worthwile complicating the code by having a more
+		 * It doesn't seem worthwhile complicating the code by having a more
 		 * aggressive growth strategy here; the number of segments doesn't
 		 * grow that fast, and the memory context internally will sometimes
 		 * avoid doing an actual reallocation.
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c
index 1c1b04c49e817f16a92d69d6e8be87ccb3ae7481..c1e194a8f571030b70a81eef1fcf25c20be2954b 100644
--- a/src/backend/tsearch/spell.c
+++ b/src/backend/tsearch/spell.c
@@ -37,7 +37,7 @@
  *	  Spell field. The AffixData field is initialized if AF parameter is not
  *	  defined.
  *	- NISortAffixes():
- *	  - builds a list of compond affixes from the affix list and stores it
+ *	  - builds a list of compound affixes from the affix list and stores it
  *		in the CompoundAffix.
  *	  - builds prefix trees (Trie) from the affix list for prefixes and suffixes
  *		and stores them in Suffix and Prefix fields.
diff --git a/src/backend/tsearch/ts_parse.c b/src/backend/tsearch/ts_parse.c
index e0c9ffb7f4c91a4a01f5b9200e3fe851630abdc2..b612fb0e2cb4ea23611af9f74f9e7d7fa6459648 100644
--- a/src/backend/tsearch/ts_parse.c
+++ b/src/backend/tsearch/ts_parse.c
@@ -179,7 +179,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 	if (ld->curDictId == InvalidOid)
 	{
 		/*
-		 * usial mode: dictionary wants only one word, but we should keep in
+		 * usual mode: dictionary wants only one word, but we should keep in
 		 * mind that we should go through all stack
 		 */
 
@@ -272,7 +272,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 
 				/*
 				 * We should be sure that current type of lexeme is recognized
-				 * by our dictinonary: we just check is it exist in list of
+				 * by our dictionary: we just check whether it exists in the list of
 				 * dictionaries ?
 				 */
 				for (i = 0; i < map->len && !dictExists; i++)
@@ -627,7 +627,7 @@ generateHeadline(HeadlineParsedText *prs)
 				/* start of a new fragment */
 				infrag = 1;
 				numfragments++;
-				/* add a fragment delimitor if this is after the first one */
+				/* add a fragment delimiter if this is after the first one */
 				if (numfragments > 1)
 				{
 					memcpy(ptr, prs->fragdelim, prs->fragdelimlen);
diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c
index 6586760f15fa6fe7ba6e9946983fa036ab8cb6ca..bb7edc1516bb5bdd0e42d3ea2b61bd4d412dc8f7 100644
--- a/src/backend/tsearch/wparser_def.c
+++ b/src/backend/tsearch/wparser_def.c
@@ -2445,7 +2445,7 @@ mark_hl_words(HeadlineParsedText *prs, TSQuery query, int highlight,
 						break;
 				}
 				if (curlen < min_words && i >= prs->curwords)
-				{				/* got end of text and our cover is shoter
+				{				/* got end of text and our cover is shorter
 								 * than min_words */
 					for (i = p - 1; i >= 0; i--)
 					{
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 16a7954ec42ce8b1ea0a3a603c158dc2d1ddebb7..4f3d8a118949f0a84510bfc32f417cd8cefeac24 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -2265,7 +2265,7 @@ seq_search(char *name, const char *const * array, int type, int max, int *len)
 
 	for (last = 0, a = array; *a != NULL; a++)
 	{
-		/* comperate first chars */
+		/* compare first chars */
 		if (*name != **a)
 			continue;
 
diff --git a/src/backend/utils/adt/rangetypes_selfuncs.c b/src/backend/utils/adt/rangetypes_selfuncs.c
index cf6ecc4cf3ae4fa4f15ac77dd13c9f0fba562bff..2997edd67204f65e1718e9816f9ca24ac2f7e7e5 100644
--- a/src/backend/utils/adt/rangetypes_selfuncs.c
+++ b/src/backend/utils/adt/rangetypes_selfuncs.c
@@ -533,7 +533,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
 			{
 				/*
 				 * Lower bound no longer matters. Just estimate the fraction
-				 * with an upper bound <= const uppert bound
+				 * with an upper bound <= const upper bound
 				 */
 				hist_selec =
 					calc_hist_selectivity_scalar(typcache, &const_upper,
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index f26175ec44f586ac56afe7550e5d2f9b314512bc..f355954b536dfe1334091a29bfe666f8003d2368 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -2687,7 +2687,7 @@ is_input_argument(int nth, const char *argmodes)
 }
 
 /*
- * Append used transformated types to specified buffer
+ * Append used transformed types to the specified buffer
  */
 static void
 print_function_trftypes(StringInfo buf, HeapTuple proctup)
diff --git a/src/backend/utils/adt/tsrank.c b/src/backend/utils/adt/tsrank.c
index 9b2cd6df41659712df9750f1bee8a271eb0601de..76e5e541b63ad4698c23c6fbb9dec9edfea886e2 100644
--- a/src/backend/utils/adt/tsrank.c
+++ b/src/backend/utils/adt/tsrank.c
@@ -899,7 +899,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method)
 
 		/*
 		 * if doc are big enough then ext.q may be equal to ext.p due to limit
-		 * of posional information. In this case we approximate number of
+		 * of positional information. In this case we approximate number of
 		 * noise word as half cover's length
 		 */
 		nNoise = (ext.q - ext.p) - (ext.end - ext.begin);
@@ -908,7 +908,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method)
 		Wdoc += Cpos / ((double) (1 + nNoise));
 
 		CurExtPos = ((double) (ext.q + ext.p)) / 2.0;
-		if (NExtent > 0 && CurExtPos > PrevExtPos		/* prevent devision by
+		if (NExtent > 0 && CurExtPos > PrevExtPos		/* prevent division by
 														 * zero in a case of
 				multiple lexize */ )
 			SumDist += 1.0 / (CurExtPos - PrevExtPos);
diff --git a/src/backend/utils/adt/windowfuncs.c b/src/backend/utils/adt/windowfuncs.c
index 4e714cd5bffc74f1b7f901ae21b880e019c2bd14..d86ad703dac444d739792be5e5db25aa90bfefb7 100644
--- a/src/backend/utils/adt/windowfuncs.c
+++ b/src/backend/utils/adt/windowfuncs.c
@@ -342,7 +342,7 @@ window_lag(PG_FUNCTION_ARGS)
 
 /*
  * lag_with_offset
- * returns the value of VE evelulated on a row that is OFFSET
+ * returns the value of VE evaluated on a row that is OFFSET
  * rows before the current row within a partition,
  * per spec.
  */
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 8a7c560e46c0ff7544baede33ea727206bf70f67..4dd2e2b2c64b378a21a1ce9fcb45bdc2cd0d0014 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1433,7 +1433,7 @@ RelationInitPhysicalAddr(Relation relation)
 		 * points to the current file since the older file will be gone (or
 		 * truncated). The new file will still contain older rows so lookups
 		 * in them will work correctly. This wouldn't work correctly if
-		 * rewrites were allowed to change the schema in a noncompatible way,
+		 * rewrites were allowed to change the schema in an incompatible way,
 		 * but those are prevented both on catalog tables and on user tables
 		 * declared as additional catalog tables.
 		 */
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index c55da54878ddb4883dbc25f3a715cfd39b55b70f..af08f102fe5d203aeab8e65a0b1c6012a5341ac7 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -879,7 +879,7 @@ get_func_arg_info(HeapTuple procTup,
 /*
  * get_func_trftypes
  *
- * Returns a number of transformated types used by function.
+ * Returns the number of transformed types used by the function.
  */
 int
 get_func_trftypes(HeapTuple procTup,
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 4d0a2a7bed3eccd5d033e4fca627f3d9e715f545..9f938f2d2707a1af74a64739e14bc8ef6e05c170 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -1108,7 +1108,7 @@ process_settings(Oid databaseid, Oid roleid)
 
 	relsetting = heap_open(DbRoleSettingRelationId, AccessShareLock);
 
-	/* read all the settings under the same snapsot for efficiency */
+	/* read all the settings under the same snapshot for efficiency */
 	snapshot = RegisterSnapshot(GetCatalogSnapshot(DbRoleSettingRelationId));
 
 	/* Later settings are ignored if set earlier. */
diff --git a/src/backend/utils/misc/Makefile b/src/backend/utils/misc/Makefile
index 0ad1b8b59540f1e006abf30be27cd854e2da39d8..45cdf76ec2c032bfdc953aec9e8e5f50d4c90280 100644
--- a/src/backend/utils/misc/Makefile
+++ b/src/backend/utils/misc/Makefile
@@ -19,7 +19,7 @@ OBJS = backend_random.o guc.o help_config.o pg_config.o pg_controldata.o \
        tzparser.o
 
 # This location might depend on the installation directories. Therefore
-# we can't subsitute it into pg_config.h.
+# we can't substitute it into pg_config.h.
 ifdef krb_srvtab
 override CPPFLAGS += -DPG_KRB_SRVTAB='"$(krb_srvtab)"'
 endif
diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
index 230756e0cd561ad48bb63fb9fa929ed23f0a4adb..2cd758178c06fa1924dcb4d7ff436bc032241f7e 100644
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -318,7 +318,7 @@ sum_free_pages(FreePageManager *fpm)
 
 /*
  * Compute the size of the largest run of pages that the user could
- * succesfully get.
+ * successfully get.
  */
 static Size
 FreePageManagerLargestContiguous(FreePageManager *fpm)
@@ -360,7 +360,7 @@ FreePageManagerLargestContiguous(FreePageManager *fpm)
 
 /*
  * Recompute the size of the largest run of pages that the user could
- * succesfully get, if it has been marked dirty.
+ * successfully get, if it has been marked dirty.
  */
 static void
 FreePageManagerUpdateLargest(FreePageManager *fpm)
@@ -1704,7 +1704,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
 			 * The act of allocating pages for use in constructing our btree
 			 * should never cause any page to become more full, so the new
 			 * split depth should be no greater than the old one, and perhaps
-			 * less if we fortutiously allocated a chunk that freed up a slot
+			 * less if we fortuitously allocated a chunk that freed up a slot
 			 * on the page we need to update.
 			 */
 			Assert(result.split_pages <= fpm->btree_recycle_count);
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 053a6d1c700140d43031aa11697d3a4e793cbfcd..703bdcedaf406a727f11d0e33acddad1f6674581 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -1625,7 +1625,7 @@ HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
 }
 
 /*
- * check whether the transaciont id 'xid' is in the pre-sorted array 'xip'.
+ * check whether the transaction id 'xid' is in the pre-sorted array 'xip'.
  */
 static bool
 TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
index 5388c08b29d1754e4494d448b800f93bde60aad4..5737608f9e13ab4f812995525f47b04ea9ce66c4 100644
--- a/src/bin/pg_dump/pg_backup_custom.c
+++ b/src/bin/pg_dump/pg_backup_custom.c
@@ -198,7 +198,7 @@ InitArchiveFmt_Custom(ArchiveHandle *AH)
  *
  * Optional.
  *
- * Set up extrac format-related TOC data.
+ * Set up extract format-related TOC data.
 */
 static void
 _ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 35ac05e851f01a35adb22bec1488fcc5c3ed78e8..4d22802912b4813207dcf95f5174fa144c30aad7 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -3500,7 +3500,7 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
 
 		resetPQExpBuffer(query);
 
-		/* Get the publication memebership for the table. */
+		/* Get the publication membership for the table. */
 		appendPQExpBuffer(query,
 						  "SELECT pr.tableoid, pr.oid, p.pubname "
 						  "FROM pg_catalog.pg_publication_rel pr,"
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 6e3acdc416eb2534b98dc2914241da72940d0eb0..5349c394115db2f5bd73f83bdf2d493eecdd0e0d 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -828,7 +828,7 @@ StoreQueryTuple(const PGresult *result)
 			char	   *varname;
 			char	   *value;
 
-			/* concate prefix and column name */
+			/* concatenate prefix and column name */
 			varname = psprintf("%s%s", pset.gset_prefix, colname);
 
 			if (!PQgetisnull(result, 0, i))
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index c501168d8c7538adb1aa6d37845340b4948fdc35..e2e4cbcc08a2abd77285ca126374109636652d7c 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -2127,7 +2127,7 @@ describeOneTableDetails(const char *schemaname,
 				printTableAddFooter(&cont, _("Check constraints:"));
 				for (i = 0; i < tuples; i++)
 				{
-					/* untranslated contraint name and def */
+					/* untranslated constraint name and def */
 					printfPQExpBuffer(&buf, "    \"%s\" %s",
 									  PQgetvalue(result, i, 0),
 									  PQgetvalue(result, i, 1));
@@ -3197,7 +3197,7 @@ listTables(const char *tabtypes, const char *pattern, bool verbose, bool showSys
 	if (verbose)
 	{
 		/*
-		 * As of PostgreSQL 9.0, use pg_table_size() to show a more acurate
+		 * As of PostgreSQL 9.0, use pg_table_size() to show a more accurate
 		 * size of a table, including FSM, VM and TOAST tables.
 		 */
 		if (pset.sversion >= 90000)
@@ -5108,7 +5108,7 @@ describeSubscriptions(const char *pattern, bool verbose)
 						  gettext_noop("Conninfo"));
 	}
 
-	/* Only display subscritpions in current database. */
+	/* Only display subscriptions in the current database. */
 	appendPQExpBufferStr(&buf,
 						 "FROM pg_catalog.pg_subscription\n"
 						 "WHERE subdbid = (SELECT oid\n"
diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h
index 7a237d75691d8d3cb99c3d7748027b6142df875f..a3796f290289852ec8e147e921cde9adb4f0227a 100644
--- a/src/include/access/visibilitymap.h
+++ b/src/include/access/visibilitymap.h
@@ -26,7 +26,7 @@
 #define VISIBILITYMAP_ALL_VISIBLE	0x01
 #define VISIBILITYMAP_ALL_FROZEN	0x02
 #define VISIBILITYMAP_VALID_BITS	0x03		/* OR of all valid
-												 * visiblitymap flags bits */
+												 * visibilitymap flags bits */
 
 /* Macros for visibilitymap test */
 #define VM_ALL_VISIBLE(r, b, v) \
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
index 4df6529ea0064f82d363b129863acf19185c3446..e7d11913d137d7772d9380ed29e743a7382f1d0a 100644
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -65,7 +65,7 @@ typedef enum
 										 * apply */
 }	SyncCommitLevel;
 
-/* Define the default setting for synchonous_commit */
+/* Define the default setting for synchronous_commit */
 #define SYNCHRONOUS_COMMIT_ON	SYNCHRONOUS_COMMIT_REMOTE_FLUSH
 
 /* Synchronous commit level */
diff --git a/src/include/c.h b/src/include/c.h
index a2c043adfbfe314a3b7d59b3645e1da9fbd99d58..91e5baa969229116b706c33530986b63943cf9a4 100644
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -989,7 +989,7 @@ typedef NameData *Name;
 /* gettext domain name mangling */
 
 /*
- * To better support parallel installations of major PostgeSQL
+ * To better support parallel installations of major PostgreSQL
  * versions as well as parallel installations of major library soname
  * versions, we mangle the gettext domain name by appending those
  * version numbers.  The coding rule ought to be that wherever the
diff --git a/src/include/catalog/partition.h b/src/include/catalog/partition.h
index 93cb1686bd632dc5f5171b5a55b42e3c837fa3e6..b195d1a5ab48fd284f87f192b6490c484a531479 100644
--- a/src/include/catalog/partition.h
+++ b/src/include/catalog/partition.h
@@ -41,7 +41,7 @@ typedef struct PartitionDescData *PartitionDesc;
 
 /*-----------------------
  * PartitionDispatch - information about one partitioned table in a partition
- * hiearchy required to route a tuple to one of its partitions
+ * hierarchy required to route a tuple to one of its partitions
  *
  *	reldesc		Relation descriptor of the table
  *	key			Partition key information of the table
diff --git a/src/include/catalog/pg_subscription.h b/src/include/catalog/pg_subscription.h
index cf30bf90db6ad62a2fd70e040c1bb089cf6e9251..75b618accd664fca33b2410107d7168597f03469 100644
--- a/src/include/catalog/pg_subscription.h
+++ b/src/include/catalog/pg_subscription.h
@@ -23,7 +23,7 @@
 #define SubscriptionRelation_Rowtype_Id	6101
 
 /*
- * Technicaly, the subscriptions live inside the database, so a shared catalog
+ * Technically, the subscriptions live inside the database, so a shared catalog
  * seems weird, but the replication launcher process needs to access all of
  * them to be able to start the workers, so we have to put them in a shared,
  * nailed catalog.
@@ -35,7 +35,7 @@ CATALOG(pg_subscription,6100) BKI_SHARED_RELATION BKI_ROWTYPE_OID(6101) BKI_SCHE
 
 	Oid			subowner;		/* Owner of the subscription */
 
-	bool		subenabled;		/* True if the subsription is enabled
+	bool		subenabled;		/* True if the subscription is enabled
 								 * (the worker should be running) */
 
 #ifdef CATALOG_VARLEN			/* variable-length fields start here */
@@ -65,7 +65,7 @@ typedef FormData_pg_subscription *Form_pg_subscription;
 typedef struct Subscription
 {
 	Oid		oid;			/* Oid of the subscription */
-	Oid		dbid;			/* Oid of the database which dubscription is in */
+	Oid		dbid;			/* Oid of the database the subscription is in */
 	char   *name;			/* Name of the subscription */
 	Oid		owner;			/* Oid of the subscription owner */
 	bool	enabled;		/* Indicates if the subscription is enabled */
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 12aedbc384cdd3207ffcd6fae37efd073e9ac2f3..72e18499c07aad8db520b81151e606841fbc9d2c 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -345,7 +345,7 @@ SH_GROW(SH_TYPE *tb, uint32 newsize)
 	 * we need. We neither want tb->members increased, nor do we need to do
 	 * deal with deleted elements, nor do we need to compare keys. So a
 	 * special-cased implementation is lot faster. As resizing can be time
-	 * consuming and frequent, that's worthwile to optimize.
+	 * consuming and frequent, that's worthwhile to optimize.
 	 *
 	 * To be able to simply move entries over, we have to start not at the
 	 * first bucket (i.e olddata[0]), but find the first bucket that's either
@@ -620,7 +620,7 @@ SH_DELETE(SH_TYPE *tb, SH_KEY_TYPE key)
 
 			/*
 			 * Backward shift following elements till either an empty element
-			 * or an element at its optimal position is encounterered.
+			 * or an element at its optimal position is encountered.
 			 *
 			 * While that sounds expensive, the average chain length is short,
 			 * and deletions would otherwise require toombstones.
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index f46cd49bab4057ac9be948f97de9e46008fb41ff..1ac56ccbb128e5e4e6c36b1bbb2abcdf0c66c7e8 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -842,7 +842,7 @@ typedef LONG slock_t;
 #define SPIN_DELAY() spin_delay()
 
 /* If using Visual C++ on Win64, inline assembly is unavailable.
- * Use a _mm_pause instrinsic instead of rep nop.
+ * Use a _mm_pause intrinsic instead of rep nop.
  */
 #if defined(_WIN64)
 static __forceinline void
diff --git a/src/include/tsearch/dicts/spell.h b/src/include/tsearch/dicts/spell.h
index 8cba645540e4ce1bf5f26513c30e5bbb3388ffa9..3032d0b508747b2db91f6f5cf582273ac029b446 100644
--- a/src/include/tsearch/dicts/spell.h
+++ b/src/include/tsearch/dicts/spell.h
@@ -147,7 +147,7 @@ typedef struct
 } CMPDAffix;
 
 /*
- * Type of encoding affix flags in Hunspel dictionaries
+ * Type of encoding affix flags in Hunspell dictionaries
  */
 typedef enum
 {
diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c
index a3ae92eb35b0e0cacd68eba7a333571226b76dbe..d5a463d9404a3600b631a2dfa666ba244cb0f88c 100644
--- a/src/interfaces/ecpg/ecpglib/execute.c
+++ b/src/interfaces/ecpg/ecpglib/execute.c
@@ -2,7 +2,7 @@
 
 /*
  * The aim is to get a simpler interface to the database routines.
- * All the tidieous messing around with tuples is supposed to be hidden
+ * All the tedious messing around with tuples is supposed to be hidden
  * by this function.
  */
 /* Author: Linus Tolke
diff --git a/src/interfaces/ecpg/pgtypeslib/datetime.c b/src/interfaces/ecpg/pgtypeslib/datetime.c
index 3b0855f7225b8687481ebec2fb33c5718a62a645..7216b432d461f711654f8ef694a3ef90b9b59b9b 100644
--- a/src/interfaces/ecpg/pgtypeslib/datetime.c
+++ b/src/interfaces/ecpg/pgtypeslib/datetime.c
@@ -324,7 +324,7 @@ PGTYPESdate_fmt_asc(date dDate, const char *fmtstring, char *outbuf)
  *
  * function works as follows:
  *	 - first we analyze the parameters
- *	 - if this is a special case with no delimiters, add delimters
+ *	 - if this is a special case with no delimiters, add delimiters
  *	 - find the tokens. First we look for numerical values. If we have found
  *	   less than 3 tokens, we check for the months' names and thereafter for
  *	   the abbreviations of the months' names.
diff --git a/src/interfaces/ecpg/pgtypeslib/numeric.c b/src/interfaces/ecpg/pgtypeslib/numeric.c
index 120794550d638063de81b75a769aa44f5c7efa4a..a93d074de211bc5e9cbae519695ba1e9c6db35e8 100644
--- a/src/interfaces/ecpg/pgtypeslib/numeric.c
+++ b/src/interfaces/ecpg/pgtypeslib/numeric.c
@@ -1368,11 +1368,11 @@ PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
 {
 	/* use cmp_abs function to calculate the result */
 
-	/* both are positive: normal comparation with cmp_abs */
+	/* both are positive: normal comparison with cmp_abs */
 	if (var1->sign == NUMERIC_POS && var2->sign == NUMERIC_POS)
 		return cmp_abs(var1, var2);
 
-	/* both are negative: return the inverse of the normal comparation */
+	/* both are negative: return the inverse of the normal comparison */
 	if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG)
 	{
 		/*
diff --git a/src/interfaces/ecpg/preproc/ecpg.header b/src/interfaces/ecpg/preproc/ecpg.header
index 672f0b45d48e6e207d10e0765984b0d530aff5a8..2562366bbedbd6183f266738f664e729e5e3a443 100644
--- a/src/interfaces/ecpg/preproc/ecpg.header
+++ b/src/interfaces/ecpg/preproc/ecpg.header
@@ -207,7 +207,7 @@ create_questionmarks(char *name, bool array)
 
 	/* In case we have a struct, we have to print as many "?" as there are attributes in the struct
 	 * An array is only allowed together with an element argument
-	 * This is essantially only used for inserts, but using a struct as input parameter is an error anywhere else
+	 * This is essentially only used for inserts, but using a struct as an input parameter is an error anywhere else
 	 * so we don't have to worry here. */
 
 	if (p->type->type == ECPGt_struct || (array && p->type->type == ECPGt_array && p->type->u.element->type == ECPGt_struct))
diff --git a/src/interfaces/ecpg/preproc/ecpg.trailer b/src/interfaces/ecpg/preproc/ecpg.trailer
index 31e765ccd39ae083834e88df8e6dddaedee7fd61..1c108795de995cbc4f801d9fa03267e210c4aa6d 100644
--- a/src/interfaces/ecpg/preproc/ecpg.trailer
+++ b/src/interfaces/ecpg/preproc/ecpg.trailer
@@ -355,7 +355,7 @@ ECPGExecuteImmediateStmt: EXECUTE IMMEDIATE execstring
 			  $$ = $3;
 			};
 /*
- * variable decalartion outside exec sql declare block
+ * variable declaration outside exec sql declare block
  */
 ECPGVarDeclaration: single_vt_declaration;
 
@@ -707,7 +707,7 @@ struct_union_type_with_symbol: s_struct_union_symbol
 			free(forward_name);
 			forward_name = NULL;
 
-			/* This is essantially a typedef but needs the keyword struct/union as well.
+			/* This is essentially a typedef but needs the keyword struct/union as well.
 			 * So we create the typedef for each struct definition with symbol */
 			for (ptr = types; ptr != NULL; ptr = ptr->next)
 			{
@@ -1275,7 +1275,7 @@ descriptor_item:	SQL_CARDINALITY			{ $$ = ECPGd_cardinality; }
 		;
 
 /*
- * set/reset the automatic transaction mode, this needs a differnet handling
+ * set/reset the automatic transaction mode, this needs a different handling
  * as the other set commands
  */
 ECPGSetAutocommit:	SET SQL_AUTOCOMMIT '=' on_off	{ $$ = $4; }
@@ -1287,7 +1287,7 @@ on_off: ON				{ $$ = mm_strdup("on"); }
 		;
 
 /*
- * set the actual connection, this needs a differnet handling as the other
+ * set the actual connection, this needs a different handling as the other
  * set commands
  */
 ECPGSetConnection:	SET CONNECTION TO connection_object { $$ = $4; }
diff --git a/src/interfaces/ecpg/preproc/parse.pl b/src/interfaces/ecpg/preproc/parse.pl
index ea661d3694818c3cc8167ea457e10f2cb38204ce..8a401304ec5ccb0da3baeed852f6a2b461823949 100644
--- a/src/interfaces/ecpg/preproc/parse.pl
+++ b/src/interfaces/ecpg/preproc/parse.pl
@@ -550,7 +550,7 @@ sub dump_fields
 			if ($len == 1)
 			{
 
-				# Straight assignement
+				# Straight assignment
 				$str = ' $$ = ' . $flds_new[0] . ';';
 				add_to_buffer('rules', $str);
 			}
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
index 2845d3b9d295603953de9861db68fef8d6e1b4f9..b47a16e3d0194c0f218406feab774c3b1721c448 100644
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -803,7 +803,7 @@ pg_fe_getauthname(PQExpBuffer errorMessage)
  * be sent in cleartext if it is encrypted on the client side.  This is
  * good because it ensures the cleartext password won't end up in logs,
  * pg_stat displays, etc.  We export the function so that clients won't
- * be dependent on low-level details like whether the enceyption is MD5
+ * be dependent on low-level details like whether the encryption is MD5
  * or something else.
  *
  * Arguments are the cleartext password, and the SQL name of the user it
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
index c6553888642ee3b806acc73531e43be773fbac8c..e9b73a925ed333eaccc908153cafef734f1f0cd3 100644
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -309,7 +309,7 @@ typedef struct pg_conn_host
 	char	   *host;			/* host name or address, or socket path */
 	pg_conn_host_type type;		/* type of host */
 	char	   *port;			/* port number for this host; if not NULL,
-								 * overrrides the PGConn's pgport */
+								 * overrides the PGConn's pgport */
 	char	   *password;		/* password for this host, read from the
 								 * password file.  only set if the PGconn's
 								 * pgpass field is NULL. */
@@ -666,7 +666,7 @@ extern void pq_reset_sigpipe(sigset_t *osigset, bool sigpipe_pending,
 #endif
 
 /*
- * The SSL implementatation provides these functions (fe-secure-openssl.c)
+ * The SSL implementation provides these functions (fe-secure-openssl.c)
  */
 extern void pgtls_init_library(bool do_ssl, int do_crypto);
 extern int	pgtls_init(PGconn *conn);
diff --git a/src/interfaces/libpq/win32.c b/src/interfaces/libpq/win32.c
index d6ecca3859b1e15372475c94911908e004bc59a3..f99f9a8cdbead519fdd4bb6a1c35208fb00299d6 100644
--- a/src/interfaces/libpq/win32.c
+++ b/src/interfaces/libpq/win32.c
@@ -32,7 +32,7 @@
 
 #include "win32.h"
 
-/* Declared here to avoid pulling in all includes, which causes name collissions */
+/* Declared here to avoid pulling in all includes, which causes name collisions */
 #ifdef ENABLE_NLS
 extern char *libpq_gettext(const char *msgid) pg_attribute_format_arg(1);
 #else
diff --git a/src/pl/plperl/ppport.h b/src/pl/plperl/ppport.h
index 5ea0c66e98c18dd7ec5dc9cbb4a5f11419d0aa47..31d06cb3b0ba3566bd524f5e12eccb251b48447e 100644
--- a/src/pl/plperl/ppport.h
+++ b/src/pl/plperl/ppport.h
@@ -79,7 +79,7 @@ to be installed on your system.
 If this option is given, a copy of each file will be saved with
 the given suffix that contains the suggested changes. This does
 not require any external programs. Note that this does not
-automagially add a dot between the original filename and the
+automagically add a dot between the original filename and the
 suffix. If you want the dot, you have to include it in the option
 argument.
 
@@ -4364,9 +4364,9 @@ DPPP_(my_vload_module)(U32 flags, SV *name, SV *ver, va_list *args)
 
     OP * const modname = newSVOP(OP_CONST, 0, name);
     /* 5.005 has a somewhat hacky force_normal that doesn't croak on
-       SvREADONLY() if PL_compling is true. Current perls take care in
+       SvREADONLY() if PL_compiling is true. Current perls take care in
        ck_require() to correctly turn off SvREADONLY before calling
-       force_normal_flags(). This seems a better fix than fudging PL_compling
+       force_normal_flags(). This seems a better fix than fudging PL_compiling
      */
     SvREADONLY_off(((SVOP*)modname)->op_sv);
     modname->op_private |= OPpCONST_BARE;
diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c
index d61493fac895df8c82b257a791e51268f379eca5..c4806274bc98300474807fcf3eae910a22af81c4 100644
--- a/src/pl/plpython/plpy_elog.c
+++ b/src/pl/plpython/plpy_elog.c
@@ -303,7 +303,7 @@ PLy_traceback(PyObject *e, PyObject *v, PyObject *tb,
 			long		plain_lineno;
 
 			/*
-			 * The second frame points at the internal function, but to mimick
+			 * The second frame points at the internal function, but to mimic
 			 * Python error reporting we want to say <module>.
 			 */
 			if (*tb_depth == 1)
diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c
index 0cf2ad29cbdd209d8a60021813478489b7673258..761534406d5310c441740be9b4c57c9292878ba5 100644
--- a/src/pl/plpython/plpy_plpymodule.c
+++ b/src/pl/plpython/plpy_plpymodule.c
@@ -463,7 +463,7 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw)
 
 			if (strcmp(keyword, "message") == 0)
 			{
-				/* the message should not be overwriten */
+				/* the message should not be overwritten */
 				if (PyTuple_Size(args) != 0)
 				{
 					PLy_exception_set(PyExc_TypeError, "Argument 'message' given by name and position");
diff --git a/src/pl/plpython/plpy_typeio.h b/src/pl/plpython/plpy_typeio.h
index 5f5c1ad5c6babfb1e7c5485decd50eeec361693b..e04722c47a503911b6ee2c9e3f5ec925e904eb38 100644
--- a/src/pl/plpython/plpy_typeio.h
+++ b/src/pl/plpython/plpy_typeio.h
@@ -43,7 +43,7 @@ typedef union PLyTypeInput
 } PLyTypeInput;
 
 /*
- * Conversion from Python object to a Postgresql Datum.
+ * Conversion from Python object to a PostgreSQL Datum.
  *
  * The 'inarray' argument to the conversion function is true, if the
  * converted value was in an array (Python list). It is used to give a
@@ -78,7 +78,7 @@ typedef union PLyTypeOutput
 	PLyObToTuple r;
 } PLyTypeOutput;
 
-/* all we need to move Postgresql data to Python objects,
+/* all we need to move PostgreSQL data to Python objects,
  * and vice versa
  */
 typedef struct PLyTypeInfo
diff --git a/src/test/isolation/specs/receipt-report.spec b/src/test/isolation/specs/receipt-report.spec
index 1e214960d183feab8f176d0f45d12541317e6226..5e1d51d0bd06d3a7ea424d73cd573859d9fe94c9 100644
--- a/src/test/isolation/specs/receipt-report.spec
+++ b/src/test/isolation/specs/receipt-report.spec
@@ -7,7 +7,7 @@
 # be changed and a report of the closed day's receipts subsequently
 # run which will miss a receipt from the date which has been closed.
 #
-# There are only six permuations which must cause a serialization failure.
+# There are only six permutations which must cause a serialization failure.
 # Failure cases are where s1 overlaps both s2 and s3, but s2 commits before
 # s3 executes its first SELECT.
 #
diff --git a/src/test/isolation/specs/two-ids.spec b/src/test/isolation/specs/two-ids.spec
index d67064068e034f7d176bba054182fc1fc51ec3d3..277097125ac1a0f5a1be72ff12e2aefe85439a06 100644
--- a/src/test/isolation/specs/two-ids.spec
+++ b/src/test/isolation/specs/two-ids.spec
@@ -2,7 +2,7 @@
 #
 # Small, simple test showing read-only anomalies.
 #
-# There are only four permuations which must cause a serialization failure.
+# There are only four permutations which must cause a serialization failure.
 # Required failure cases are where s2 overlaps both s1 and s3, but s1
 # commits before s3 executes its first SELECT.
 #
diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out
index 76b9c2d372d91e75fd377f8329c841fb2eaa554c..d8e7b612940fde9d52dd830161be9bc48c56e81a 100644
--- a/src/test/regress/expected/alter_table.out
+++ b/src/test/regress/expected/alter_table.out
@@ -310,7 +310,7 @@ INSERT INTO tmp3 values (5,50);
 -- Try (and fail) to add constraint due to invalid source columns
 ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full;
 ERROR:  column "c" referenced in foreign key constraint does not exist
--- Try (and fail) to add constraint due to invalide destination columns explicitly given
+-- Try (and fail) to add constraint due to invalid destination columns explicitly given
 ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full;
 ERROR:  column "b" referenced in foreign key constraint does not exist
 -- Try (and fail) to add constraint due to invalid data
@@ -2842,7 +2842,7 @@ ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key
 ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists
 ERROR:  could not change table "unlogged2" to logged because it references unlogged table "unlogged1"
 ALTER TABLE unlogged1 SET LOGGED;
--- check relpersistence of an unlogged table after changing to permament
+-- check relpersistence of an unlogged table after changing to permanent
 SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
 UNION ALL
 SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
@@ -3029,7 +3029,7 @@ ERROR:  cannot alter type of column referenced in partition key expression
 -- cannot drop NOT NULL on columns in the range partition key
 ALTER TABLE partitioned ALTER COLUMN a DROP NOT NULL;
 ERROR:  column "a" is in range partition key
--- partitioned table cannot partiticipate in regular inheritance
+-- partitioned table cannot participate in regular inheritance
 CREATE TABLE foo (
 	a int,
 	b int
diff --git a/src/test/regress/expected/create_table.out b/src/test/regress/expected/create_table.out
index 36266f0a32b059cb08fc94fda63a6c910f3007db..fc92cd92dd5221c6033d91947bc3dcf1339390b2 100644
--- a/src/test/regress/expected/create_table.out
+++ b/src/test/regress/expected/create_table.out
@@ -424,7 +424,7 @@ DROP FUNCTION plusone(int);
 ERROR:  cannot drop function plusone(integer) because other objects depend on it
 DETAIL:  table partitioned depends on function plusone(integer)
 HINT:  Use DROP ... CASCADE to drop the dependent objects too.
--- partitioned table cannot partiticipate in regular inheritance
+-- partitioned table cannot participate in regular inheritance
 CREATE TABLE partitioned2 (
 	a int
 ) PARTITION BY LIST ((a+1));
diff --git a/src/test/regress/expected/indirect_toast.out b/src/test/regress/expected/indirect_toast.out
index 4f4bf41973afbab96e097cf65c701f0bf2c91238..3e255fbded8de20b646e38dc033a96ba1e91d4a5 100644
--- a/src/test/regress/expected/indirect_toast.out
+++ b/src/test/regress/expected/indirect_toast.out
@@ -23,7 +23,7 @@ UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200);
  ("one-toasted,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
 (4 rows)
 
--- modification without modifying asigned value
+-- modification without modifying assigned value
 UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
                                                                                                 substring                                                                                                 
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -61,7 +61,7 @@ SELECT substring(toasttest::text, 1, 200) FROM toasttest;
  ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
 (4 rows)
 
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
 VACUUM FREEZE toasttest;
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
                                                                                                 substring                                                                                                 
@@ -95,7 +95,7 @@ UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200);
  ("one-toasted,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
 (4 rows)
 
--- modification without modifying asigned value
+-- modification without modifying assigned value
 UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
                                                                                                 substring                                                                                                 
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -135,7 +135,7 @@ SELECT substring(toasttest::text, 1, 200) FROM toasttest;
  ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
 (5 rows)
 
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
 VACUUM FREEZE toasttest;
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
                                                                                                 substring                                                                                                 
diff --git a/src/test/regress/expected/init_privs.out b/src/test/regress/expected/init_privs.out
index 55139d4d37c6bab805fd67ac9e794a32c22a58d5..292b1a1035b1360d341d1c85128758ca566555b5 100644
--- a/src/test/regress/expected/init_privs.out
+++ b/src/test/regress/expected/init_privs.out
@@ -1,4 +1,4 @@
--- Test iniital privileges
+-- Test initial privileges
 -- There should always be some initial privileges, set up by initdb
 SELECT count(*) > 0 FROM pg_init_privs;
  ?column? 
diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out
index 63859c53acbeec284f34e2339e3f4ce616684e9d..8d005fddd46c4c56840a59ade08d3f71c00c8983 100644
--- a/src/test/regress/expected/insert_conflict.out
+++ b/src/test/regress/expected/insert_conflict.out
@@ -291,7 +291,7 @@ insert into insertconflicttest values (12, 'Date') on conflict (lower(fruit), ke
 ERROR:  there is no unique or exclusion constraint matching the ON CONFLICT specification
 drop index comp_key_index;
 --
--- Partial index tests, no inference predicate specificied
+-- Partial index tests, no inference predicate specified
 --
 create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5;
 create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5;
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index d9bbae097b73fb64448bd86cb42ba467ae025234..c3bb4fe767ffba5273da40326fff5c80e71ff8e1 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -4260,7 +4260,7 @@ select * from
 -- Test hints given on incorrect column references are useful
 --
 select t1.uunique1 from
-  tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestipn
+  tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestion
 ERROR:  column t1.uunique1 does not exist
 LINE 1: select t1.uunique1 from
                ^
diff --git a/src/test/regress/expected/matview.out b/src/test/regress/expected/matview.out
index 4ae44607a40902721d322ef1ee759cbf5a837fcd..d1f35c58337937d6fe65563d7b916088805680b9 100644
--- a/src/test/regress/expected/matview.out
+++ b/src/test/regress/expected/matview.out
@@ -292,7 +292,7 @@ SELECT * FROM mvtest_tvvm;
 -- test diemv when the mv does not exist
 DROP MATERIALIZED VIEW IF EXISTS no_such_mv;
 NOTICE:  materialized view "no_such_mv" does not exist, skipping
--- make sure invalid comination of options is prohibited
+-- make sure invalid combination of options is prohibited
 REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tvmm WITH NO DATA;
 ERROR:  CONCURRENTLY and WITH NO DATA options cannot be used together
 -- no tuple locks on materialized views
diff --git a/src/test/regress/expected/plpgsql.out b/src/test/regress/expected/plpgsql.out
index 79513e4598bee72bc258a2426ddaaa917c5e4cf6..04848c10a2cf97e9ec4d2dc0af3b6c07d861e2e3 100644
--- a/src/test/regress/expected/plpgsql.out
+++ b/src/test/regress/expected/plpgsql.out
@@ -1423,7 +1423,7 @@ select * from WSlot order by slotname;
 
 --
 -- Install the central phone system and create the phone numbers.
--- They are weired on insert to the patchfields. Again the
+-- They are wired on insert to the patchfields. Again the
 -- triggers automatically tell the PSlots to update their
 -- backlink field.
 --
diff --git a/src/test/regress/expected/replica_identity.out b/src/test/regress/expected/replica_identity.out
index 1a04ec55610b2c781a3e86e343b90372a3468c6d..fa63235fc9dab0b34df3c51b9d50ba5644413751 100644
--- a/src/test/regress/expected/replica_identity.out
+++ b/src/test/regress/expected/replica_identity.out
@@ -98,7 +98,7 @@ Indexes:
 
 -- succeed, oid unique index
 ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_oid_idx;
--- succeed, nondeferrable unique constraint over nonullable cols
+-- succeed, nondeferrable unique constraint over nonnullable cols
 ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_unique_nondefer;
 -- succeed unique index over nonnullable cols
 ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_keyab_key;
diff --git a/src/test/regress/expected/rolenames.out b/src/test/regress/expected/rolenames.out
index b8bf0cf8778612286d5318008522f05a8658c0f7..fd058e4f7d2d94eb7f7dd264789d3e3f0797c006 100644
--- a/src/test/regress/expected/rolenames.out
+++ b/src/test/regress/expected/rolenames.out
@@ -440,7 +440,7 @@ LINE 1: ALTER USER NONE SET application_name to 'BOMB';
                    ^
 ALTER USER nonexistent SET application_name to 'BOMB'; -- error
 ERROR:  role "nonexistent" does not exist
--- CREAETE SCHEMA
+-- CREATE SCHEMA
 set client_min_messages to error;
 CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER;
 CREATE SCHEMA newschema2 AUTHORIZATION "current_user";
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index de5ae00970e998f3c61283d4e67920a70308e599..60731a99b7c84d292d450ae43190a22a8b64a33e 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -938,7 +938,7 @@ CREATE TABLE shoe_data (
 	shoename   char(10),      -- primary key
 	sh_avail   integer,       -- available # of pairs
 	slcolor    char(10),      -- preferred shoelace color
-	slminlen   float,         -- miminum shoelace length
+	slminlen   float,         -- minimum shoelace length
 	slmaxlen   float,         -- maximum shoelace length
 	slunit     char(8)        -- length unit
 );
diff --git a/src/test/regress/expected/tsdicts.out b/src/test/regress/expected/tsdicts.out
index 8ed64d3c68e07107fbe048e114b1c48a6f21c037..493a25587c2bc952ca80855b743e978c57a9657d 100644
--- a/src/test/regress/expected/tsdicts.out
+++ b/src/test/regress/expected/tsdicts.out
@@ -383,7 +383,7 @@ SELECT ts_lexize('hunspell_num', 'footballyklubber');
  {foot,ball,klubber}
 (1 row)
 
--- Synonim dictionary
+-- Synonym dictionary
 CREATE TEXT SEARCH DICTIONARY synonym (
 						Template=synonym,
 						Synonyms=synonym_sample
diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql
index 4611cbb731c2a6ff2fec8f46c93f302a80d080a3..1f551ec53c46e7271e5a24df3964dc1bff929e13 100644
--- a/src/test/regress/sql/alter_table.sql
+++ b/src/test/regress/sql/alter_table.sql
@@ -255,7 +255,7 @@ INSERT INTO tmp3 values (5,50);
 -- Try (and fail) to add constraint due to invalid source columns
 ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full;
 
--- Try (and fail) to add constraint due to invalide destination columns explicitly given
+-- Try (and fail) to add constraint due to invalid destination columns explicitly given
 ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full;
 
 -- Try (and fail) to add constraint due to invalid data
@@ -1829,7 +1829,7 @@ CREATE UNLOGGED TABLE unlogged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unl
 ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key
 ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists
 ALTER TABLE unlogged1 SET LOGGED;
--- check relpersistence of an unlogged table after changing to permament
+-- check relpersistence of an unlogged table after changing to permanent
 SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
 UNION ALL
 SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
@@ -1917,7 +1917,7 @@ ALTER TABLE partitioned ALTER COLUMN b TYPE char(5);
 -- cannot drop NOT NULL on columns in the range partition key
 ALTER TABLE partitioned ALTER COLUMN a DROP NOT NULL;
 
--- partitioned table cannot partiticipate in regular inheritance
+-- partitioned table cannot participate in regular inheritance
 CREATE TABLE foo (
 	a int,
 	b int
diff --git a/src/test/regress/sql/create_table.sql b/src/test/regress/sql/create_table.sql
index 6314aa403ff1dd89e1ea445f251766a6788b6f6e..5f25c436ee16fc7a215dd4f72564f151af952ec4 100644
--- a/src/test/regress/sql/create_table.sql
+++ b/src/test/regress/sql/create_table.sql
@@ -418,7 +418,7 @@ SELECT attname, attnotnull FROM pg_attribute
 -- prevent a function referenced in partition key from being dropped
 DROP FUNCTION plusone(int);
 
--- partitioned table cannot partiticipate in regular inheritance
+-- partitioned table cannot participate in regular inheritance
 CREATE TABLE partitioned2 (
 	a int
 ) PARTITION BY LIST ((a+1));
diff --git a/src/test/regress/sql/indirect_toast.sql b/src/test/regress/sql/indirect_toast.sql
index d502480ad3fb2fc498c11b40b6aee6459293ef25..18b6cc3a95f2d1584eb612cf900c27022c54cdf6 100644
--- a/src/test/regress/sql/indirect_toast.sql
+++ b/src/test/regress/sql/indirect_toast.sql
@@ -11,7 +11,7 @@ SELECT descr, substring(make_tuple_indirect(toasttest)::text, 1, 200) FROM toast
 -- modification without changing varlenas
 UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200);
 
--- modification without modifying asigned value
+-- modification without modifying assigned value
 UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
 
 -- modification modifying, but effectively not changing
@@ -20,7 +20,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(toasttest::te
 UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toasttest::text, 1, 200);
 
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
 VACUUM FREEZE toasttest;
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
 
@@ -42,7 +42,7 @@ CREATE TRIGGER toasttest_update_indirect
 -- modification without changing varlenas
 UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200);
 
--- modification without modifying asigned value
+-- modification without modifying assigned value
 UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
 
 -- modification modifying, but effectively not changing
@@ -53,7 +53,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toastte
 INSERT INTO toasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL);
 
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
 VACUUM FREEZE toasttest;
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
 
diff --git a/src/test/regress/sql/init_privs.sql b/src/test/regress/sql/init_privs.sql
index 9b4c70246e5ada3e2d834335567455e00b9d1be9..4a31af27986ae081cba88d315262b9eeabfb375a 100644
--- a/src/test/regress/sql/init_privs.sql
+++ b/src/test/regress/sql/init_privs.sql
@@ -1,4 +1,4 @@
--- Test iniital privileges
+-- Test initial privileges
 
 -- There should always be some initial privileges, set up by initdb
 SELECT count(*) > 0 FROM pg_init_privs;
diff --git a/src/test/regress/sql/insert_conflict.sql b/src/test/regress/sql/insert_conflict.sql
index 116cf763f956df0cbe0df9cd563cfdff8e7b5f2f..df3a9b59b5b0882549c1d9aa70b6330fee2f7064 100644
--- a/src/test/regress/sql/insert_conflict.sql
+++ b/src/test/regress/sql/insert_conflict.sql
@@ -138,7 +138,7 @@ insert into insertconflicttest values (12, 'Date') on conflict (lower(fruit), ke
 drop index comp_key_index;
 
 --
--- Partial index tests, no inference predicate specificied
+-- Partial index tests, no inference predicate specified
 --
 create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5;
 create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5;
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index 97bccec721a45d356e1ad9f8c36f6a3108099524..bf18a8f6c42f88ab08253122c3fbedada2957fff 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -1456,7 +1456,7 @@ select * from
 --
 
 select t1.uunique1 from
-  tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestipn
+  tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestion
 select t2.uunique1 from
   tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t2" suggestion
 select uunique1 from
diff --git a/src/test/regress/sql/matview.sql b/src/test/regress/sql/matview.sql
index 1164b4cea21d525f5d1b4fb011c932e7efd4f02e..e0f4a1319f9c070e56f4073f9e02f2f0aaaa0bf0 100644
--- a/src/test/regress/sql/matview.sql
+++ b/src/test/regress/sql/matview.sql
@@ -92,7 +92,7 @@ SELECT * FROM mvtest_tvvm;
 -- test diemv when the mv does not exist
 DROP MATERIALIZED VIEW IF EXISTS no_such_mv;
 
--- make sure invalid comination of options is prohibited
+-- make sure invalid combination of options is prohibited
 REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tvmm WITH NO DATA;
 
 -- no tuple locks on materialized views
diff --git a/src/test/regress/sql/plpgsql.sql b/src/test/regress/sql/plpgsql.sql
index 877d3ad08ef0afefb901cf9d6191785b9cfb3c4e..31dcbdffdd816a09287981abeddd890313272eda 100644
--- a/src/test/regress/sql/plpgsql.sql
+++ b/src/test/regress/sql/plpgsql.sql
@@ -1350,7 +1350,7 @@ select * from WSlot order by slotname;
 
 --
 -- Install the central phone system and create the phone numbers.
--- They are weired on insert to the patchfields. Again the
+-- They are wired on insert to the patchfields. Again the
 -- triggers automatically tell the PSlots to update their
 -- backlink field.
 --
diff --git a/src/test/regress/sql/replica_identity.sql b/src/test/regress/sql/replica_identity.sql
index 68824a3aa7aa6fc4d9776834a44a63b3384757e3..3d2171c73367439462151a5cb36fd3079f84c614 100644
--- a/src/test/regress/sql/replica_identity.sql
+++ b/src/test/regress/sql/replica_identity.sql
@@ -56,7 +56,7 @@ SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass;
 -- succeed, oid unique index
 ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_oid_idx;
 
--- succeed, nondeferrable unique constraint over nonullable cols
+-- succeed, nondeferrable unique constraint over nonnullable cols
 ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_unique_nondefer;
 
 -- succeed unique index over nonnullable cols
diff --git a/src/test/regress/sql/rolenames.sql b/src/test/regress/sql/rolenames.sql
index 451d9d338d25c3a6638712b24c81158a53812816..4c5706bbaa867bfce86d9831da426374fded6dee 100644
--- a/src/test/regress/sql/rolenames.sql
+++ b/src/test/regress/sql/rolenames.sql
@@ -176,7 +176,7 @@ ALTER USER PUBLIC SET application_name to 'BOMB'; -- error
 ALTER USER NONE SET application_name to 'BOMB'; -- error
 ALTER USER nonexistent SET application_name to 'BOMB'; -- error
 
--- CREAETE SCHEMA
+-- CREATE SCHEMA
 set client_min_messages to error;
 CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER;
 CREATE SCHEMA newschema2 AUTHORIZATION "current_user";
diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql
index 835945f4b7bf07808914d0b22f6331eec0e2efa2..90dc9ceaf46a93ed217dad2711aa6c3a014c6103 100644
--- a/src/test/regress/sql/rules.sql
+++ b/src/test/regress/sql/rules.sql
@@ -522,7 +522,7 @@ CREATE TABLE shoe_data (
 	shoename   char(10),      -- primary key
 	sh_avail   integer,       -- available # of pairs
 	slcolor    char(10),      -- preferred shoelace color
-	slminlen   float,         -- miminum shoelace length
+	slminlen   float,         -- minimum shoelace length
 	slmaxlen   float,         -- maximum shoelace length
 	slunit     char(8)        -- length unit
 );
diff --git a/src/test/regress/sql/tsdicts.sql b/src/test/regress/sql/tsdicts.sql
index 4d0419e35a6d4c51bf31908068a000fd639ad71f..ed2cbe1fec55795fde3b081910e8cfba14be615d 100644
--- a/src/test/regress/sql/tsdicts.sql
+++ b/src/test/regress/sql/tsdicts.sql
@@ -96,7 +96,7 @@ SELECT ts_lexize('hunspell_num', 'footballklubber');
 SELECT ts_lexize('hunspell_num', 'ballyklubber');
 SELECT ts_lexize('hunspell_num', 'footballyklubber');
 
--- Synonim dictionary
+-- Synonym dictionary
 CREATE TEXT SEARCH DICTIONARY synonym (
 						Template=synonym,
 						Synonyms=synonym_sample
diff --git a/src/test/ssl/ServerSetup.pm b/src/test/ssl/ServerSetup.pm
index 20eaf76bffc074499832f04fb66bc1e4a302059c..9441249b3ad1ad93264d619e68d6ace683d2b411 100644
--- a/src/test/ssl/ServerSetup.pm
+++ b/src/test/ssl/ServerSetup.pm
@@ -7,7 +7,7 @@
 # - ssl/root+client_ca.crt as the CA root for validating client certs.
 # - reject non-SSL connections
 # - a database called trustdb that lets anyone in
-# - another database called certdb that uses certificate authentiction, ie.
+# - another database called certdb that uses certificate authentication, i.e.
 #   the client must present a valid certificate signed by the client CA
 # - two users, called ssltestuser and anotheruser.
 #