From 807b9e0dff663c5da875af7907a5106c0ff90673 Mon Sep 17 00:00:00 2001
From: Bruce Momjian <bruce@momjian.us>
Date: Sat, 23 May 2015 21:35:49 -0400
Subject: [PATCH] pgindent run for 9.5

---
 contrib/btree_gin/btree_gin.c                 |  35 +-
 contrib/btree_gist/btree_utils_num.c          |   2 +-
 contrib/btree_gist/btree_utils_var.c          |   2 +-
 contrib/fuzzystrmatch/dmetaphone.c            |   2 +-
 contrib/hstore/hstore_gist.c                  |   2 +-
 contrib/hstore_plperl/hstore_plperl.c         |  12 +-
 contrib/hstore_plpython/hstore_plpython.c     |  14 +-
 contrib/ltree/crc32.c                         |   5 +-
 contrib/ltree_plpython/ltree_plpython.c       |   2 +-
 contrib/pageinspect/brinfuncs.c               |  38 +-
 contrib/pageinspect/ginfuncs.c                |   2 +-
 contrib/pg_audit/pg_audit.c                   | 487 +++++++++---------
 contrib/pg_buffercache/pg_buffercache_pages.c |   1 +
 .../pg_stat_statements/pg_stat_statements.c   |  25 +-
 contrib/pgcrypto/pgp-armor.c                  |   2 +-
 contrib/pgcrypto/pgp-pgsql.c                  |  33 +-
 contrib/pgcrypto/pgp.h                        |   8 +-
 contrib/pgstattuple/pgstatapprox.c            |  27 +-
 contrib/postgres_fdw/postgres_fdw.c           |   2 +-
 contrib/test_decoding/test_decoding.c         |   8 +-
 contrib/tsm_system_rows/tsm_system_rows.c     |  73 +--
 contrib/tsm_system_time/tsm_system_time.c     |  98 ++--
 src/backend/access/brin/brin.c                |  17 +-
 src/backend/access/brin/brin_inclusion.c      |  50 +-
 src/backend/access/brin/brin_minmax.c         |  14 +-
 src/backend/access/brin/brin_revmap.c         |  18 +-
 src/backend/access/brin/brin_tuple.c          |   2 +-
 src/backend/access/gin/ginget.c               |   3 +-
 src/backend/access/gin/ginutil.c              |   2 +-
 src/backend/access/gist/gist.c                |   2 +-
 src/backend/access/gist/gistscan.c            |  12 +-
 src/backend/access/gist/gistutil.c            |   2 +-
 src/backend/access/heap/heapam.c              | 102 ++--
 src/backend/access/heap/hio.c                 |   6 +-
 src/backend/access/index/genam.c              |  20 +-
 src/backend/access/nbtree/nbtinsert.c         |  13 +-
 src/backend/access/nbtree/nbtpage.c           |  11 +-
 src/backend/access/nbtree/nbtree.c            |   5 +-
 src/backend/access/nbtree/nbtsearch.c         |   8 +-
 src/backend/access/nbtree/nbtsort.c           |   2 +-
 src/backend/access/nbtree/nbtutils.c          |   6 +-
 src/backend/access/rmgrdesc/committsdesc.c    |   8 +-
 src/backend/access/rmgrdesc/replorigindesc.c  |   6 +-
 src/backend/access/rmgrdesc/xactdesc.c        |  14 +-
 src/backend/access/spgist/spgscan.c           |   1 +
 src/backend/access/tablesample/bernoulli.c    |  69 +--
 src/backend/access/tablesample/system.c       |  48 +-
 src/backend/access/tablesample/tablesample.c  |  94 ++--
 src/backend/access/transam/commit_ts.c        |  74 +--
 src/backend/access/transam/multixact.c        |  59 ++-
 src/backend/access/transam/parallel.c         | 188 +++----
 src/backend/access/transam/twophase.c         |  41 +-
 src/backend/access/transam/xact.c             | 141 ++---
 src/backend/access/transam/xlog.c             | 242 ++++-----
 src/backend/access/transam/xloginsert.c       |  22 +-
 src/backend/access/transam/xlogreader.c       |  27 +-
 src/backend/bootstrap/bootstrap.c             |   1 +
 src/backend/catalog/Catalog.pm                |   3 +-
 src/backend/catalog/aclchk.c                  |  16 +-
 src/backend/catalog/dependency.c              |  12 +-
 src/backend/catalog/genbki.pl                 |  15 +-
 src/backend/catalog/index.c                   |   6 +-
 src/backend/catalog/objectaddress.c           | 359 ++++++++-----
 src/backend/catalog/pg_aggregate.c            |   2 +-
 src/backend/catalog/pg_enum.c                 |   2 +-
 src/backend/catalog/pg_proc.c                 |  10 +-
 src/backend/catalog/pg_type.c                 |   2 +-
 src/backend/catalog/toasting.c                |  12 +-
 src/backend/commands/analyze.c                |   1 +
 src/backend/commands/copy.c                   |  29 +-
 src/backend/commands/createas.c               |   2 +-
 src/backend/commands/dbcommands.c             |   8 +-
 src/backend/commands/dropcmds.c               |   4 +-
 src/backend/commands/event_trigger.c          |  70 +--
 src/backend/commands/explain.c                |  23 +-
 src/backend/commands/functioncmds.c           |  37 +-
 src/backend/commands/matview.c                |   2 +-
 src/backend/commands/policy.c                 | 260 +++++-----
 src/backend/commands/schemacmds.c             |   4 +-
 src/backend/commands/sequence.c               |  16 +-
 src/backend/commands/tablecmds.c              |  61 +--
 src/backend/commands/trigger.c                |   4 +-
 src/backend/commands/typecmds.c               |  40 +-
 src/backend/commands/user.c                   |  27 +-
 src/backend/commands/vacuum.c                 |  11 +-
 src/backend/commands/vacuumlazy.c             |  11 +-
 src/backend/executor/execAmi.c                |   8 +-
 src/backend/executor/execIndexing.c           |  26 +-
 src/backend/executor/execMain.c               |  94 ++--
 src/backend/executor/execQual.c               |  14 +-
 src/backend/executor/execUtils.c              |   2 +-
 src/backend/executor/nodeAgg.c                | 132 ++---
 src/backend/executor/nodeBitmapHeapscan.c     |   2 +-
 src/backend/executor/nodeGroup.c              |   2 +-
 src/backend/executor/nodeHash.c               |  56 +-
 src/backend/executor/nodeIndexonlyscan.c      |   4 +-
 src/backend/executor/nodeIndexscan.c          |   6 +-
 src/backend/executor/nodeLockRows.c           |  11 +-
 src/backend/executor/nodeMaterial.c           |   2 +-
 src/backend/executor/nodeMergeAppend.c        |   8 +-
 src/backend/executor/nodeMergejoin.c          |   4 +-
 src/backend/executor/nodeModifyTable.c        |  55 +-
 src/backend/executor/nodeSamplescan.c         |  13 +-
 src/backend/executor/nodeSort.c               |   2 +-
 src/backend/executor/nodeWindowAgg.c          |   2 +-
 src/backend/executor/spi.c                    |  14 +-
 src/backend/lib/bipartite_match.c             |  16 +-
 src/backend/lib/hyperloglog.c                 |   6 +-
 src/backend/lib/pairingheap.c                 |   4 +-
 src/backend/libpq/auth.c                      |   4 +-
 src/backend/libpq/be-secure-openssl.c         |  33 +-
 src/backend/libpq/be-secure.c                 |  14 +-
 src/backend/libpq/hba.c                       |   4 +-
 src/backend/libpq/pqcomm.c                    |   4 +-
 src/backend/libpq/pqmq.c                      |  31 +-
 src/backend/nodes/copyfuncs.c                 |   6 +-
 src/backend/nodes/makefuncs.c                 |   2 +-
 src/backend/nodes/nodeFuncs.c                 |  10 +-
 src/backend/optimizer/path/allpaths.c         |   8 +-
 src/backend/optimizer/path/costsize.c         |   6 +-
 src/backend/optimizer/plan/analyzejoins.c     |  14 +-
 src/backend/optimizer/plan/createplan.c       |   6 +-
 src/backend/optimizer/plan/planner.c          | 150 +++---
 src/backend/optimizer/plan/setrefs.c          |   2 +-
 src/backend/optimizer/util/clauses.c          |   4 +-
 src/backend/optimizer/util/pathnode.c         |   2 +-
 src/backend/optimizer/util/plancat.c          |  25 +-
 src/backend/optimizer/util/var.c              |  11 +-
 src/backend/parser/analyze.c                  |  16 +-
 src/backend/parser/parse_agg.c                | 112 ++--
 src/backend/parser/parse_clause.c             | 169 +++---
 src/backend/parser/parse_func.c               |  55 +-
 src/backend/parser/parse_relation.c           |  56 +-
 src/backend/parser/parse_type.c               |   2 +-
 src/backend/parser/parse_utilcmd.c            |   6 +-
 src/backend/port/atomics.c                    |  15 +-
 src/backend/port/sysv_shmem.c                 |   2 +-
 src/backend/port/win32_latch.c                |   2 +-
 src/backend/port/win32_sema.c                 |   1 +
 src/backend/postmaster/autovacuum.c           |  10 +-
 src/backend/postmaster/bgworker.c             |  21 +-
 src/backend/postmaster/pgstat.c               |   2 +-
 src/backend/postmaster/postmaster.c           |   8 +-
 src/backend/replication/basebackup.c          |  21 +-
 .../libpqwalreceiver/libpqwalreceiver.c       |  14 +-
 src/backend/replication/logical/decode.c      |   4 +-
 src/backend/replication/logical/logical.c     |   4 +-
 .../replication/logical/logicalfuncs.c        |   2 +-
 src/backend/replication/logical/origin.c      | 156 +++---
 .../replication/logical/reorderbuffer.c       |  29 +-
 src/backend/replication/logical/snapbuild.c   |  27 +-
 src/backend/replication/slot.c                |   6 +-
 src/backend/replication/slotfuncs.c           |   6 +-
 src/backend/replication/walreceiverfuncs.c    |   2 +-
 src/backend/replication/walsender.c           |  11 +-
 src/backend/rewrite/rewriteHandler.c          |  32 +-
 src/backend/rewrite/rewriteManip.c            |   2 +-
 src/backend/rewrite/rowsecurity.c             | 196 +++----
 src/backend/storage/buffer/buf_init.c         |   6 +-
 src/backend/storage/buffer/bufmgr.c           |  42 +-
 src/backend/storage/buffer/freelist.c         |  13 +-
 src/backend/storage/file/fd.c                 |   4 +-
 src/backend/storage/file/reinit.c             |  10 +-
 src/backend/storage/ipc/dsm_impl.c            |   8 +-
 src/backend/storage/ipc/procarray.c           |  10 +-
 src/backend/storage/ipc/shm_mq.c              |  20 +-
 src/backend/storage/ipc/sinval.c              |   4 +-
 src/backend/storage/lmgr/lwlock.c             |  74 +--
 src/backend/storage/lmgr/proc.c               |   1 +
 src/backend/storage/page/bufpage.c            |   8 +-
 src/backend/storage/smgr/md.c                 |   4 +-
 src/backend/tcop/postgres.c                   |  20 +-
 src/backend/tcop/utility.c                    |  65 +--
 src/backend/tsearch/spell.c                   |   4 +-
 src/backend/utils/adt/acl.c                   |   4 +-
 src/backend/utils/adt/array_userfuncs.c       |  19 +-
 src/backend/utils/adt/formatting.c            |  54 +-
 src/backend/utils/adt/json.c                  |   6 +-
 src/backend/utils/adt/jsonb.c                 | 115 +++--
 src/backend/utils/adt/jsonb_util.c            |  11 +-
 src/backend/utils/adt/jsonfuncs.c             | 103 ++--
 src/backend/utils/adt/levenshtein.c           |   4 +-
 src/backend/utils/adt/lockfuncs.c             |   2 +-
 src/backend/utils/adt/misc.c                  |   2 +-
 src/backend/utils/adt/network_gist.c          |   8 +-
 src/backend/utils/adt/numeric.c               |  59 +--
 src/backend/utils/adt/pg_locale.c             |   2 +-
 src/backend/utils/adt/pg_upgrade_support.c    |  26 +-
 src/backend/utils/adt/pgstatfuncs.c           |  20 +-
 src/backend/utils/adt/rangetypes_spgist.c     |  30 +-
 src/backend/utils/adt/regexp.c                |   5 +-
 src/backend/utils/adt/regproc.c               |   8 +-
 src/backend/utils/adt/ri_triggers.c           |  16 +-
 src/backend/utils/adt/ruleutils.c             |  73 +--
 src/backend/utils/adt/tsquery_op.c            |   5 +-
 src/backend/utils/adt/txid.c                  |   6 +-
 src/backend/utils/adt/varlena.c               | 184 +++----
 src/backend/utils/adt/xml.c                   |   4 +-
 src/backend/utils/cache/inval.c               |  11 +-
 src/backend/utils/cache/lsyscache.c           |   4 +-
 src/backend/utils/cache/plancache.c           |   4 +-
 src/backend/utils/cache/relcache.c            |  14 +-
 src/backend/utils/cache/syscache.c            |  38 +-
 src/backend/utils/error/elog.c                |   4 +-
 src/backend/utils/fmgr/dfmgr.c                |   4 +-
 src/backend/utils/fmgr/funcapi.c              |   9 +-
 src/backend/utils/init/miscinit.c             |   1 +
 src/backend/utils/init/postinit.c             |   2 +-
 .../utils/mb/Unicode/UCS_to_GB18030.pl        |   4 +-
 .../utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl |   4 +-
 .../conversion_procs/euc_tw_and_big5/big5.c   |   8 +-
 src/backend/utils/misc/guc.c                  | 115 ++---
 src/backend/utils/misc/rls.c                  |  35 +-
 src/backend/utils/misc/sampling.c             |   4 +-
 src/backend/utils/sort/sortsupport.c          |   4 +-
 src/backend/utils/sort/tuplesort.c            | 116 +++--
 src/backend/utils/time/combocid.c             |   7 +-
 src/backend/utils/time/snapmgr.c              |  33 +-
 src/backend/utils/time/tqual.c                |  11 +-
 src/bin/pg_basebackup/pg_basebackup.c         |  18 +-
 src/bin/pg_basebackup/pg_receivexlog.c        |  23 +-
 src/bin/pg_basebackup/receivelog.c            | 115 +++--
 src/bin/pg_basebackup/receivelog.h            |   2 +-
 src/bin/pg_basebackup/streamutil.c            |  14 +-
 src/bin/pg_basebackup/streamutil.h            |  12 +-
 src/bin/pg_basebackup/t/010_pg_basebackup.pl  |  24 +-
 src/bin/pg_ctl/pg_ctl.c                       |  13 +-
 src/bin/pg_ctl/t/001_start_stop.pl            |   4 +-
 src/bin/pg_ctl/t/002_status.pl                |   2 +-
 src/bin/pg_dump/pg_dump.c                     | 126 ++---
 src/bin/pg_dump/pg_dump.h                     |   2 +-
 src/bin/pg_dump/pg_dumpall.c                  |   2 +-
 src/bin/pg_resetxlog/pg_resetxlog.c           |   6 +-
 src/bin/pg_rewind/RewindTest.pm               | 148 ++++--
 src/bin/pg_rewind/filemap.c                   |  19 +-
 src/bin/pg_rewind/filemap.h                   |  28 +-
 src/bin/pg_rewind/parsexlog.c                 |  10 +-
 src/bin/pg_rewind/pg_rewind.c                 |   8 +-
 src/bin/pg_rewind/t/001_basic.pl              |  20 +-
 src/bin/pg_rewind/t/002_databases.pl          |   8 +-
 src/bin/pg_rewind/t/003_extrafiles.pl         |  56 +-
 src/bin/pg_upgrade/check.c                    |  29 +-
 src/bin/pg_upgrade/dump.c                     |   8 +-
 src/bin/pg_upgrade/info.c                     | 160 +++---
 src/bin/pg_upgrade/option.c                   |  12 +-
 src/bin/pg_upgrade/pg_upgrade.c               |   6 +-
 src/bin/pg_upgrade/pg_upgrade.h               |   6 +-
 src/bin/pg_upgrade/relfilenode.c              |   8 +-
 src/bin/pg_upgrade/server.c                   |  11 +-
 src/bin/pg_upgrade/version.c                  |   4 +-
 src/bin/pg_xlogdump/pg_xlogdump.c             |  30 +-
 src/bin/pgbench/pgbench.c                     | 119 +++--
 src/bin/pgbench/pgbench.h                     |  34 +-
 src/bin/psql/command.c                        |  31 +-
 src/bin/psql/common.c                         |   9 +-
 src/bin/psql/common.h                         |   2 +-
 src/bin/psql/copy.c                           |   2 +
 src/bin/psql/describe.c                       |  36 +-
 src/bin/psql/help.c                           |  40 +-
 src/bin/psql/print.c                          |  60 ++-
 src/bin/psql/print.h                          |   8 +-
 src/bin/psql/startup.c                        |   2 +-
 src/bin/psql/tab-complete.c                   |  54 +-
 src/bin/scripts/common.c                      |   2 +-
 src/bin/scripts/reindexdb.c                   |  20 +-
 src/bin/scripts/t/102_vacuumdb_stages.pl      |   2 +-
 src/bin/scripts/vacuumdb.c                    |   4 +-
 src/common/restricted_token.c                 |   2 +-
 src/include/access/brin_page.h                |   4 +-
 src/include/access/commit_ts.h                |   8 +-
 src/include/access/gin.h                      |   2 +-
 src/include/access/gist_private.h             |   4 +-
 src/include/access/hash.h                     |   2 +-
 src/include/access/heapam.h                   |   2 +-
 src/include/access/htup_details.h             |   2 +-
 src/include/access/multixact.h                |   2 +-
 src/include/access/parallel.h                 |  18 +-
 src/include/access/relscan.h                  |   4 +-
 src/include/access/stratnum.h                 |   2 +-
 src/include/access/tablesample.h              |  27 +-
 src/include/access/xact.h                     |  52 +-
 src/include/access/xlog.h                     |   6 +-
 src/include/access/xloginsert.h               |  15 +-
 src/include/access/xlogreader.h               |   2 +-
 src/include/access/xlogrecord.h               |  18 +-
 src/include/access/xlogutils.h                |  10 +-
 src/include/bootstrap/bootstrap.h             |   2 +-
 src/include/catalog/binary_upgrade.h          |   2 +-
 src/include/catalog/index.h                   |   4 +-
 src/include/catalog/indexing.h                |   2 +-
 src/include/catalog/objectaddress.h           |   2 +-
 src/include/catalog/opfam_internal.h          |   2 +-
 src/include/catalog/pg_aggregate.h            |  30 +-
 src/include/catalog/pg_amop.h                 | 476 ++++++++---------
 src/include/catalog/pg_amproc.h               | 368 ++++++-------
 src/include/catalog/pg_attribute.h            |  12 +-
 src/include/catalog/pg_cast.h                 |   2 +-
 src/include/catalog/pg_class.h                |   2 +-
 src/include/catalog/pg_control.h              |   6 +-
 src/include/catalog/pg_description.h          |   2 +-
 src/include/catalog/pg_extension.h            |   2 +-
 src/include/catalog/pg_largeobject.h          |   2 +-
 src/include/catalog/pg_opclass.h              |  18 +-
 src/include/catalog/pg_operator.h             |   4 +-
 src/include/catalog/pg_pltemplate.h           |   5 +-
 src/include/catalog/pg_policy.h               |  20 +-
 src/include/catalog/pg_proc.h                 | 142 ++---
 src/include/catalog/pg_replication_origin.h   |   6 +-
 src/include/catalog/pg_seclabel.h             |   4 +-
 src/include/catalog/pg_shdescription.h        |   2 +-
 src/include/catalog/pg_shseclabel.h           |   4 +-
 src/include/catalog/pg_tablesample_method.h   |  21 +-
 src/include/catalog/pg_transform.h            |   2 +-
 src/include/catalog/pg_trigger.h              |   2 +-
 src/include/catalog/pg_type.h                 |   6 +-
 src/include/commands/defrem.h                 |   6 +-
 src/include/commands/event_trigger.h          |   2 +-
 src/include/commands/explain.h                |   2 +-
 src/include/commands/vacuum.h                 |  20 +-
 src/include/common/fe_memutils.h              |   4 +-
 src/include/common/pg_lzcompress.h            |   2 +-
 src/include/common/restricted_token.h         |   8 +-
 src/include/common/string.h                   |   4 +-
 src/include/executor/executor.h               |   2 +-
 src/include/executor/hashjoin.h               |  18 +-
 src/include/fmgr.h                            |   2 +-
 src/include/funcapi.h                         |   2 +-
 src/include/lib/bipartite_match.h             |   2 +-
 src/include/lib/hyperloglog.h                 |   2 +-
 src/include/lib/pairingheap.h                 |  14 +-
 src/include/libpq/libpq-be.h                  |   4 +-
 src/include/libpq/libpq.h                     |  22 +-
 src/include/libpq/pqmq.h                      |   2 +-
 src/include/nodes/execnodes.h                 |  22 +-
 src/include/nodes/nodes.h                     |   6 +-
 src/include/nodes/parsenodes.h                |  57 +-
 src/include/nodes/plannodes.h                 |   6 +-
 src/include/nodes/primnodes.h                 |  11 +-
 src/include/optimizer/pathnode.h              |   2 +-
 src/include/optimizer/prep.h                  |   2 +-
 src/include/optimizer/tlist.h                 |   2 +-
 src/include/parser/parse_clause.h             |   2 +-
 src/include/parser/parse_func.h               |   6 +-
 src/include/parser/parse_relation.h           |  12 +-
 src/include/pgstat.h                          |  32 +-
 src/include/port/atomics.h                    |  39 +-
 src/include/port/atomics/arch-ia64.h          |   6 +-
 src/include/port/atomics/arch-x86.h           | 131 ++---
 src/include/port/atomics/fallback.h           |  25 +-
 src/include/port/atomics/generic-acc.h        |  45 +-
 src/include/port/atomics/generic-gcc.h        |  49 +-
 src/include/port/atomics/generic-msvc.h       |  25 +-
 src/include/port/atomics/generic-sunpro.h     |  31 +-
 src/include/port/atomics/generic-xlc.h        |  35 +-
 src/include/port/atomics/generic.h            |  52 +-
 src/include/port/pg_crc32c.h                  |   2 +-
 src/include/postmaster/bgworker.h             |   2 +-
 src/include/replication/origin.h              |  24 +-
 src/include/replication/output_plugin.h       |   2 +-
 src/include/replication/reorderbuffer.h       |   4 +-
 src/include/replication/walsender.h           |   2 +-
 src/include/rewrite/rowsecurity.h             |  28 +-
 src/include/storage/lmgr.h                    |   6 +-
 src/include/storage/lock.h                    |   2 +-
 src/include/storage/shm_mq.h                  |   6 +-
 src/include/tcop/deparse_utility.h            |  40 +-
 src/include/tcop/fastpath.h                   |   2 +-
 src/include/utils/acl.h                       |   2 +-
 src/include/utils/aclchk_internal.h           |   2 +-
 src/include/utils/builtins.h                  |   6 +-
 src/include/utils/guc.h                       |   3 +-
 src/include/utils/guc_tables.h                |   2 +-
 src/include/utils/jsonapi.h                   |   2 +-
 src/include/utils/jsonb.h                     |  10 +-
 src/include/utils/lsyscache.h                 |   4 +-
 src/include/utils/palloc.h                    |   2 +-
 src/include/utils/pg_crc.h                    |   2 +-
 src/include/utils/plancache.h                 |   2 +-
 src/include/utils/rls.h                       |  18 +-
 src/include/utils/ruleutils.h                 |   2 +-
 src/include/utils/sampling.h                  |  10 +-
 src/include/utils/selfuncs.h                  |   2 +-
 src/include/utils/snapshot.h                  |   2 +-
 src/include/utils/sortsupport.h               |  83 +--
 src/interfaces/ecpg/ecpglib/data.c            |   3 +-
 src/interfaces/ecpg/ecpglib/execute.c         |  14 +-
 src/interfaces/ecpg/ecpglib/memory.c          |   2 +-
 src/interfaces/ecpg/preproc/parse.pl          |  21 +-
 src/interfaces/libpq/fe-connect.c             |  11 +-
 src/interfaces/libpq/fe-misc.c                |  14 +-
 src/interfaces/libpq/fe-secure-openssl.c      |  86 ++--
 src/interfaces/libpq/fe-secure.c              |  18 +-
 src/pl/plperl/plperl.c                        |   2 +-
 src/pl/plpython/plpy_procedure.c              |   5 +-
 src/pl/plpython/plpy_typeio.c                 |  90 ++--
 src/port/gettimeofday.c                       |  22 +-
 src/port/pg_crc32c_choose.c                   |   4 +-
 src/port/pg_crc32c_sse42.c                    |   3 +-
 src/port/win32setlocale.c                     |   8 +-
 .../test_ddl_deparse/test_ddl_deparse.c       |   8 +-
 .../modules/test_rls_hooks/test_rls_hooks.c   |  84 +--
 src/test/perl/TestLib.pm                      |  13 +-
 src/test/regress/pg_regress.c                 |  19 +-
 src/test/regress/regress.c                    |  22 +-
 src/test/ssl/ServerSetup.pm                   | 105 ++--
 src/test/ssl/t/001_ssltests.pl                | 110 ++--
 src/tools/msvc/Install.pm                     |  24 +-
 src/tools/msvc/Mkvcbuild.pm                   | 126 +++--
 src/tools/msvc/Project.pm                     |   1 +
 src/tools/msvc/Solution.pm                    |  19 +-
 src/tools/msvc/VCBuildProject.pm              |   2 +-
 src/tools/msvc/VSObjectFactory.pm             |   5 +-
 src/tools/msvc/config_default.pl              |  34 +-
 src/tools/msvc/vcregress.pl                   |  42 +-
 414 files changed, 5830 insertions(+), 5328 deletions(-)

diff --git a/contrib/btree_gin/btree_gin.c b/contrib/btree_gin/btree_gin.c
index 6e3bf172e50..f74e912ed74 100644
--- a/contrib/btree_gin/btree_gin.c
+++ b/contrib/btree_gin/btree_gin.c
@@ -113,12 +113,12 @@ gin_btree_compare_prefix(FunctionCallInfo fcinfo)
 				cmp;
 
 	cmp = DatumGetInt32(DirectFunctionCall2Coll(
-				data->typecmp,
-				PG_GET_COLLATION(),
-				(data->strategy == BTLessStrategyNumber ||
-				 data->strategy == BTLessEqualStrategyNumber)
-				 ? data->datum : a,
-				b));
+												data->typecmp,
+												PG_GET_COLLATION(),
+								   (data->strategy == BTLessStrategyNumber ||
+								 data->strategy == BTLessEqualStrategyNumber)
+												? data->datum : a,
+												b));
 
 	switch (data->strategy)
 	{
@@ -186,14 +186,14 @@ Datum																		\
 gin_extract_value_##type(PG_FUNCTION_ARGS)									\
 {																			\
 	return gin_btree_extract_value(fcinfo, is_varlena);						\
-}																			\
+}	\
 PG_FUNCTION_INFO_V1(gin_extract_query_##type);								\
 Datum																		\
 gin_extract_query_##type(PG_FUNCTION_ARGS)									\
 {																			\
 	return gin_btree_extract_query(fcinfo,									\
 								   is_varlena, leftmostvalue, typecmp);		\
-}																			\
+}	\
 PG_FUNCTION_INFO_V1(gin_compare_prefix_##type);								\
 Datum																		\
 gin_compare_prefix_##type(PG_FUNCTION_ARGS)									\
@@ -209,6 +209,7 @@ leftmostvalue_int2(void)
 {
 	return Int16GetDatum(SHRT_MIN);
 }
+
 GIN_SUPPORT(int2, false, leftmostvalue_int2, btint2cmp)
 
 static Datum
@@ -216,6 +217,7 @@ leftmostvalue_int4(void)
 {
 	return Int32GetDatum(INT_MIN);
 }
+
 GIN_SUPPORT(int4, false, leftmostvalue_int4, btint4cmp)
 
 static Datum
@@ -226,6 +228,7 @@ leftmostvalue_int8(void)
 	 */
 	return Int64GetDatum(SEQ_MINVALUE);
 }
+
 GIN_SUPPORT(int8, false, leftmostvalue_int8, btint8cmp)
 
 static Datum
@@ -233,6 +236,7 @@ leftmostvalue_float4(void)
 {
 	return Float4GetDatum(-get_float4_infinity());
 }
+
 GIN_SUPPORT(float4, false, leftmostvalue_float4, btfloat4cmp)
 
 static Datum
@@ -240,6 +244,7 @@ leftmostvalue_float8(void)
 {
 	return Float8GetDatum(-get_float8_infinity());
 }
+
 GIN_SUPPORT(float8, false, leftmostvalue_float8, btfloat8cmp)
 
 static Datum
@@ -250,6 +255,7 @@ leftmostvalue_money(void)
 	 */
 	return Int64GetDatum(SEQ_MINVALUE);
 }
+
 GIN_SUPPORT(money, false, leftmostvalue_money, cash_cmp)
 
 static Datum
@@ -257,6 +263,7 @@ leftmostvalue_oid(void)
 {
 	return ObjectIdGetDatum(0);
 }
+
 GIN_SUPPORT(oid, false, leftmostvalue_oid, btoidcmp)
 
 static Datum
@@ -264,6 +271,7 @@ leftmostvalue_timestamp(void)
 {
 	return TimestampGetDatum(DT_NOBEGIN);
 }
+
 GIN_SUPPORT(timestamp, false, leftmostvalue_timestamp, timestamp_cmp)
 
 GIN_SUPPORT(timestamptz, false, leftmostvalue_timestamp, timestamp_cmp)
@@ -273,6 +281,7 @@ leftmostvalue_time(void)
 {
 	return TimeADTGetDatum(0);
 }
+
 GIN_SUPPORT(time, false, leftmostvalue_time, time_cmp)
 
 static Datum
@@ -285,6 +294,7 @@ leftmostvalue_timetz(void)
 
 	return TimeTzADTPGetDatum(v);
 }
+
 GIN_SUPPORT(timetz, false, leftmostvalue_timetz, timetz_cmp)
 
 static Datum
@@ -292,6 +302,7 @@ leftmostvalue_date(void)
 {
 	return DateADTGetDatum(DATEVAL_NOBEGIN);
 }
+
 GIN_SUPPORT(date, false, leftmostvalue_date, date_cmp)
 
 static Datum
@@ -304,6 +315,7 @@ leftmostvalue_interval(void)
 	v->month = 0;
 	return IntervalPGetDatum(v);
 }
+
 GIN_SUPPORT(interval, false, leftmostvalue_interval, interval_cmp)
 
 static Datum
@@ -313,6 +325,7 @@ leftmostvalue_macaddr(void)
 
 	return MacaddrPGetDatum(v);
 }
+
 GIN_SUPPORT(macaddr, false, leftmostvalue_macaddr, macaddr_cmp)
 
 static Datum
@@ -320,6 +333,7 @@ leftmostvalue_inet(void)
 {
 	return DirectFunctionCall1(inet_in, CStringGetDatum("0.0.0.0/0"));
 }
+
 GIN_SUPPORT(inet, true, leftmostvalue_inet, network_cmp)
 
 GIN_SUPPORT(cidr, true, leftmostvalue_inet, network_cmp)
@@ -329,6 +343,7 @@ leftmostvalue_text(void)
 {
 	return PointerGetDatum(cstring_to_text_with_len("", 0));
 }
+
 GIN_SUPPORT(text, true, leftmostvalue_text, bttextcmp)
 
 static Datum
@@ -336,6 +351,7 @@ leftmostvalue_char(void)
 {
 	return CharGetDatum(SCHAR_MIN);
 }
+
 GIN_SUPPORT(char, false, leftmostvalue_char, btcharcmp)
 
 GIN_SUPPORT(bytea, true, leftmostvalue_text, byteacmp)
@@ -348,6 +364,7 @@ leftmostvalue_bit(void)
 							   ObjectIdGetDatum(0),
 							   Int32GetDatum(-1));
 }
+
 GIN_SUPPORT(bit, true, leftmostvalue_bit, bitcmp)
 
 static Datum
@@ -358,6 +375,7 @@ leftmostvalue_varbit(void)
 							   ObjectIdGetDatum(0),
 							   Int32GetDatum(-1));
 }
+
 GIN_SUPPORT(varbit, true, leftmostvalue_varbit, bitcmp)
 
 /*
@@ -402,4 +420,5 @@ leftmostvalue_numeric(void)
 {
 	return PointerGetDatum(NULL);
 }
+
 GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp)
diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c
index 5bfe659f917..99cb41f5f57 100644
--- a/contrib/btree_gist/btree_utils_num.c
+++ b/contrib/btree_gist/btree_utils_num.c
@@ -13,7 +13,7 @@
 GISTENTRY *
 gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo)
 {
-	GISTENTRY *retval;
+	GISTENTRY  *retval;
 
 	if (entry->leafkey)
 	{
diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c
index 78e8662addd..8105a3b0350 100644
--- a/contrib/btree_gist/btree_utils_var.c
+++ b/contrib/btree_gist/btree_utils_var.c
@@ -71,7 +71,7 @@ gbt_var_key_readable(const GBT_VARKEY *k)
  * Create a leaf-entry to store in the index, from a single Datum.
  */
 static GBT_VARKEY *
-gbt_var_key_from_datum(const struct varlena *u)
+gbt_var_key_from_datum(const struct varlena * u)
 {
 	int32		lowersize = VARSIZE(u);
 	GBT_VARKEY *r;
diff --git a/contrib/fuzzystrmatch/dmetaphone.c b/contrib/fuzzystrmatch/dmetaphone.c
index 7c8457e7344..147c8501ee8 100644
--- a/contrib/fuzzystrmatch/dmetaphone.c
+++ b/contrib/fuzzystrmatch/dmetaphone.c
@@ -195,7 +195,7 @@ dmetaphone_alt(PG_FUNCTION_ARGS)
  * in a case like this.
  */
 
-#define META_FREE(x) ((void)true) /* pfree((x)) */
+#define META_FREE(x) ((void)true)		/* pfree((x)) */
 #else							/* not defined DMETAPHONE_MAIN */
 
 /* use the standard malloc library when not running in PostgreSQL */
diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c
index dde37fb6e60..0fb769de7da 100644
--- a/contrib/hstore/hstore_gist.c
+++ b/contrib/hstore/hstore_gist.c
@@ -72,7 +72,7 @@ typedef struct
 static pg_crc32
 crc32_sz(char *buf, int size)
 {
-	pg_crc32 crc;
+	pg_crc32	crc;
 
 	INIT_TRADITIONAL_CRC32(crc);
 	COMP_TRADITIONAL_CRC32(crc, buf, size);
diff --git a/contrib/hstore_plperl/hstore_plperl.c b/contrib/hstore_plperl/hstore_plperl.c
index cdc224c30e0..dcc74b12e83 100644
--- a/contrib/hstore_plperl/hstore_plperl.c
+++ b/contrib/hstore_plperl/hstore_plperl.c
@@ -9,7 +9,7 @@ PG_MODULE_MAGIC;
 
 
 PG_FUNCTION_INFO_V1(hstore_to_plperl);
-Datum hstore_to_plperl(PG_FUNCTION_ARGS);
+Datum		hstore_to_plperl(PG_FUNCTION_ARGS);
 
 Datum
 hstore_to_plperl(PG_FUNCTION_ARGS)
@@ -26,10 +26,10 @@ hstore_to_plperl(PG_FUNCTION_ARGS)
 	for (i = 0; i < count; i++)
 	{
 		const char *key;
-		SV	   *value;
+		SV		   *value;
 
 		key = pnstrdup(HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
-		value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base,i), HS_VALLEN(entries, i)));
+		value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base, i), HS_VALLEN(entries, i)));
 
 		(void) hv_store(hv, key, strlen(key), value, 0);
 	}
@@ -39,7 +39,7 @@ hstore_to_plperl(PG_FUNCTION_ARGS)
 
 
 PG_FUNCTION_INFO_V1(plperl_to_hstore);
-Datum plperl_to_hstore(PG_FUNCTION_ARGS);
+Datum		plperl_to_hstore(PG_FUNCTION_ARGS);
 
 Datum
 plperl_to_hstore(PG_FUNCTION_ARGS)
@@ -61,8 +61,8 @@ plperl_to_hstore(PG_FUNCTION_ARGS)
 	i = 0;
 	while ((he = hv_iternext(hv)))
 	{
-		char	 *key = sv2cstr(HeSVKEY_force(he));
-		SV		 *value = HeVAL(he);
+		char	   *key = sv2cstr(HeSVKEY_force(he));
+		SV		   *value = HeVAL(he);
 
 		pairs[i].key = pstrdup(key);
 		pairs[i].keylen = hstoreCheckKeyLen(strlen(pairs[i].key));
diff --git a/contrib/hstore_plpython/hstore_plpython.c b/contrib/hstore_plpython/hstore_plpython.c
index 92cd4f800f6..94404a50617 100644
--- a/contrib/hstore_plpython/hstore_plpython.c
+++ b/contrib/hstore_plpython/hstore_plpython.c
@@ -8,7 +8,7 @@ PG_MODULE_MAGIC;
 
 
 PG_FUNCTION_INFO_V1(hstore_to_plpython);
-Datum hstore_to_plpython(PG_FUNCTION_ARGS);
+Datum		hstore_to_plpython(PG_FUNCTION_ARGS);
 
 Datum
 hstore_to_plpython(PG_FUNCTION_ARGS)
@@ -31,9 +31,9 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
 			PyDict_SetItem(dict, key, Py_None);
 		else
 		{
-			PyObject *value;
+			PyObject   *value;
 
-			value = PyString_FromStringAndSize(HS_VAL(entries, base,i), HS_VALLEN(entries, i));
+			value = PyString_FromStringAndSize(HS_VAL(entries, base, i), HS_VALLEN(entries, i));
 			PyDict_SetItem(dict, key, value);
 			Py_XDECREF(value);
 		}
@@ -45,7 +45,7 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
 
 
 PG_FUNCTION_INFO_V1(plpython_to_hstore);
-Datum plpython_to_hstore(PG_FUNCTION_ARGS);
+Datum		plpython_to_hstore(PG_FUNCTION_ARGS);
 
 Datum
 plpython_to_hstore(PG_FUNCTION_ARGS)
@@ -75,9 +75,9 @@ plpython_to_hstore(PG_FUNCTION_ARGS)
 
 		for (i = 0; i < pcount; i++)
 		{
-			PyObject *tuple;
-			PyObject *key;
-			PyObject *value;
+			PyObject   *tuple;
+			PyObject   *key;
+			PyObject   *value;
 
 			tuple = PyList_GetItem(items, i);
 			key = PyTuple_GetItem(tuple, 0);
diff --git a/contrib/ltree/crc32.c b/contrib/ltree/crc32.c
index 1c08d264f72..403dae0d7d4 100644
--- a/contrib/ltree/crc32.c
+++ b/contrib/ltree/crc32.c
@@ -26,13 +26,14 @@
 unsigned int
 ltree_crc32_sz(char *buf, int size)
 {
-	pg_crc32 crc;
+	pg_crc32	crc;
 	char	   *p = buf;
 
 	INIT_TRADITIONAL_CRC32(crc);
 	while (size > 0)
 	{
-		char c = (char) TOLOWER(*p);
+		char		c = (char) TOLOWER(*p);
+
 		COMP_TRADITIONAL_CRC32(crc, &c, 1);
 		size--;
 		p++;
diff --git a/contrib/ltree_plpython/ltree_plpython.c b/contrib/ltree_plpython/ltree_plpython.c
index 111e3e356e5..af166a720f0 100644
--- a/contrib/ltree_plpython/ltree_plpython.c
+++ b/contrib/ltree_plpython/ltree_plpython.c
@@ -7,7 +7,7 @@ PG_MODULE_MAGIC;
 
 
 PG_FUNCTION_INFO_V1(ltree_to_plpython);
-Datum ltree_to_plpython(PG_FUNCTION_ARGS);
+Datum		ltree_to_plpython(PG_FUNCTION_ARGS);
 
 Datum
 ltree_to_plpython(PG_FUNCTION_ARGS)
diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c
index bd3191d5d28..7adcfa89370 100644
--- a/contrib/pageinspect/brinfuncs.c
+++ b/contrib/pageinspect/brinfuncs.c
@@ -58,7 +58,7 @@ brin_page_type(PG_FUNCTION_ARGS)
 {
 	bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
 	Page		page = VARDATA(raw_page);
-	char *type;
+	char	   *type;
 
 	switch (BrinPageType(page))
 	{
@@ -86,8 +86,8 @@ brin_page_type(PG_FUNCTION_ARGS)
 static Page
 verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
 {
-	Page	page;
-	int		raw_page_size;
+	Page		page;
+	int			raw_page_size;
 
 	raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
 
@@ -95,7 +95,7 @@ verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 				 errmsg("input page too small"),
-				 errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
+			  errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
 
 	page = VARDATA(raw_page);
 
@@ -153,7 +153,7 @@ brin_page_items(PG_FUNCTION_ARGS)
 		indexRel = index_open(indexRelid, AccessShareLock);
 
 		state = palloc(offsetof(brin_page_state, columns) +
-					   sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
+			  sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
 
 		state->bdesc = brin_build_desc(indexRel);
 		state->page = page;
@@ -168,10 +168,10 @@ brin_page_items(PG_FUNCTION_ARGS)
 		 */
 		for (attno = 1; attno <= state->bdesc->bd_tupdesc->natts; attno++)
 		{
-			Oid		output;
-			bool	isVarlena;
+			Oid			output;
+			bool		isVarlena;
 			BrinOpcInfo *opcinfo;
-			int		i;
+			int			i;
 			brin_column_state *column;
 
 			opcinfo = state->bdesc->bd_info[attno - 1];
@@ -213,7 +213,7 @@ brin_page_items(PG_FUNCTION_ARGS)
 		 */
 		if (state->dtup == NULL)
 		{
-			BrinTuple	   *tup;
+			BrinTuple  *tup;
 			MemoryContext mctx;
 			ItemId		itemId;
 
@@ -225,8 +225,8 @@ brin_page_items(PG_FUNCTION_ARGS)
 			if (ItemIdIsUsed(itemId))
 			{
 				tup = (BrinTuple *) PageGetItem(state->page,
-											  PageGetItemId(state->page,
-															state->offset));
+												PageGetItemId(state->page,
+															  state->offset));
 				state->dtup = brin_deform_tuple(state->bdesc, tup);
 				state->attno = 1;
 				state->unusedItem = false;
@@ -253,7 +253,7 @@ brin_page_items(PG_FUNCTION_ARGS)
 		}
 		else
 		{
-			int		att = state->attno - 1;
+			int			att = state->attno - 1;
 
 			values[0] = UInt16GetDatum(state->offset);
 			values[1] = UInt32GetDatum(state->dtup->bt_blkno);
@@ -263,8 +263,8 @@ brin_page_items(PG_FUNCTION_ARGS)
 			values[5] = BoolGetDatum(state->dtup->bt_placeholder);
 			if (!state->dtup->bt_columns[att].bv_allnulls)
 			{
-				BrinValues   *bvalues = &state->dtup->bt_columns[att];
-				StringInfoData	s;
+				BrinValues *bvalues = &state->dtup->bt_columns[att];
+				StringInfoData s;
 				bool		first;
 				int			i;
 
@@ -274,7 +274,7 @@ brin_page_items(PG_FUNCTION_ARGS)
 				first = true;
 				for (i = 0; i < state->columns[att]->nstored; i++)
 				{
-					char   *val;
+					char	   *val;
 
 					if (!first)
 						appendStringInfoString(&s, " .. ");
@@ -312,8 +312,8 @@ brin_page_items(PG_FUNCTION_ARGS)
 		}
 
 		/*
-		 * If we're beyond the end of the page, set flag to end the function in
-		 * the following iteration.
+		 * If we're beyond the end of the page, set flag to end the function
+		 * in the following iteration.
 		 */
 		if (state->offset > PageGetMaxOffsetNumber(state->page))
 			state->done = true;
@@ -366,8 +366,8 @@ brin_revmap_data(PG_FUNCTION_ARGS)
 	struct
 	{
 		ItemPointerData *tids;
-		int		idx;
-	} *state;
+		int			idx;
+	}		   *state;
 	FuncCallContext *fctx;
 
 	if (!superuser())
diff --git a/contrib/pageinspect/ginfuncs.c b/contrib/pageinspect/ginfuncs.c
index 701b2ca763c..c0de3be8df8 100644
--- a/contrib/pageinspect/ginfuncs.c
+++ b/contrib/pageinspect/ginfuncs.c
@@ -167,7 +167,7 @@ typedef struct gin_leafpage_items_state
 	TupleDesc	tupd;
 	GinPostingList *seg;
 	GinPostingList *lastseg;
-}	gin_leafpage_items_state;
+} gin_leafpage_items_state;
 
 Datum
 gin_leafpage_items(PG_FUNCTION_ARGS)
diff --git a/contrib/pg_audit/pg_audit.c b/contrib/pg_audit/pg_audit.c
index 4b75fefc34a..a4b05a6df11 100644
--- a/contrib/pg_audit/pg_audit.c
+++ b/contrib/pg_audit/pg_audit.c
@@ -40,11 +40,11 @@
 
 PG_MODULE_MAGIC;
 
-void _PG_init(void);
+void		_PG_init(void);
 
 /* Prototypes for functions used with event triggers */
-Datum pg_audit_ddl_command_end(PG_FUNCTION_ARGS);
-Datum pg_audit_sql_drop(PG_FUNCTION_ARGS);
+Datum		pg_audit_ddl_command_end(PG_FUNCTION_ARGS);
+Datum		pg_audit_sql_drop(PG_FUNCTION_ARGS);
 
 PG_FUNCTION_INFO_V1(pg_audit_ddl_command_end);
 PG_FUNCTION_INFO_V1(pg_audit_sql_drop);
@@ -67,14 +67,14 @@ PG_FUNCTION_INFO_V1(pg_audit_sql_drop);
 #define LOG_ROLE		(1 << 4)	/* GRANT/REVOKE, CREATE/ALTER/DROP ROLE */
 #define LOG_WRITE		(1 << 5)	/* INSERT, UPDATE, DELETE, TRUNCATE */
 
-#define LOG_NONE		0			/* nothing */
+#define LOG_NONE		0		/* nothing */
 #define LOG_ALL			(0xFFFFFFFF)	/* All */
 
 /* GUC variable for pg_audit.log, which defines the classes to log. */
-char *auditLog = NULL;
+char	   *auditLog = NULL;
 
 /* Bitmap of classes selected */
-static int auditLogBitmap = LOG_NONE;
+static int	auditLogBitmap = LOG_NONE;
 
 /*
  * String constants for log classes - used when processing tokens in the
@@ -97,7 +97,7 @@ static int auditLogBitmap = LOG_NONE;
  * the query are in pg_catalog.  Interactive sessions (eg: psql) can cause
  * a lot of noise in the logs which might be uninteresting.
  */
-bool auditLogCatalog = true;
+bool		auditLogCatalog = true;
 
 /*
  * GUC variable for pg_audit.log_level
@@ -106,8 +106,8 @@ bool auditLogCatalog = true;
  * at.  The default level is LOG, which goes into the server log but does
  * not go to the client.  Set to NOTICE in the regression tests.
  */
-char *auditLogLevelString = NULL;
-int auditLogLevel = LOG;
+char	   *auditLogLevelString = NULL;
+int			auditLogLevel = LOG;
 
 /*
  * GUC variable for pg_audit.log_parameter
@@ -115,7 +115,7 @@ int auditLogLevel = LOG;
  * Administrators can choose if parameters passed into a statement are
  * included in the audit log.
  */
-bool auditLogParameter = false;
+bool		auditLogParameter = false;
 
 /*
  * GUC variable for pg_audit.log_relation
@@ -124,7 +124,7 @@ bool auditLogParameter = false;
  * in READ/WRITE class queries.  By default, SESSION logs include the query but
  * do not have a log entry for each relation.
  */
-bool auditLogRelation = false;
+bool		auditLogRelation = false;
 
 /*
  * GUC variable for pg_audit.log_statement_once
@@ -134,7 +134,7 @@ bool auditLogRelation = false;
  * the audit log to facilitate searching, but this can cause the log to be
  * unnecessairly bloated in some environments.
  */
-bool auditLogStatementOnce = false;
+bool		auditLogStatementOnce = false;
 
 /*
  * GUC variable for pg_audit.role
@@ -143,7 +143,7 @@ bool auditLogStatementOnce = false;
  * Object-level auditing uses the privileges which are granted to this role to
  * determine if a statement should be logged.
  */
-char *auditRole = NULL;
+char	   *auditRole = NULL;
 
 /*
  * String constants for the audit log fields.
@@ -213,23 +213,23 @@ char *auditRole = NULL;
  */
 typedef struct
 {
-	int64 statementId;			/* Simple counter */
-	int64 substatementId;		/* Simple counter */
+	int64		statementId;	/* Simple counter */
+	int64		substatementId; /* Simple counter */
 
 	LogStmtLevel logStmtLevel;	/* From GetCommandLogLevel when possible, */
-								/* generated when not. */
-	NodeTag commandTag;			/* same here */
+	/* generated when not. */
+	NodeTag		commandTag;		/* same here */
 	const char *command;		/* same here */
 	const char *objectType;		/* From event trigger when possible */
-								/* generated when not. */
-	char *objectName;			/* Fully qualified object identification */
+	/* generated when not. */
+	char	   *objectName;		/* Fully qualified object identification */
 	const char *commandText;	/* sourceText / queryString */
 	ParamListInfo paramList;	/* QueryDesc/ProcessUtility parameters */
 
-	bool granted;				/* Audit role has object permissions? */
-	bool logged;				/* Track if we have logged this event, used */
-								/* post-ProcessUtility to make sure we log */
-	bool statementLogged;		/* Track if we have logged the statement */
+	bool		granted;		/* Audit role has object permissions? */
+	bool		logged;			/* Track if we have logged this event, used */
+	/* post-ProcessUtility to make sure we log */
+	bool		statementLogged;	/* Track if we have logged the statement */
 } AuditEvent;
 
 /*
@@ -239,9 +239,9 @@ typedef struct AuditEventStackItem
 {
 	struct AuditEventStackItem *next;
 
-	AuditEvent auditEvent;
+	AuditEvent	auditEvent;
 
-	int64 stackId;
+	int64		stackId;
 
 	MemoryContext contextAudit;
 	MemoryContextCallback contextCallback;
@@ -288,7 +288,7 @@ stack_free(void *stackFree)
 	while (nextItem != NULL)
 	{
 		/* Check if this item matches the item to be freed */
-		if (nextItem == (AuditEventStackItem *)stackFree)
+		if (nextItem == (AuditEventStackItem *) stackFree)
 		{
 			/* Move top of stack to the item after the freed item */
 			auditEventStack = nextItem->next;
@@ -309,7 +309,8 @@ stack_free(void *stackFree)
 				substatementTotal = 0;
 
 				/*
-				 * Reset statement logged so that next statement will be logged.
+				 * Reset statement logged so that next statement will be
+				 * logged.
 				 */
 				statementLogged = false;
 			}
@@ -356,7 +357,7 @@ stack_push()
 	 * the stack at this item.
 	 */
 	stackItem->contextCallback.func = stack_free;
-	stackItem->contextCallback.arg = (void *)stackItem;
+	stackItem->contextCallback.arg = (void *) stackItem;
 	MemoryContextRegisterResetCallback(contextAudit,
 									   &stackItem->contextCallback);
 
@@ -431,7 +432,7 @@ append_valid_csv(StringInfoData *buffer, const char *appendStr)
 
 		for (pChar = appendStr; *pChar; pChar++)
 		{
-			if (*pChar == '"') /* double single quotes */
+			if (*pChar == '"')	/* double single quotes */
 				appendStringInfoCharMacro(buffer, *pChar);
 
 			appendStringInfoCharMacro(buffer, *pChar);
@@ -461,23 +462,23 @@ static void
 log_audit_event(AuditEventStackItem *stackItem)
 {
 	/* By default, put everything in the MISC class. */
-	int				class = LOG_MISC;
-	const char	   *className = CLASS_MISC;
-	MemoryContext	contextOld;
-	StringInfoData	auditStr;
+	int			class = LOG_MISC;
+	const char *className = CLASS_MISC;
+	MemoryContext contextOld;
+	StringInfoData auditStr;
 
 
 	/* Classify the statement using log stmt level and the command tag */
 	switch (stackItem->auditEvent.logStmtLevel)
 	{
-		/* All mods go in WRITE class, execpt EXECUTE */
+			/* All mods go in WRITE class, execpt EXECUTE */
 		case LOGSTMT_MOD:
 			className = CLASS_WRITE;
 			class = LOG_WRITE;
 
 			switch (stackItem->auditEvent.commandTag)
 			{
-				/* Currently, only EXECUTE is different */
+					/* Currently, only EXECUTE is different */
 				case T_ExecuteStmt:
 					className = CLASS_MISC;
 					class = LOG_MISC;
@@ -487,7 +488,7 @@ log_audit_event(AuditEventStackItem *stackItem)
 			}
 			break;
 
-		/* These are DDL, unless they are ROLE */
+			/* These are DDL, unless they are ROLE */
 		case LOGSTMT_DDL:
 			className = CLASS_DDL;
 			class = LOG_DDL;
@@ -495,7 +496,7 @@ log_audit_event(AuditEventStackItem *stackItem)
 			/* Identify role statements */
 			switch (stackItem->auditEvent.commandTag)
 			{
-				/* We know these are all role statements */
+					/* We know these are all role statements */
 				case T_GrantStmt:
 				case T_GrantRoleStmt:
 				case T_CreateRoleStmt:
@@ -505,11 +506,12 @@ log_audit_event(AuditEventStackItem *stackItem)
 					className = CLASS_ROLE;
 					class = LOG_ROLE;
 					break;
-				/*
-				 * Rename and Drop are general and therefore we have to do an
-				 * additional check against the command string to see if they
-				 * are role or regular DDL.
-				 */
+
+					/*
+					 * Rename and Drop are general and therefore we have to do
+					 * an additional check against the command string to see
+					 * if they are role or regular DDL.
+					 */
 				case T_RenameStmt:
 				case T_DropStmt:
 					if (pg_strcasecmp(stackItem->auditEvent.command,
@@ -527,11 +529,11 @@ log_audit_event(AuditEventStackItem *stackItem)
 			}
 			break;
 
-		/* Classify the rest */
+			/* Classify the rest */
 		case LOGSTMT_ALL:
 			switch (stackItem->auditEvent.commandTag)
 			{
-				/* READ statements */
+					/* READ statements */
 				case T_CopyStmt:
 				case T_SelectStmt:
 				case T_PrepareStmt:
@@ -540,7 +542,7 @@ log_audit_event(AuditEventStackItem *stackItem)
 					class = LOG_READ;
 					break;
 
-				/* FUNCTION statements */
+					/* FUNCTION statements */
 				case T_DoStmt:
 					className = CLASS_FUNCTION;
 					class = LOG_FUNCTION;
@@ -558,8 +560,8 @@ log_audit_event(AuditEventStackItem *stackItem)
 	/*
 	 * Only log the statement if:
 	 *
-	 * 1. If object was selected for audit logging (granted)
-	 * 2. The statement belongs to a class that is being logged
+	 * 1. If object was selected for audit logging (granted) 2. The statement
+	 * belongs to a class that is being logged
 	 *
 	 * If neither of these is true, return.
 	 */
@@ -615,10 +617,10 @@ log_audit_event(AuditEventStackItem *stackItem)
 		/* Handle parameter logging, if enabled. */
 		if (auditLogParameter)
 		{
-			int				paramIdx;
-			int				numParams;
-			StringInfoData	paramStrResult;
-			ParamListInfo	paramList = stackItem->auditEvent.paramList;
+			int			paramIdx;
+			int			numParams;
+			StringInfoData paramStrResult;
+			ParamListInfo paramList = stackItem->auditEvent.paramList;
 
 			numParams = paramList == NULL ? 0 : paramList->numParams;
 
@@ -630,9 +632,9 @@ log_audit_event(AuditEventStackItem *stackItem)
 				 paramIdx++)
 			{
 				ParamExternData *prm = &paramList->params[paramIdx];
-				Oid 			 typeOutput;
-				bool 			 typeIsVarLena;
-				char 			*paramStr;
+				Oid			typeOutput;
+				bool		typeIsVarLena;
+				char	   *paramStr;
 
 				/* Add a comma for each param */
 				if (paramIdx != 0)
@@ -663,7 +665,7 @@ log_audit_event(AuditEventStackItem *stackItem)
 	else
 		/* we were asked to not log it */
 		appendStringInfoString(&auditStr,
-				"<previously logged>,<previously logged>");
+							   "<previously logged>,<previously logged>");
 
 	/*
 	 * Log the audit entry.  Note: use of INT64_FORMAT here is bad for
@@ -696,7 +698,7 @@ audit_on_acl(Datum aclDatum,
 {
 	bool		result = false;
 	Acl		   *acl;
-	AclItem	   *aclItemData;
+	AclItem    *aclItemData;
 	int			aclIndex;
 	int			aclTotal;
 
@@ -710,7 +712,7 @@ audit_on_acl(Datum aclDatum,
 	/* Check privileges granted directly to auditOid */
 	for (aclIndex = 0; aclIndex < aclTotal; aclIndex++)
 	{
-		AclItem *aclItem = &aclItemData[aclIndex];
+		AclItem    *aclItem = &aclItemData[aclIndex];
 
 		if (aclItem->ai_grantee == auditOid &&
 			aclItem->ai_privs & mask)
@@ -731,7 +733,7 @@ audit_on_acl(Datum aclDatum,
 	{
 		for (aclIndex = 0; aclIndex < aclTotal; aclIndex++)
 		{
-			AclItem *aclItem = &aclItemData[aclIndex];
+			AclItem    *aclItem = &aclItemData[aclIndex];
 
 			/* Don't test public or auditOid (it has been tested already) */
 			if (aclItem->ai_grantee == ACL_ID_PUBLIC ||
@@ -838,9 +840,9 @@ audit_on_any_attribute(Oid relOid,
 					   Bitmapset *attributeSet,
 					   AclMode mode)
 {
-	bool result = false;
-	AttrNumber col;
-	Bitmapset *tmpSet;
+	bool		result = false;
+	AttrNumber	col;
+	Bitmapset  *tmpSet;
 
 	/* If bms is empty then check for any column match */
 	if (bms_is_empty(attributeSet))
@@ -891,9 +893,9 @@ audit_on_any_attribute(Oid relOid,
 static void
 log_select_dml(Oid auditOid, List *rangeTabls)
 {
-	ListCell *lr;
-	bool first = true;
-	bool found = false;
+	ListCell   *lr;
+	bool		first = true;
+	bool		found = false;
 
 	/* Do not log if this is an internal statement */
 	if (internalStatement)
@@ -901,8 +903,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 
 	foreach(lr, rangeTabls)
 	{
-		Oid relOid;
-		Relation rel;
+		Oid			relOid;
+		Relation	rel;
 		RangeTblEntry *rte = lfirst(lr);
 
 		/* We only care about tables, and can ignore subqueries etc. */
@@ -912,8 +914,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 		found = true;
 
 		/*
-		 * If we are not logging all-catalog queries (auditLogCatalog is false)
-		 * then filter out any system relations here.
+		 * If we are not logging all-catalog queries (auditLogCatalog is
+		 * false) then filter out any system relations here.
 		 */
 		relOid = rte->relid;
 		rel = relation_open(relOid, NoLock);
@@ -982,63 +984,72 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 		{
 			case RELKIND_RELATION:
 				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_TABLE;
+				OBJECT_TYPE_TABLE;
+
 				break;
 
 			case RELKIND_INDEX:
 				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_INDEX;
+				OBJECT_TYPE_INDEX;
+
 				break;
 
 			case RELKIND_SEQUENCE:
 				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_SEQUENCE;
+				OBJECT_TYPE_SEQUENCE;
+
 				break;
 
 			case RELKIND_TOASTVALUE:
 				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_TOASTVALUE;
+				OBJECT_TYPE_TOASTVALUE;
+
 				break;
 
 			case RELKIND_VIEW:
 				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_VIEW;
+				OBJECT_TYPE_VIEW;
+
 				break;
 
 			case RELKIND_COMPOSITE_TYPE:
 				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_COMPOSITE_TYPE;
+				OBJECT_TYPE_COMPOSITE_TYPE;
+
 				break;
 
 			case RELKIND_FOREIGN_TABLE:
 				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_FOREIGN_TABLE;
+				OBJECT_TYPE_FOREIGN_TABLE;
+
 				break;
 
 			case RELKIND_MATVIEW:
 				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_MATVIEW;
+				OBJECT_TYPE_MATVIEW;
+
 				break;
 
 			default:
 				auditEventStack->auditEvent.objectType =
-					OBJECT_TYPE_UNKNOWN;
+				OBJECT_TYPE_UNKNOWN;
+
 				break;
 		}
 
 		/* Get a copy of the relation name and assign it to object name */
 		auditEventStack->auditEvent.objectName =
 			quote_qualified_identifier(get_namespace_name(
-									   RelationGetNamespace(rel)),
+												  RelationGetNamespace(rel)),
 									   RelationGetRelationName(rel));
 		relation_close(rel, NoLock);
 
 		/* Perform object auditing only if the audit role is valid */
 		if (auditOid != InvalidOid)
 		{
-			AclMode auditPerms =
-				(ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
-				rte->requiredPerms;
+			AclMode		auditPerms =
+			(ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
+			rte->requiredPerms;
 
 			/*
 			 * If any of the required permissions for the relation are granted
@@ -1104,8 +1115,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 
 	/*
 	 * If no tables were found that means that RangeTbls was empty or all
-	 * relations were in the system schema.  In that case still log a
-	 * session record.
+	 * relations were in the system schema.  In that case still log a session
+	 * record.
 	 */
 	if (!found)
 	{
@@ -1123,7 +1134,7 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 static void
 log_function_execute(Oid objectId)
 {
-	HeapTuple proctup;
+	HeapTuple	proctup;
 	Form_pg_proc proc;
 	AuditEventStackItem *stackItem;
 
@@ -1159,6 +1170,7 @@ log_function_execute(Oid objectId)
 	stackItem->auditEvent.commandTag = T_DoStmt;
 	stackItem->auditEvent.command = COMMAND_EXECUTE;
 	stackItem->auditEvent.objectType = OBJECT_TYPE_FUNCTION;
+
 	stackItem->auditEvent.commandText = stackItem->next->auditEvent.commandText;
 
 	log_audit_event(stackItem);
@@ -1236,9 +1248,9 @@ pg_audit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags)
 		standard_ExecutorStart(queryDesc, eflags);
 
 	/*
-	 * Move the stack memory context to the query memory context.  This needs to
-	 * be done here because the query context does not exist before the call
-	 * to standard_ExecutorStart() but the stack item is required by
+	 * Move the stack memory context to the query memory context.  This needs
+	 * to be done here because the query context does not exist before the
+	 * call to standard_ExecutorStart() but the stack item is required by
 	 * pg_audit_ExecutorCheckPerms_hook() which is called during
 	 * standard_ExecutorStart().
 	 */
@@ -1253,7 +1265,7 @@ pg_audit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags)
 static bool
 pg_audit_ExecutorCheckPerms_hook(List *rangeTabls, bool abort)
 {
-	Oid auditOid;
+	Oid			auditOid;
 
 	/* Get the audit oid if the role exists */
 	auditOid = get_role_oid(auditRole, true);
@@ -1283,7 +1295,7 @@ pg_audit_ProcessUtility_hook(Node *parsetree,
 							 char *completionTag)
 {
 	AuditEventStackItem *stackItem = NULL;
-	int64 stackId = 0;
+	int64		stackId = 0;
 
 	/*
 	 * Don't audit substatements.  All the substatements we care about should
@@ -1328,19 +1340,22 @@ pg_audit_ProcessUtility_hook(Node *parsetree,
 								params, dest, completionTag);
 
 	/*
-	 * Process the audit event if there is one.  Also check that this event was
-	 * not popped off the stack by a memory context being free'd elsewhere.
+	 * Process the audit event if there is one.  Also check that this event
+	 * was not popped off the stack by a memory context being free'd
+	 * elsewhere.
 	 */
 	if (stackItem && !IsAbortedTransactionBlockState())
 	{
 		/*
-		 * Make sure the item we want to log is still on the stack - if not then
-		 * something has gone wrong and an error will be raised.
+		 * Make sure the item we want to log is still on the stack - if not
+		 * then something has gone wrong and an error will be raised.
 		 */
 		stack_valid(stackId);
 
-		/* Log the utility command if logging is on, the command has not already
-		 * been logged by another hook, and the transaction is not aborted.
+		/*
+		 * Log the utility command if logging is on, the command has not
+		 * already been logged by another hook, and the transaction is not
+		 * aborted.
 		 */
 		if (auditLogBitmap != 0 && !stackItem->auditEvent.logged)
 			log_audit_event(stackItem);
@@ -1380,11 +1395,12 @@ Datum
 pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 {
 	EventTriggerData *eventData;
-	int				  result, row;
-	TupleDesc		  spiTupDesc;
-	const char		 *query;
-	MemoryContext 	  contextQuery;
-	MemoryContext 	  contextOld;
+	int			result,
+				row;
+	TupleDesc	spiTupDesc;
+	const char *query;
+	MemoryContext contextQuery;
+	MemoryContext contextOld;
 
 	/* Continue only if session DDL logging is enabled */
 	if (~auditLogBitmap & LOG_DDL)
@@ -1393,7 +1409,7 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 	/* Be sure the module was loaded */
 	if (!auditEventStack)
 		elog(ERROR, "pg_audit not loaded before call to "
-					"pg_audit_ddl_command_end()");
+			 "pg_audit_ddl_command_end()");
 
 	/* This is an internal statement - do not log it */
 	internalStatement = true;
@@ -1404,11 +1420,11 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 
 	/* Switch memory context for query */
 	contextQuery = AllocSetContextCreate(
-					CurrentMemoryContext,
-					"pg_audit_func_ddl_command_end temporary context",
-					ALLOCSET_DEFAULT_MINSIZE,
-					ALLOCSET_DEFAULT_INITSIZE,
-					ALLOCSET_DEFAULT_MAXSIZE);
+										 CurrentMemoryContext,
+						   "pg_audit_func_ddl_command_end temporary context",
+										 ALLOCSET_DEFAULT_MINSIZE,
+										 ALLOCSET_DEFAULT_INITSIZE,
+										 ALLOCSET_DEFAULT_MAXSIZE);
 	contextOld = MemoryContextSwitchTo(contextQuery);
 
 	/* Get information about triggered events */
@@ -1423,31 +1439,32 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 
 	/* Return objects affected by the (non drop) DDL statement */
 	query = "SELECT UPPER(object_type), object_identity\n"
-			"  FROM pg_event_trigger_ddl_commands()";
+		"  FROM pg_event_trigger_ddl_commands()";
 
 	/* Attempt to connect */
 	result = SPI_connect();
 	if (result < 0)
 		elog(ERROR, "pg_audit_ddl_command_end: SPI_connect returned %d",
-					result);
+			 result);
 
 	/* Execute the query */
 	result = SPI_execute(query, true, 0);
 	if (result != SPI_OK_SELECT)
 		elog(ERROR, "pg_audit_ddl_command_end: SPI_execute returned %d",
-					result);
+			 result);
 
 	/* Iterate returned rows */
 	spiTupDesc = SPI_tuptable->tupdesc;
 	for (row = 0; row < SPI_processed; row++)
 	{
-		HeapTuple  spiTuple;
+		HeapTuple	spiTuple;
 
 		spiTuple = SPI_tuptable->vals[row];
 
 		/* Supply object name and type for audit event */
 		auditEventStack->auditEvent.objectType =
-			SPI_getvalue(spiTuple, spiTupDesc, 1);
+		SPI_getvalue(spiTuple, spiTupDesc, 1);
+
 		auditEventStack->auditEvent.objectName =
 			SPI_getvalue(spiTuple, spiTupDesc, 2);
 
@@ -1473,11 +1490,12 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 Datum
 pg_audit_sql_drop(PG_FUNCTION_ARGS)
 {
-	int				  result, row;
-	TupleDesc		  spiTupDesc;
-	const char		 *query;
-	MemoryContext 	  contextQuery;
-	MemoryContext 	  contextOld;
+	int			result,
+				row;
+	TupleDesc	spiTupDesc;
+	const char *query;
+	MemoryContext contextQuery;
+	MemoryContext contextOld;
 
 	if (~auditLogBitmap & LOG_DDL)
 		PG_RETURN_NULL();
@@ -1485,7 +1503,7 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
 	/* Be sure the module was loaded */
 	if (!auditEventStack)
 		elog(ERROR, "pg_audit not loaded before call to "
-					"pg_audit_sql_drop()");
+			 "pg_audit_sql_drop()");
 
 	/* This is an internal statement - do not log it */
 	internalStatement = true;
@@ -1496,44 +1514,45 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
 
 	/* Switch memory context for the query */
 	contextQuery = AllocSetContextCreate(
-					CurrentMemoryContext,
-					"pg_audit_func_ddl_command_end temporary context",
-					ALLOCSET_DEFAULT_MINSIZE,
-					ALLOCSET_DEFAULT_INITSIZE,
-					ALLOCSET_DEFAULT_MAXSIZE);
+										 CurrentMemoryContext,
+						   "pg_audit_func_ddl_command_end temporary context",
+										 ALLOCSET_DEFAULT_MINSIZE,
+										 ALLOCSET_DEFAULT_INITSIZE,
+										 ALLOCSET_DEFAULT_MAXSIZE);
 	contextOld = MemoryContextSwitchTo(contextQuery);
 
 	/* Return objects affected by the drop statement */
 	query = "SELECT UPPER(object_type),\n"
-			"       object_identity\n"
-			"  FROM pg_event_trigger_dropped_objects()\n"
-			" WHERE lower(object_type) <> 'type'\n"
-			"   AND schema_name <> 'pg_toast'";
+		"       object_identity\n"
+		"  FROM pg_event_trigger_dropped_objects()\n"
+		" WHERE lower(object_type) <> 'type'\n"
+		"   AND schema_name <> 'pg_toast'";
 
 	/* Attempt to connect */
 	result = SPI_connect();
 	if (result < 0)
 		elog(ERROR, "pg_audit_ddl_drop: SPI_connect returned %d",
-					result);
+			 result);
 
 	/* Execute the query */
 	result = SPI_execute(query, true, 0);
 	if (result != SPI_OK_SELECT)
 		elog(ERROR, "pg_audit_ddl_drop: SPI_execute returned %d",
-					result);
+			 result);
 
 	/* Iterate returned rows */
 	spiTupDesc = SPI_tuptable->tupdesc;
 	for (row = 0; row < SPI_processed; row++)
 	{
-		HeapTuple  spiTuple;
+		HeapTuple	spiTuple;
 
 		spiTuple = SPI_tuptable->vals[row];
 
 		auditEventStack->auditEvent.objectType =
-			SPI_getvalue(spiTuple, spiTupDesc, 1);
+		SPI_getvalue(spiTuple, spiTupDesc, 1);
+
 		auditEventStack->auditEvent.objectName =
-				SPI_getvalue(spiTuple, spiTupDesc, 2);
+			SPI_getvalue(spiTuple, spiTupDesc, 2);
 
 		log_audit_event(auditEventStack);
 	}
@@ -1562,10 +1581,10 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
 static bool
 check_pg_audit_log(char **newVal, void **extra, GucSource source)
 {
-	List *flagRawList;
-	char *rawVal;
-	ListCell *lt;
-	int *flags;
+	List	   *flagRawList;
+	char	   *rawVal;
+	ListCell   *lt;
+	int		   *flags;
 
 	/* Make sure newval is a comma-separated list of tokens. */
 	rawVal = pstrdup(*newVal);
@@ -1581,18 +1600,18 @@ check_pg_audit_log(char **newVal, void **extra, GucSource source)
 	 * Check that we recognise each token, and add it to the bitmap we're
 	 * building up in a newly-allocated int *f.
 	 */
-	if (!(flags = (int *)malloc(sizeof(int))))
+	if (!(flags = (int *) malloc(sizeof(int))))
 		return false;
 
 	*flags = 0;
 
 	foreach(lt, flagRawList)
 	{
-		bool subtract = false;
-		int class;
+		bool		subtract = false;
+		int			class;
 
 		/* Retrieve a token */
-		char *token = (char *)lfirst(lt);
+		char	   *token = (char *) lfirst(lt);
 
 		/* If token is preceded by -, then the token is subtractive */
 		if (strstr(token, "-") == token)
@@ -1651,7 +1670,7 @@ static void
 assign_pg_audit_log(const char *newVal, void *extra)
 {
 	if (extra)
-		auditLogBitmap = *(int *)extra;
+		auditLogBitmap = *(int *) extra;
 }
 
 /*
@@ -1662,10 +1681,10 @@ assign_pg_audit_log(const char *newVal, void *extra)
 static bool
 check_pg_audit_log_level(char **newVal, void **extra, GucSource source)
 {
-	int *logLevel;
+	int		   *logLevel;
 
 	/* Allocate memory to store the log level */
-	if (!(logLevel = (int *)malloc(sizeof(int))))
+	if (!(logLevel = (int *) malloc(sizeof(int))))
 		return false;
 
 	/* Find the log level enum */
@@ -1718,7 +1737,7 @@ static void
 assign_pg_audit_log_level(const char *newVal, void *extra)
 {
 	if (extra)
-		auditLogLevel = *(int *)extra;
+		auditLogLevel = *(int *) extra;
 }
 
 /*
@@ -1729,126 +1748,126 @@ _PG_init(void)
 {
 	/* Define pg_audit.log */
 	DefineCustomStringVariable(
-		"pg_audit.log",
-
-		"Specifies which classes of statements will be logged by session audit "
-		"logging. Multiple classes can be provided using a comma-separated "
-		"list and classes can be subtracted by prefacing the class with a "
-		"- sign.",
-
-		NULL,
-		&auditLog,
-		"none",
-		PGC_SUSET,
-		GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
-		check_pg_audit_log,
-		assign_pg_audit_log,
-		NULL);
+							   "pg_audit.log",
+
+	 "Specifies which classes of statements will be logged by session audit "
+		 "logging. Multiple classes can be provided using a comma-separated "
+		  "list and classes can be subtracted by prefacing the class with a "
+							   "- sign.",
+
+							   NULL,
+							   &auditLog,
+							   "none",
+							   PGC_SUSET,
+							   GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
+							   check_pg_audit_log,
+							   assign_pg_audit_log,
+							   NULL);
 
 	/* Define pg_audit.log_catalog */
 	DefineCustomBoolVariable(
-		"pg_audit.log_catalog",
+							 "pg_audit.log_catalog",
 
 		"Specifies that session logging should be enabled in the case where "
-		"all relations in a statement are in pg_catalog.  Disabling this "
-		"setting will reduce noise in the log from tools like psql and PgAdmin "
-		"that query the catalog heavily.",
+		   "all relations in a statement are in pg_catalog.  Disabling this "
+	 "setting will reduce noise in the log from tools like psql and PgAdmin "
+							 "that query the catalog heavily.",
 
-		NULL,
-		&auditLogCatalog,
-		true,
-		PGC_SUSET,
-		GUC_NOT_IN_SAMPLE,
-		NULL, NULL, NULL);
+							 NULL,
+							 &auditLogCatalog,
+							 true,
+							 PGC_SUSET,
+							 GUC_NOT_IN_SAMPLE,
+							 NULL, NULL, NULL);
 
 	/* Define pg_audit.log_level */
 	DefineCustomStringVariable(
-		"pg_audit.log_level",
-
-		"Specifies the log level that will be used for log entries. This "
-		"setting is used for regression testing and may also be useful to end "
-		"users for testing or other purposes.  It is not intended to be used "
-		"in a production environment as it may leak which statements are being "
-		"logged to the user.",
-
-		NULL,
-		&auditLogLevelString,
-		"log",
-		PGC_SUSET,
-		GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
-		check_pg_audit_log_level,
-		assign_pg_audit_log_level,
-		NULL);
+							   "pg_audit.log_level",
+
+		   "Specifies the log level that will be used for log entries. This "
+	  "setting is used for regression testing and may also be useful to end "
+	   "users for testing or other purposes.  It is not intended to be used "
+	 "in a production environment as it may leak which statements are being "
+							   "logged to the user.",
+
+							   NULL,
+							   &auditLogLevelString,
+							   "log",
+							   PGC_SUSET,
+							   GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
+							   check_pg_audit_log_level,
+							   assign_pg_audit_log_level,
+							   NULL);
 
 	/* Define pg_audit.log_parameter */
 	DefineCustomBoolVariable(
-		"pg_audit.log_parameter",
+							 "pg_audit.log_parameter",
 
-		"Specifies that audit logging should include the parameters that were "
-		"passed with the statement. When parameters are present they will be "
-		"be included in CSV format after the statement text.",
+	  "Specifies that audit logging should include the parameters that were "
+	   "passed with the statement. When parameters are present they will be "
+					   "be included in CSV format after the statement text.",
 
-		NULL,
-		&auditLogParameter,
-		false,
-		PGC_SUSET,
-		GUC_NOT_IN_SAMPLE,
-		NULL, NULL, NULL);
+							 NULL,
+							 &auditLogParameter,
+							 false,
+							 PGC_SUSET,
+							 GUC_NOT_IN_SAMPLE,
+							 NULL, NULL, NULL);
 
 	/* Define pg_audit.log_relation */
 	DefineCustomBoolVariable(
-		"pg_audit.log_relation",
+							 "pg_audit.log_relation",
 
-		"Specifies whether session audit logging should create a separate log "
-		"entry for each relation referenced in a SELECT or DML statement. "
-		"This is a useful shortcut for exhaustive logging without using object "
-		"audit logging.",
+	  "Specifies whether session audit logging should create a separate log "
+		  "entry for each relation referenced in a SELECT or DML statement. "
+	 "This is a useful shortcut for exhaustive logging without using object "
+							 "audit logging.",
 
-		NULL,
-		&auditLogRelation,
-		false,
-		PGC_SUSET,
-		GUC_NOT_IN_SAMPLE,
-		NULL, NULL, NULL);
+							 NULL,
+							 &auditLogRelation,
+							 false,
+							 PGC_SUSET,
+							 GUC_NOT_IN_SAMPLE,
+							 NULL, NULL, NULL);
 
 	/* Define pg_audit.log_statement_once */
 	DefineCustomBoolVariable(
-		"pg_audit.log_statement_once",
-
-		"Specifies whether logging will include the statement text and "
-		"parameters with the first log entry for a statement/substatement "
-		"combination or with every entry.  Disabling this setting will result "
-		"in less verbose logging but may make it more difficult to determine "
-		"the statement that generated a log entry, though the "
-		"statement/substatement pair along with the process id should suffice "
-		"to identify the statement text logged with a previous entry.",
-
-		NULL,
-		&auditLogStatementOnce,
-		false,
-		PGC_SUSET,
-		GUC_NOT_IN_SAMPLE,
-		NULL, NULL, NULL);
+							 "pg_audit.log_statement_once",
+
+			 "Specifies whether logging will include the statement text and "
+		  "parameters with the first log entry for a statement/substatement "
+	  "combination or with every entry.  Disabling this setting will result "
+	   "in less verbose logging but may make it more difficult to determine "
+					  "the statement that generated a log entry, though the "
+	  "statement/substatement pair along with the process id should suffice "
+			  "to identify the statement text logged with a previous entry.",
+
+							 NULL,
+							 &auditLogStatementOnce,
+							 false,
+							 PGC_SUSET,
+							 GUC_NOT_IN_SAMPLE,
+							 NULL, NULL, NULL);
 
 	/* Define pg_audit.role */
 	DefineCustomStringVariable(
-		"pg_audit.role",
+							   "pg_audit.role",
 
-		"Specifies the master role to use for object audit logging.  Muliple "
-		"audit roles can be defined by granting them to the master role. This "
-		"allows multiple groups to be in charge of different aspects of audit "
-		"logging.",
+	   "Specifies the master role to use for object audit logging.  Muliple "
+	  "audit roles can be defined by granting them to the master role. This "
+	  "allows multiple groups to be in charge of different aspects of audit "
+							   "logging.",
 
-		NULL,
-		&auditRole,
-		"",
-		PGC_SUSET,
-		GUC_NOT_IN_SAMPLE,
-		NULL, NULL, NULL);
+							   NULL,
+							   &auditRole,
+							   "",
+							   PGC_SUSET,
+							   GUC_NOT_IN_SAMPLE,
+							   NULL, NULL, NULL);
 
 	/*
-	 * Install our hook functions after saving the existing pointers to preserve
-	 * the chains.
+	 * Install our hook functions after saving the existing pointers to
+	 * preserve the chains.
 	 */
 	next_ExecutorStart_hook = ExecutorStart_hook;
 	ExecutorStart_hook = pg_audit_ExecutorStart_hook;
diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c
index 761c277c63b..6622d22f5f8 100644
--- a/contrib/pg_buffercache/pg_buffercache_pages.c
+++ b/contrib/pg_buffercache/pg_buffercache_pages.c
@@ -34,6 +34,7 @@ typedef struct
 	bool		isvalid;
 	bool		isdirty;
 	uint16		usagecount;
+
 	/*
 	 * An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
 	 * being pinned by too many backends and each backend will only pin once
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 3cc687bdb70..0eb991cdf0e 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -138,10 +138,10 @@ typedef struct Counters
 {
 	int64		calls;			/* # of times executed */
 	double		total_time;		/* total execution time, in msec */
-	double      min_time;       /* minimim execution time in msec */
-	double      max_time;       /* maximum execution time in msec */
-	double      mean_time;      /* mean execution time in msec */
-	double      sum_var_time;   /* sum of variances in execution time in msec */
+	double		min_time;		/* minimim execution time in msec */
+	double		max_time;		/* maximum execution time in msec */
+	double		mean_time;		/* mean execution time in msec */
+	double		sum_var_time;	/* sum of variances in execution time in msec */
 	int64		rows;			/* total # of retrieved or affected rows */
 	int64		shared_blks_hit;	/* # of shared buffer hits */
 	int64		shared_blks_read;		/* # of shared disk blocks read */
@@ -1231,10 +1231,10 @@ pgss_store(const char *query, uint32 queryId,
 		else
 		{
 			/*
-			 * Welford's method for accurately computing variance.
-			 * See <http://www.johndcook.com/blog/standard_deviation/>
+			 * Welford's method for accurately computing variance. See
+			 * <http://www.johndcook.com/blog/standard_deviation/>
 			 */
-			double old_mean = e->counters.mean_time;
+			double		old_mean = e->counters.mean_time;
 
 			e->counters.mean_time +=
 				(total_time - old_mean) / e->counters.calls;
@@ -1572,10 +1572,11 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
 			values[i++] = Float8GetDatumFast(tmp.min_time);
 			values[i++] = Float8GetDatumFast(tmp.max_time);
 			values[i++] = Float8GetDatumFast(tmp.mean_time);
+
 			/*
 			 * Note we are calculating the population variance here, not the
-			 * sample variance, as we have data for the whole population,
-			 * so Bessel's correction is not used, and we don't divide by
+			 * sample variance, as we have data for the whole population, so
+			 * Bessel's correction is not used, and we don't divide by
 			 * tmp.calls - 1.
 			 */
 			if (tmp.calls > 1)
@@ -2687,16 +2688,16 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
 			break;
 		case T_OnConflictExpr:
 			{
-				OnConflictExpr   *conf = (OnConflictExpr *) node;
+				OnConflictExpr *conf = (OnConflictExpr *) node;
 
 				APP_JUMB(conf->action);
 				JumbleExpr(jstate, (Node *) conf->arbiterElems);
 				JumbleExpr(jstate, conf->arbiterWhere);
-				JumbleExpr(jstate, (Node  *) conf->onConflictSet);
+				JumbleExpr(jstate, (Node *) conf->onConflictSet);
 				JumbleExpr(jstate, conf->onConflictWhere);
 				APP_JUMB(conf->constraint);
 				APP_JUMB(conf->exclRelIndex);
-				JumbleExpr(jstate, (Node  *) conf->exclRelTlist);
+				JumbleExpr(jstate, (Node *) conf->exclRelTlist);
 			}
 			break;
 		case T_List:
diff --git a/contrib/pgcrypto/pgp-armor.c b/contrib/pgcrypto/pgp-armor.c
index 24eb42fa891..5c8355808a9 100644
--- a/contrib/pgcrypto/pgp-armor.c
+++ b/contrib/pgcrypto/pgp-armor.c
@@ -399,7 +399,7 @@ pgp_extract_armor_headers(const uint8 *src, unsigned len,
 	char	   *line;
 	char	   *nextline;
 	char	   *eol,
-				*colon;
+			   *colon;
 	int			hlen;
 	char	   *buf;
 	int			hdrlines;
diff --git a/contrib/pgcrypto/pgp-pgsql.c b/contrib/pgcrypto/pgp-pgsql.c
index d0da05cd13a..1842985e53d 100644
--- a/contrib/pgcrypto/pgp-pgsql.c
+++ b/contrib/pgcrypto/pgp-pgsql.c
@@ -259,6 +259,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
 		res = pgp_set_convert_crlf(ctx, atoi(val));
 	else if (strcmp(key, "unicode-mode") == 0)
 		res = pgp_set_unicode_mode(ctx, atoi(val));
+
 	/*
 	 * The remaining options are for debugging/testing and are therefore not
 	 * documented in the user-facing docs.
@@ -834,22 +835,22 @@ static int
 parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
 					   char ***p_keys, char ***p_values)
 {
-	int		nkdims = ARR_NDIM(key_array);
-	int		nvdims = ARR_NDIM(val_array);
-	char   **keys,
-		   **values;
-	Datum  *key_datums,
-		   *val_datums;
-	bool   *key_nulls,
-		   *val_nulls;
-	int		key_count,
-			val_count;
-	int		i;
+	int			nkdims = ARR_NDIM(key_array);
+	int			nvdims = ARR_NDIM(val_array);
+	char	  **keys,
+			  **values;
+	Datum	   *key_datums,
+			   *val_datums;
+	bool	   *key_nulls,
+			   *val_nulls;
+	int			key_count,
+				val_count;
+	int			i;
 
 	if (nkdims > 1 || nkdims != nvdims)
 		ereport(ERROR,
 				(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
-				errmsg("wrong number of array subscripts")));
+				 errmsg("wrong number of array subscripts")));
 	if (nkdims == 0)
 		return 0;
 
@@ -871,7 +872,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
 
 	for (i = 0; i < key_count; i++)
 	{
-		char *v;
+		char	   *v;
 
 		/* Check that the key doesn't contain anything funny */
 		if (key_nulls[i])
@@ -884,7 +885,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
 		if (!string_is_ascii(v))
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("header key must not contain non-ASCII characters")));
+				errmsg("header key must not contain non-ASCII characters")));
 		if (strstr(v, ": "))
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -906,7 +907,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
 		if (!string_is_ascii(v))
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("header value must not contain non-ASCII characters")));
+			  errmsg("header value must not contain non-ASCII characters")));
 		if (strchr(v, '\n'))
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -1045,7 +1046,7 @@ pgp_armor_headers(PG_FUNCTION_ARGS)
 		SRF_RETURN_DONE(funcctx);
 	else
 	{
-		char	  *values[2];
+		char	   *values[2];
 
 		/* we assume that the keys (and values) are in UTF-8. */
 		utf8key = state->keys[funcctx->call_cntr];
diff --git a/contrib/pgcrypto/pgp.h b/contrib/pgcrypto/pgp.h
index 2ce429d1b20..62b8517c27c 100644
--- a/contrib/pgcrypto/pgp.h
+++ b/contrib/pgcrypto/pgp.h
@@ -278,11 +278,11 @@ void		pgp_cfb_free(PGP_CFB *ctx);
 int			pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
 int			pgp_cfb_decrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
 
-void		pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
-							 int num_headers, char **keys, char **values);
+void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
+				 int num_headers, char **keys, char **values);
 int			pgp_armor_decode(const uint8 *src, int len, StringInfo dst);
-int			pgp_extract_armor_headers(const uint8 *src, unsigned len,
-									  int *nheaders, char ***keys, char ***values);
+int pgp_extract_armor_headers(const uint8 *src, unsigned len,
+						  int *nheaders, char ***keys, char ***values);
 
 int			pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst);
 int			pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src);
diff --git a/contrib/pgstattuple/pgstatapprox.c b/contrib/pgstattuple/pgstatapprox.c
index ae5ed56f986..22c5f7a9eef 100644
--- a/contrib/pgstattuple/pgstatapprox.c
+++ b/contrib/pgstattuple/pgstatapprox.c
@@ -84,8 +84,8 @@ statapprox_heap(Relation rel, output_type *stat)
 		CHECK_FOR_INTERRUPTS();
 
 		/*
-		 * If the page has only visible tuples, then we can find out the
-		 * free space from the FSM and move on.
+		 * If the page has only visible tuples, then we can find out the free
+		 * space from the FSM and move on.
 		 */
 		if (visibilitymap_test(rel, blkno, &vmbuffer))
 		{
@@ -103,8 +103,8 @@ statapprox_heap(Relation rel, output_type *stat)
 		page = BufferGetPage(buf);
 
 		/*
-		 * It's not safe to call PageGetHeapFreeSpace() on new pages, so
-		 * we treat them as being free space for our purposes.
+		 * It's not safe to call PageGetHeapFreeSpace() on new pages, so we
+		 * treat them as being free space for our purposes.
 		 */
 		if (!PageIsNew(page))
 			stat->free_space += PageGetHeapFreeSpace(page);
@@ -120,9 +120,9 @@ statapprox_heap(Relation rel, output_type *stat)
 		scanned++;
 
 		/*
-		 * Look at each tuple on the page and decide whether it's live
-		 * or dead, then count it and its size. Unlike lazy_scan_heap,
-		 * we can afford to ignore problems and special cases.
+		 * Look at each tuple on the page and decide whether it's live or
+		 * dead, then count it and its size. Unlike lazy_scan_heap, we can
+		 * afford to ignore problems and special cases.
 		 */
 		maxoff = PageGetMaxOffsetNumber(page);
 
@@ -179,9 +179,10 @@ statapprox_heap(Relation rel, output_type *stat)
 		UnlockReleaseBuffer(buf);
 	}
 
-	stat->table_len = (uint64) nblocks * BLCKSZ;
+	stat->table_len = (uint64) nblocks *BLCKSZ;
+
 	stat->tuple_count = vac_estimate_reltuples(rel, false, nblocks, scanned,
-											   stat->tuple_count+misc_count);
+											 stat->tuple_count + misc_count);
 
 	/*
 	 * Calculate percentages if the relation has one or more pages.
@@ -240,9 +241,9 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
 				 errmsg("cannot access temporary tables of other sessions")));
 
 	/*
-	 * We support only ordinary relations and materialised views,
-	 * because we depend on the visibility map and free space map
-	 * for our estimates about unscanned pages.
+	 * We support only ordinary relations and materialised views, because we
+	 * depend on the visibility map and free space map for our estimates about
+	 * unscanned pages.
 	 */
 	if (!(rel->rd_rel->relkind == RELKIND_RELATION ||
 		  rel->rd_rel->relkind == RELKIND_MATVIEW))
@@ -268,6 +269,6 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
 	values[i++] = Int64GetDatum(stat.free_space);
 	values[i++] = Float8GetDatum(stat.free_percent);
 
-	ret =  heap_form_tuple(tupdesc, values, nulls);
+	ret = heap_form_tuple(tupdesc, values, nulls);
 	return HeapTupleGetDatum(ret);
 }
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index d420cb2d0c0..6da01e1d6f3 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -203,7 +203,7 @@ typedef struct PgFdwAnalyzeState
 	/* for random sampling */
 	double		samplerows;		/* # of rows fetched */
 	double		rowstoskip;		/* # of rows to skip before next sample */
-	ReservoirStateData rstate;		/* state for reservoir sampling*/
+	ReservoirStateData rstate;	/* state for reservoir sampling */
 
 	/* working memory contexts */
 	MemoryContext anl_cxt;		/* context for per-analyze lifespan data */
diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c
index ae2aca8a8db..32d57430185 100644
--- a/contrib/test_decoding/test_decoding.c
+++ b/contrib/test_decoding/test_decoding.c
@@ -53,16 +53,16 @@ static void pg_decode_shutdown(LogicalDecodingContext *ctx);
 static void pg_decode_begin_txn(LogicalDecodingContext *ctx,
 					ReorderBufferTXN *txn);
 static void pg_output_begin(LogicalDecodingContext *ctx,
-							TestDecodingData *data,
-							ReorderBufferTXN *txn,
-							bool last_write);
+				TestDecodingData *data,
+				ReorderBufferTXN *txn,
+				bool last_write);
 static void pg_decode_commit_txn(LogicalDecodingContext *ctx,
 					 ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
 static void pg_decode_change(LogicalDecodingContext *ctx,
 				 ReorderBufferTXN *txn, Relation rel,
 				 ReorderBufferChange *change);
 static bool pg_decode_filter(LogicalDecodingContext *ctx,
-							 RepOriginId origin_id);
+				 RepOriginId origin_id);
 
 void
 _PG_init(void)
diff --git a/contrib/tsm_system_rows/tsm_system_rows.c b/contrib/tsm_system_rows/tsm_system_rows.c
index 14efb27f0db..e325eaff498 100644
--- a/contrib/tsm_system_rows/tsm_system_rows.c
+++ b/contrib/tsm_system_rows/tsm_system_rows.c
@@ -33,14 +33,14 @@ PG_MODULE_MAGIC;
 typedef struct
 {
 	SamplerRandomState randstate;
-	uint32			seed;			/* random seed */
-	BlockNumber		nblocks;		/* number of block in relation */
-	int32			ntuples;		/* number of tuples to return */
-	int32			donetuples;		/* tuples already returned */
-	OffsetNumber	lt;				/* last tuple returned from current block */
-	BlockNumber		step;			/* step size */
-	BlockNumber		lb;				/* last block visited */
-	BlockNumber		doneblocks;		/* number of already returned blocks */
+	uint32		seed;			/* random seed */
+	BlockNumber nblocks;		/* number of block in relation */
+	int32		ntuples;		/* number of tuples to return */
+	int32		donetuples;		/* tuples already returned */
+	OffsetNumber lt;			/* last tuple returned from current block */
+	BlockNumber step;			/* step size */
+	BlockNumber lb;				/* last block visited */
+	BlockNumber doneblocks;		/* number of already returned blocks */
 } SystemSamplerData;
 
 
@@ -60,11 +60,11 @@ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
 Datum
 tsm_system_rows_init(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	uint32				seed = PG_GETARG_UINT32(1);
-	int32				ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
-	HeapScanDesc		scan = tsdesc->heapScan;
-	SystemSamplerData  *sampler;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	uint32		seed = PG_GETARG_UINT32(1);
+	int32		ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
+	HeapScanDesc scan = tsdesc->heapScan;
+	SystemSamplerData *sampler;
 
 	if (ntuples < 1)
 		ereport(ERROR,
@@ -86,6 +86,7 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)
 
 	/* Find relative prime as step size for linear probing. */
 	sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+
 	/*
 	 * Randomize start position so that blocks close to step size don't have
 	 * higher probability of being chosen on very short scan.
@@ -106,8 +107,8 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)
 Datum
 tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
 	sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
 	sampler->doneblocks++;
@@ -127,10 +128,10 @@ tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
 Datum
 tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	OffsetNumber		maxoffset = PG_GETARG_UINT16(2);
-	SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
-	OffsetNumber		tupoffset = sampler->lt;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+	SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+	OffsetNumber tupoffset = sampler->lt;
 
 	if (tupoffset == InvalidOffsetNumber)
 		tupoffset = FirstOffsetNumber;
@@ -152,9 +153,9 @@ tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
 Datum
 tsm_system_rows_examinetuple(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	bool				visible = PG_GETARG_BOOL(3);
-	SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	bool		visible = PG_GETARG_BOOL(3);
+	SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
 	if (!visible)
 		PG_RETURN_BOOL(false);
@@ -183,8 +184,8 @@ tsm_system_rows_end(PG_FUNCTION_ARGS)
 Datum
 tsm_system_rows_reset(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
 	sampler->lt = InvalidOffsetNumber;
 	sampler->donetuples = 0;
@@ -203,14 +204,14 @@ tsm_system_rows_reset(PG_FUNCTION_ARGS)
 Datum
 tsm_system_rows_cost(PG_FUNCTION_ARGS)
 {
-	PlannerInfo	   *root = (PlannerInfo *) PG_GETARG_POINTER(0);
-	Path		   *path = (Path *) PG_GETARG_POINTER(1);
-	RelOptInfo	   *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
-	List		   *args = (List *) PG_GETARG_POINTER(3);
-	BlockNumber	   *pages = (BlockNumber *) PG_GETARG_POINTER(4);
-	double		   *tuples = (double *) PG_GETARG_POINTER(5);
-	Node		   *limitnode;
-	int32			ntuples;
+	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+	Path	   *path = (Path *) PG_GETARG_POINTER(1);
+	RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+	List	   *args = (List *) PG_GETARG_POINTER(3);
+	BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+	double	   *tuples = (double *) PG_GETARG_POINTER(5);
+	Node	   *limitnode;
+	int32		ntuples;
 
 	limitnode = linitial(args);
 	limitnode = estimate_expression_value(root, limitnode);
@@ -235,9 +236,9 @@ tsm_system_rows_cost(PG_FUNCTION_ARGS)
 
 
 static uint32
-gcd (uint32 a, uint32 b)
+gcd(uint32 a, uint32 b)
 {
-	uint32 c;
+	uint32		c;
 
 	while (a != 0)
 	{
@@ -253,8 +254,8 @@ static uint32
 random_relative_prime(uint32 n, SamplerRandomState randstate)
 {
 	/* Pick random starting number, with some limits on what it can be. */
-	uint32 r = (uint32) sampler_random_fract(randstate) * n/2 + n/4,
-		   t;
+	uint32		r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
+				t;
 
 	/*
 	 * This should only take 2 or 3 iterations as the probability of 2 numbers
diff --git a/contrib/tsm_system_time/tsm_system_time.c b/contrib/tsm_system_time/tsm_system_time.c
index 9af9e749216..7708fc07617 100644
--- a/contrib/tsm_system_time/tsm_system_time.c
+++ b/contrib/tsm_system_time/tsm_system_time.c
@@ -35,16 +35,17 @@ PG_MODULE_MAGIC;
 typedef struct
 {
 	SamplerRandomState randstate;
-	uint32			seed;			/* random seed */
-	BlockNumber		nblocks;		/* number of block in relation */
-	int32			time;			/* time limit for sampling */
-	TimestampTz		start_time;		/* start time of sampling */
-	TimestampTz		end_time;		/* end time of sampling */
-	OffsetNumber	lt;				/* last tuple returned from current block */
-	BlockNumber		step;			/* step size */
-	BlockNumber		lb;				/* last block visited */
-	BlockNumber		estblocks;		/* estimated number of returned blocks (moving) */
-	BlockNumber		doneblocks;		/* number of already returned blocks */
+	uint32		seed;			/* random seed */
+	BlockNumber nblocks;		/* number of block in relation */
+	int32		time;			/* time limit for sampling */
+	TimestampTz start_time;		/* start time of sampling */
+	TimestampTz end_time;		/* end time of sampling */
+	OffsetNumber lt;			/* last tuple returned from current block */
+	BlockNumber step;			/* step size */
+	BlockNumber lb;				/* last block visited */
+	BlockNumber estblocks;		/* estimated number of returned blocks
+								 * (moving) */
+	BlockNumber doneblocks;		/* number of already returned blocks */
 } SystemSamplerData;
 
 
@@ -63,11 +64,11 @@ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
 Datum
 tsm_system_time_init(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	uint32				seed = PG_GETARG_UINT32(1);
-	int32				time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
-	HeapScanDesc		scan = tsdesc->heapScan;
-	SystemSamplerData  *sampler;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	uint32		seed = PG_GETARG_UINT32(1);
+	int32		time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
+	HeapScanDesc scan = tsdesc->heapScan;
+	SystemSamplerData *sampler;
 
 	if (time < 1)
 		ereport(ERROR,
@@ -92,6 +93,7 @@ tsm_system_time_init(PG_FUNCTION_ARGS)
 
 	/* Find relative prime as step size for linear probing. */
 	sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+
 	/*
 	 * Randomize start position so that blocks close to step size don't have
 	 * higher probability of being chosen on very short scan.
@@ -111,8 +113,8 @@ tsm_system_time_init(PG_FUNCTION_ARGS)
 Datum
 tsm_system_time_nextblock(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
 	sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
 	sampler->doneblocks++;
@@ -125,16 +127,16 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
 	 * Update the estimations for time limit at least 10 times per estimated
 	 * number of returned blocks to handle variations in block read speed.
 	 */
-	if (sampler->doneblocks % Max(sampler->estblocks/10, 1) == 0)
+	if (sampler->doneblocks % Max(sampler->estblocks / 10, 1) == 0)
 	{
-		TimestampTz	now = GetCurrentTimestamp();
-		long        secs;
-		int         usecs;
+		TimestampTz now = GetCurrentTimestamp();
+		long		secs;
+		int			usecs;
 		int			usecs_remaining;
 		int			time_per_block;
 
 		TimestampDifference(sampler->start_time, now, &secs, &usecs);
-		usecs += (int) secs * 1000000;
+		usecs += (int) secs *1000000;
 
 		time_per_block = usecs / sampler->doneblocks;
 
@@ -144,7 +146,7 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
 			PG_RETURN_UINT32(InvalidBlockNumber);
 
 		/* Remaining microseconds */
-		usecs_remaining = usecs + (int) secs * 1000000;
+		usecs_remaining = usecs + (int) secs *1000000;
 
 		/* Recalculate estimated returned number of blocks */
 		if (time_per_block < usecs_remaining && time_per_block > 0)
@@ -161,10 +163,10 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
 Datum
 tsm_system_time_nexttuple(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	OffsetNumber		maxoffset = PG_GETARG_UINT16(2);
-	SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
-	OffsetNumber		tupoffset = sampler->lt;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+	SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+	OffsetNumber tupoffset = sampler->lt;
 
 	if (tupoffset == InvalidOffsetNumber)
 		tupoffset = FirstOffsetNumber;
@@ -198,8 +200,8 @@ tsm_system_time_end(PG_FUNCTION_ARGS)
 Datum
 tsm_system_time_reset(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
 	sampler->lt = InvalidOffsetNumber;
 	sampler->start_time = GetCurrentTimestamp();
@@ -221,18 +223,18 @@ tsm_system_time_reset(PG_FUNCTION_ARGS)
 Datum
 tsm_system_time_cost(PG_FUNCTION_ARGS)
 {
-	PlannerInfo	   *root = (PlannerInfo *) PG_GETARG_POINTER(0);
-	Path		   *path = (Path *) PG_GETARG_POINTER(1);
-	RelOptInfo	   *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
-	List		   *args = (List *) PG_GETARG_POINTER(3);
-	BlockNumber	   *pages = (BlockNumber *) PG_GETARG_POINTER(4);
-	double		   *tuples = (double *) PG_GETARG_POINTER(5);
-	Node		   *limitnode;
-	int32			time;
-	BlockNumber		relpages;
-	double			reltuples;
-	double			density;
-	double			spc_random_page_cost;
+	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+	Path	   *path = (Path *) PG_GETARG_POINTER(1);
+	RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+	List	   *args = (List *) PG_GETARG_POINTER(3);
+	BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+	double	   *tuples = (double *) PG_GETARG_POINTER(5);
+	Node	   *limitnode;
+	int32		time;
+	BlockNumber relpages;
+	double		reltuples;
+	double		density;
+	double		spc_random_page_cost;
 
 	limitnode = linitial(args);
 	limitnode = estimate_expression_value(root, limitnode);
@@ -269,10 +271,10 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
 	/*
 	 * Assumption here is that we'll never read less than 1% of table pages,
 	 * this is here mainly because it is much less bad to overestimate than
-	 * underestimate and using just spc_random_page_cost will probably lead
-	 * to underestimations in general.
+	 * underestimate and using just spc_random_page_cost will probably lead to
+	 * underestimations in general.
 	 */
-	*pages = Min(baserel->pages, Max(time/spc_random_page_cost, baserel->pages/100));
+	*pages = Min(baserel->pages, Max(time / spc_random_page_cost, baserel->pages / 100));
 	*tuples = rint(density * (double) *pages * path->rows / baserel->tuples);
 	path->rows = *tuples;
 
@@ -280,9 +282,9 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
 }
 
 static uint32
-gcd (uint32 a, uint32 b)
+gcd(uint32 a, uint32 b)
 {
-	uint32 c;
+	uint32		c;
 
 	while (a != 0)
 	{
@@ -298,8 +300,8 @@ static uint32
 random_relative_prime(uint32 n, SamplerRandomState randstate)
 {
 	/* Pick random starting number, with some limits on what it can be. */
-	uint32 r = (uint32) sampler_random_fract(randstate) * n/2 + n/4,
-		   t;
+	uint32		r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
+				t;
 
 	/*
 	 * This should only take 2 or 3 iterations as the probability of 2 numbers
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 199512551e5..ff18b220c2b 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -387,7 +387,7 @@ bringetbitmap(PG_FUNCTION_ARGS)
 					 */
 					Assert((key->sk_flags & SK_ISNULL) ||
 						   (key->sk_collation ==
-					   bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
+					  bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
 
 					/* First time this column? look up consistent function */
 					if (consistentFn[keyattno - 1].fn_oid == InvalidOid)
@@ -523,10 +523,10 @@ brinbuildCallback(Relation index,
 	thisblock = ItemPointerGetBlockNumber(&htup->t_self);
 
 	/*
-	 * If we're in a block that belongs to a future range, summarize what we've
-	 * got and start afresh.  Note the scan might have skipped many pages,
-	 * if they were devoid of live tuples; make sure to insert index tuples
-	 * for those too.
+	 * If we're in a block that belongs to a future range, summarize what
+	 * we've got and start afresh.  Note the scan might have skipped many
+	 * pages, if they were devoid of live tuples; make sure to insert index
+	 * tuples for those too.
 	 */
 	while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
 	{
@@ -660,7 +660,6 @@ brinbuild(PG_FUNCTION_ARGS)
 Datum
 brinbuildempty(PG_FUNCTION_ARGS)
 {
-
 	Relation	index = (Relation) PG_GETARG_POINTER(0);
 	Buffer		metabuf;
 
@@ -696,7 +695,7 @@ brinbulkdelete(PG_FUNCTION_ARGS)
 {
 	/* other arguments are not currently used */
 	IndexBulkDeleteResult *stats =
-		(IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+	(IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
 
 	/* allocate stats if first time through, else re-use existing struct */
 	if (stats == NULL)
@@ -714,7 +713,7 @@ brinvacuumcleanup(PG_FUNCTION_ARGS)
 {
 	IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
 	IndexBulkDeleteResult *stats =
-		(IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+	(IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
 	Relation	heapRel;
 
 	/* No-op in ANALYZE ONLY mode */
@@ -900,7 +899,7 @@ terminate_brin_buildstate(BrinBuildState *state)
 
 		page = BufferGetPage(state->bs_currentInsertBuf);
 		RecordPageWithFreeSpace(state->bs_irel,
-								BufferGetBlockNumber(state->bs_currentInsertBuf),
+							BufferGetBlockNumber(state->bs_currentInsertBuf),
 								PageGetFreeSpace(page));
 		ReleaseBuffer(state->bs_currentInsertBuf);
 	}
diff --git a/src/backend/access/brin/brin_inclusion.c b/src/backend/access/brin/brin_inclusion.c
index 1f0bc7fdb1f..803b07f10a9 100644
--- a/src/backend/access/brin/brin_inclusion.c
+++ b/src/backend/access/brin/brin_inclusion.c
@@ -61,11 +61,11 @@
  * 0 - the union of the values in the block range
  * 1 - whether an empty value is present in any tuple in the block range
  * 2 - whether the values in the block range cannot be merged (e.g. an IPv6
- *     address amidst IPv4 addresses).
+ *	   address amidst IPv4 addresses).
  */
-#define	INCLUSION_UNION				0
-#define	INCLUSION_UNMERGEABLE		1
-#define	INCLUSION_CONTAINS_EMPTY	2
+#define INCLUSION_UNION				0
+#define INCLUSION_UNMERGEABLE		1
+#define INCLUSION_CONTAINS_EMPTY	2
 
 
 typedef struct InclusionOpaque
@@ -294,22 +294,22 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
 	unionval = column->bv_values[INCLUSION_UNION];
 	switch (key->sk_strategy)
 	{
-		/*
-		 * Placement strategies
-		 *
-		 * These are implemented by logically negating the result of the
-		 * converse placement operator; for this to work, the converse operator
-		 * must be part of the opclass.  An error will be thrown by
-		 * inclusion_get_strategy_procinfo() if the required strategy is not
-		 * part of the opclass.
-		 *
-		 * These all return false if either argument is empty, so there is
-		 * no need to check for empty elements.
-		 */
+			/*
+			 * Placement strategies
+			 *
+			 * These are implemented by logically negating the result of the
+			 * converse placement operator; for this to work, the converse
+			 * operator must be part of the opclass.  An error will be thrown
+			 * by inclusion_get_strategy_procinfo() if the required strategy
+			 * is not part of the opclass.
+			 *
+			 * These all return false if either argument is empty, so there is
+			 * no need to check for empty elements.
+			 */
 
 		case RTLeftStrategyNumber:
 			finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
-													RTOverRightStrategyNumber);
+												  RTOverRightStrategyNumber);
 			result = FunctionCall2Coll(finfo, colloid, unionval, query);
 			PG_RETURN_BOOL(!DatumGetBool(result));
 
@@ -333,7 +333,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
 
 		case RTBelowStrategyNumber:
 			finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
-													RTOverAboveStrategyNumber);
+												  RTOverAboveStrategyNumber);
 			result = FunctionCall2Coll(finfo, colloid, unionval, query);
 			PG_RETURN_BOOL(!DatumGetBool(result));
 
@@ -351,7 +351,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
 
 		case RTAboveStrategyNumber:
 			finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
-													RTOverBelowStrategyNumber);
+												  RTOverBelowStrategyNumber);
 			result = FunctionCall2Coll(finfo, colloid, unionval, query);
 			PG_RETURN_BOOL(!DatumGetBool(result));
 
@@ -381,8 +381,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
 			 * strategies because some elements can be contained even though
 			 * the union is not; instead we use the overlap operator.
 			 *
-			 * We check for empty elements separately as they are not merged to
-			 * the union but contained by everything.
+			 * We check for empty elements separately as they are not merged
+			 * to the union but contained by everything.
 			 */
 
 		case RTContainedByStrategyNumber:
@@ -400,8 +400,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
 			/*
 			 * Adjacent strategy
 			 *
-			 * We test for overlap first but to be safe we need to call
-			 * the actual adjacent operator also.
+			 * We test for overlap first but to be safe we need to call the
+			 * actual adjacent operator also.
 			 *
 			 * An empty element cannot be adjacent to any other, so there is
 			 * no need to check for it.
@@ -426,8 +426,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
 			 * the contains operator.  Generally, inequality strategies do not
 			 * make much sense for the types which will be used with the
 			 * inclusion BRIN family of opclasses, but is is possible to
-			 * implement them with logical negation of the left-of and right-of
-			 * operators.
+			 * implement them with logical negation of the left-of and
+			 * right-of operators.
 			 *
 			 * NB: These strategies cannot be used with geometric datatypes
 			 * that use comparison of areas!  The only exception is the "same"
diff --git a/src/backend/access/brin/brin_minmax.c b/src/backend/access/brin/brin_minmax.c
index b105f980eca..7cd98887c0f 100644
--- a/src/backend/access/brin/brin_minmax.c
+++ b/src/backend/access/brin/brin_minmax.c
@@ -33,7 +33,7 @@ Datum		brin_minmax_add_value(PG_FUNCTION_ARGS);
 Datum		brin_minmax_consistent(PG_FUNCTION_ARGS);
 Datum		brin_minmax_union(PG_FUNCTION_ARGS);
 static FmgrInfo *minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno,
-					Oid subtype, uint16 strategynum);
+							 Oid subtype, uint16 strategynum);
 
 
 Datum
@@ -209,7 +209,7 @@ brin_minmax_consistent(PG_FUNCTION_ARGS)
 				break;
 			/* max() >= scankey */
 			finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
-												 BTGreaterEqualStrategyNumber);
+											   BTGreaterEqualStrategyNumber);
 			matches = FunctionCall2Coll(finfo, colloid, column->bv_values[1],
 										value);
 			break;
@@ -260,10 +260,10 @@ brin_minmax_union(PG_FUNCTION_ARGS)
 	attr = bdesc->bd_tupdesc->attrs[attno - 1];
 
 	/*
-	 * Adjust "allnulls".  If A doesn't have values, just copy the values
-	 * from B into A, and we're done.  We cannot run the operators in this
-	 * case, because values in A might contain garbage.  Note we already
-	 * established that B contains values.
+	 * Adjust "allnulls".  If A doesn't have values, just copy the values from
+	 * B into A, and we're done.  We cannot run the operators in this case,
+	 * because values in A might contain garbage.  Note we already established
+	 * that B contains values.
 	 */
 	if (col_a->bv_allnulls)
 	{
@@ -355,7 +355,7 @@ minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno, Oid subtype,
 				 strategynum, attr->atttypid, subtype, opfamily);
 
 		oprid = DatumGetObjectId(SysCacheGetAttr(AMOPSTRATEGY, tuple,
-												 Anum_pg_amop_amopopr, &isNull));
+											 Anum_pg_amop_amopopr, &isNull));
 		ReleaseSysCache(tuple);
 		Assert(!isNull && RegProcedureIsValid(oprid));
 
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index 80795eca650..62d440f76b8 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -48,7 +48,7 @@ struct BrinRevmap
 {
 	Relation	rm_irel;
 	BlockNumber rm_pagesPerRange;
-	BlockNumber rm_lastRevmapPage; /* cached from the metapage */
+	BlockNumber rm_lastRevmapPage;		/* cached from the metapage */
 	Buffer		rm_metaBuf;
 	Buffer		rm_currBuf;
 };
@@ -57,7 +57,7 @@ struct BrinRevmap
 
 
 static BlockNumber revmap_get_blkno(BrinRevmap *revmap,
-				  BlockNumber heapBlk);
+				 BlockNumber heapBlk);
 static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk);
 static BlockNumber revmap_extend_and_get_blkno(BrinRevmap *revmap,
 							BlockNumber heapBlk);
@@ -110,7 +110,7 @@ brinRevmapTerminate(BrinRevmap *revmap)
 void
 brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
 {
-	BlockNumber	mapBlk PG_USED_FOR_ASSERTS_ONLY;
+	BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;
 
 	mapBlk = revmap_extend_and_get_blkno(revmap, heapBlk);
 
@@ -245,7 +245,7 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
 		if (ItemPointerIsValid(&previptr) && ItemPointerEquals(&previptr, iptr))
 			ereport(ERROR,
 					(errcode(ERRCODE_INDEX_CORRUPTED),
-					 errmsg_internal("corrupted BRIN index: inconsistent range map")));
+			errmsg_internal("corrupted BRIN index: inconsistent range map")));
 		previptr = *iptr;
 
 		blk = ItemPointerGetBlockNumber(iptr);
@@ -356,7 +356,7 @@ revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)
 static BlockNumber
 revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
 {
-	BlockNumber	targetblk;
+	BlockNumber targetblk;
 
 	/* obtain revmap block number, skip 1 for metapage block */
 	targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;
@@ -445,10 +445,10 @@ revmap_physical_extend(BrinRevmap *revmap)
 	if (!PageIsNew(page) && !BRIN_IS_REGULAR_PAGE(page))
 		ereport(ERROR,
 				(errcode(ERRCODE_INDEX_CORRUPTED),
-				 errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
-						BrinPageType(page),
-						RelationGetRelationName(irel),
-						BufferGetBlockNumber(buf))));
+		  errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
+				 BrinPageType(page),
+				 RelationGetRelationName(irel),
+				 BufferGetBlockNumber(buf))));
 
 	/* If the page is in use, evacuate it and restart */
 	if (brin_start_evacuating_page(irel, buf))
diff --git a/src/backend/access/brin/brin_tuple.c b/src/backend/access/brin/brin_tuple.c
index 22ce74a4f43..72356c066c7 100644
--- a/src/backend/access/brin/brin_tuple.c
+++ b/src/backend/access/brin/brin_tuple.c
@@ -68,7 +68,7 @@ brtuple_disk_tupdesc(BrinDesc *brdesc)
 		{
 			for (j = 0; j < brdesc->bd_info[i]->oi_nstored; j++)
 				TupleDescInitEntry(tupdesc, attno++, NULL,
-								   brdesc->bd_info[i]->oi_typcache[j]->type_id,
+								 brdesc->bd_info[i]->oi_typcache[j]->type_id,
 								   -1, 0);
 		}
 
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index 3e2b8b5fedf..54b2db88a68 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -1785,7 +1785,8 @@ gingetbitmap(PG_FUNCTION_ARGS)
 	/*
 	 * Set up the scan keys, and check for unsatisfiable query.
 	 */
-	ginFreeScanKeys(so); /* there should be no keys yet, but just to be sure */
+	ginFreeScanKeys(so);		/* there should be no keys yet, but just to be
+								 * sure */
 	ginNewScanKey(scan);
 
 	if (GinIsVoidRes(scan))
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 445466b4477..cb4e32fe66b 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -527,7 +527,7 @@ ginoptions(PG_FUNCTION_ARGS)
 	static const relopt_parse_elt tab[] = {
 		{"fastupdate", RELOPT_TYPE_BOOL, offsetof(GinOptions, useFastUpdate)},
 		{"gin_pending_list_limit", RELOPT_TYPE_INT, offsetof(GinOptions,
-																pendingListCleanupSize)}
+													 pendingListCleanupSize)}
 	};
 
 	options = parseRelOptions(reloptions, validate, RELOPT_KIND_GIN,
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 96b7701633f..0e499598a42 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -1407,7 +1407,7 @@ initGISTstate(Relation index)
 		/* opclasses are not required to provide a Fetch method */
 		if (OidIsValid(index_getprocid(index, i + 1, GIST_FETCH_PROC)))
 			fmgr_info_copy(&(giststate->fetchFn[i]),
-						 index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
+						   index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
 						   scanCxt);
 		else
 			giststate->fetchFn[i].fn_oid = InvalidOid;
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index beb402357c0..ad392948756 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -154,8 +154,8 @@ gistrescan(PG_FUNCTION_ARGS)
 	}
 
 	/*
-	 * If we're doing an index-only scan, on the first call, also initialize
-	 * a tuple descriptor to represent the returned index tuples and create a
+	 * If we're doing an index-only scan, on the first call, also initialize a
+	 * tuple descriptor to represent the returned index tuples and create a
 	 * memory context to hold them during the scan.
 	 */
 	if (scan->xs_want_itup && !scan->xs_itupdesc)
@@ -169,7 +169,7 @@ gistrescan(PG_FUNCTION_ARGS)
 		 * descriptor. Instead, construct a descriptor with the original data
 		 * types.
 		 */
-		natts =  RelationGetNumberOfAttributes(scan->indexRelation);
+		natts = RelationGetNumberOfAttributes(scan->indexRelation);
 		so->giststate->fetchTupdesc = CreateTemplateTupleDesc(natts, false);
 		for (attno = 1; attno <= natts; attno++)
 		{
@@ -288,9 +288,9 @@ gistrescan(PG_FUNCTION_ARGS)
 			fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
 
 			/*
-			 * Look up the datatype returned by the original ordering operator.
-			 * GiST always uses a float8 for the distance function, but the
-			 * ordering operator could be anything else.
+			 * Look up the datatype returned by the original ordering
+			 * operator. GiST always uses a float8 for the distance function,
+			 * but the ordering operator could be anything else.
 			 *
 			 * XXX: The distance function is only allowed to be lossy if the
 			 * ordering operator's result type is float4 or float8.  Otherwise
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index bf9fbf30a8b..7d596a3e2e6 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -583,7 +583,7 @@ gistFormTuple(GISTSTATE *giststate, Relation r,
 						  isleaf);
 			cep = (GISTENTRY *)
 				DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[i],
-										   giststate->supportCollation[i],
+											  giststate->supportCollation[i],
 												  PointerGetDatum(&centry)));
 			compatt[i] = cep->key;
 		}
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index cb86a4fa3e6..caacc105d25 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -80,7 +80,7 @@ bool		synchronize_seqscans = true;
 static HeapScanDesc heap_beginscan_internal(Relation relation,
 						Snapshot snapshot,
 						int nkeys, ScanKey key,
-						bool allow_strat, bool allow_sync, bool allow_pagemode,
+					  bool allow_strat, bool allow_sync, bool allow_pagemode,
 						bool is_bitmapscan, bool is_samplescan,
 						bool temp_snap);
 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
@@ -1366,8 +1366,8 @@ heap_beginscan_sampling(Relation relation, Snapshot snapshot,
 static HeapScanDesc
 heap_beginscan_internal(Relation relation, Snapshot snapshot,
 						int nkeys, ScanKey key,
-						bool allow_strat, bool allow_sync, bool allow_pagemode,
-						bool is_bitmapscan, bool is_samplescan, bool temp_snap)
+					  bool allow_strat, bool allow_sync, bool allow_pagemode,
+					  bool is_bitmapscan, bool is_samplescan, bool temp_snap)
 {
 	HeapScanDesc scan;
 
@@ -2284,9 +2284,9 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
 {
 	/*
 	 * For now, parallel operations are required to be strictly read-only.
-	 * Unlike heap_update() and heap_delete(), an insert should never create
-	 * a combo CID, so it might be possible to relax this restriction, but
-	 * not without more thought and testing.
+	 * Unlike heap_update() and heap_delete(), an insert should never create a
+	 * combo CID, so it might be possible to relax this restriction, but not
+	 * without more thought and testing.
 	 */
 	if (IsInParallelMode())
 		ereport(ERROR,
@@ -2768,8 +2768,8 @@ l1:
 		infomask = tp.t_data->t_infomask;
 
 		/*
-		 * Sleep until concurrent transaction ends -- except when there's a single
-		 * locker and it's our own transaction.  Note we don't care
+		 * Sleep until concurrent transaction ends -- except when there's a
+		 * single locker and it's our own transaction.  Note we don't care
 		 * which lock mode the locker has, because we need the strongest one.
 		 *
 		 * Before sleeping, we need to acquire tuple lock to establish our
@@ -2822,8 +2822,8 @@ l1:
 		else if (!TransactionIdIsCurrentTransactionId(xwait))
 		{
 			/*
-			 * Wait for regular transaction to end; but first, acquire
-			 * tuple lock.
+			 * Wait for regular transaction to end; but first, acquire tuple
+			 * lock.
 			 */
 			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 			heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
@@ -3336,8 +3336,8 @@ l2:
 		 *
 		 * Before sleeping, we need to acquire tuple lock to establish our
 		 * priority for the tuple (see heap_lock_tuple).  LockTuple will
-		 * release us when we are next-in-line for the tuple.  Note we must not
-		 * acquire the tuple lock until we're sure we're going to sleep;
+		 * release us when we are next-in-line for the tuple.  Note we must
+		 * not acquire the tuple lock until we're sure we're going to sleep;
 		 * otherwise we're open for race conditions with other transactions
 		 * holding the tuple lock which sleep on us.
 		 *
@@ -3374,8 +3374,8 @@ l2:
 				 */
 				if (xmax_infomask_changed(oldtup.t_data->t_infomask,
 										  infomask) ||
-					!TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
-										 xwait))
+				!TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+									 xwait))
 					goto l2;
 			}
 
@@ -3425,9 +3425,9 @@ l2:
 		else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
 		{
 			/*
-			 * If it's just a key-share locker, and we're not changing the
-			 * key columns, we don't need to wait for it to end; but we
-			 * need to preserve it as locker.
+			 * If it's just a key-share locker, and we're not changing the key
+			 * columns, we don't need to wait for it to end; but we need to
+			 * preserve it as locker.
 			 */
 			checked_lockers = true;
 			locker_remains = true;
@@ -3436,8 +3436,8 @@ l2:
 		else
 		{
 			/*
-			 * Wait for regular transaction to end; but first, acquire
-			 * tuple lock.
+			 * Wait for regular transaction to end; but first, acquire tuple
+			 * lock.
 			 */
 			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 			heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
@@ -3454,7 +3454,7 @@ l2:
 			 */
 			if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
 				!TransactionIdEquals(xwait,
-									 HeapTupleHeaderGetRawXmax(oldtup.t_data)))
+								   HeapTupleHeaderGetRawXmax(oldtup.t_data)))
 				goto l2;
 
 			/* Otherwise check if it committed or aborted */
@@ -3779,7 +3779,7 @@ l2:
 		HeapTupleClearHeapOnly(newtup);
 	}
 
-	RelationPutHeapTuple(relation, newbuf, heaptup, false);	/* insert new tuple */
+	RelationPutHeapTuple(relation, newbuf, heaptup, false);		/* insert new tuple */
 
 	if (!already_marked)
 	{
@@ -4477,7 +4477,7 @@ l3:
 		if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
 			TransactionIdIsCurrentTransactionId(xwait))
 		{
-			 /* ... but if the xmax changed in the meantime, start over */
+			/* ... but if the xmax changed in the meantime, start over */
 			LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
 			if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
 				!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@@ -4501,8 +4501,8 @@ l3:
 			 * for the tuple.  We must do this even if we are share-locking.
 			 *
 			 * If we are forced to "start over" below, we keep the tuple lock;
-			 * this arranges that we stay at the head of the line while rechecking
-			 * tuple state.
+			 * this arranges that we stay at the head of the line while
+			 * rechecking tuple state.
 			 */
 			if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
 									  &have_tuple_lock))
@@ -4530,11 +4530,11 @@ l3:
 				{
 					case LockWaitBlock:
 						MultiXactIdWait((MultiXactId) xwait, status, infomask,
-										relation, &tuple->t_self, XLTW_Lock, NULL);
+								  relation, &tuple->t_self, XLTW_Lock, NULL);
 						break;
 					case LockWaitSkip:
 						if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
-														status, infomask, relation,
+												  status, infomask, relation,
 														NULL))
 						{
 							result = HeapTupleWouldBlock;
@@ -4545,12 +4545,12 @@ l3:
 						break;
 					case LockWaitError:
 						if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
-														status, infomask, relation,
+												  status, infomask, relation,
 														NULL))
 							ereport(ERROR,
 									(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
 									 errmsg("could not obtain lock on row in relation \"%s\"",
-											RelationGetRelationName(relation))));
+										RelationGetRelationName(relation))));
 
 						break;
 				}
@@ -4588,7 +4588,7 @@ l3:
 							ereport(ERROR,
 									(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
 									 errmsg("could not obtain lock on row in relation \"%s\"",
-											RelationGetRelationName(relation))));
+										RelationGetRelationName(relation))));
 						break;
 				}
 			}
@@ -4613,9 +4613,9 @@ l3:
 			LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
 
 			/*
-			 * xwait is done, but if xwait had just locked the tuple then
-			 * some other xact could update this tuple before we get to
-			 * this point.  Check for xmax change, and start over if so.
+			 * xwait is done, but if xwait had just locked the tuple then some
+			 * other xact could update this tuple before we get to this point.
+			 * Check for xmax change, and start over if so.
 			 */
 			if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
 				!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@@ -4628,9 +4628,9 @@ l3:
 				 * Otherwise check if it committed or aborted.  Note we cannot
 				 * be here if the tuple was only locked by somebody who didn't
 				 * conflict with us; that would have been handled above.  So
-				 * that transaction must necessarily be gone by now.  But don't
-				 * check for this in the multixact case, because some locker
-				 * transactions might still be running.
+				 * that transaction must necessarily be gone by now.  But
+				 * don't check for this in the multixact case, because some
+				 * locker transactions might still be running.
 				 */
 				UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
 			}
@@ -4810,8 +4810,8 @@ heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
 			if (!ConditionalLockTupleTuplock(relation, tid, mode))
 				ereport(ERROR,
 						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-						 errmsg("could not obtain lock on row in relation \"%s\"",
-								RelationGetRelationName(relation))));
+					errmsg("could not obtain lock on row in relation \"%s\"",
+						   RelationGetRelationName(relation))));
 			break;
 	}
 	*have_tuple_lock = true;
@@ -5513,8 +5513,8 @@ heap_finish_speculative(Relation relation, HeapTuple tuple)
 	MarkBufferDirty(buffer);
 
 	/*
-	 * Replace the speculative insertion token with a real t_ctid,
-	 * pointing to itself like it does on regular tuples.
+	 * Replace the speculative insertion token with a real t_ctid, pointing to
+	 * itself like it does on regular tuples.
 	 */
 	htup->t_ctid = tuple->t_self;
 
@@ -6447,23 +6447,23 @@ static bool
 DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
 						LockTupleMode lockmode)
 {
-	bool	allow_old;
-	int		nmembers;
+	bool		allow_old;
+	int			nmembers;
 	MultiXactMember *members;
-	bool	result = false;
-	LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
+	bool		result = false;
+	LOCKMODE	wanted = tupleLockExtraInfo[lockmode].hwlock;
 
 	allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
 	nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
 									 HEAP_XMAX_IS_LOCKED_ONLY(infomask));
 	if (nmembers >= 0)
 	{
-		int		i;
+		int			i;
 
 		for (i = 0; i < nmembers; i++)
 		{
-			TransactionId		memxid;
-			LOCKMODE			memlockmode;
+			TransactionId memxid;
+			LOCKMODE	memlockmode;
 
 			memlockmode = LOCKMODE_from_mxstatus(members[i].status);
 
@@ -7093,7 +7093,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
 	{
 		XLogRegisterBufData(0,
 							((char *) newtup->t_data) + SizeofHeapTupleHeader,
-							newtup->t_len - SizeofHeapTupleHeader - suffixlen);
+						  newtup->t_len - SizeofHeapTupleHeader - suffixlen);
 	}
 	else
 	{
@@ -7105,8 +7105,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
 		if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
 		{
 			XLogRegisterBufData(0,
-								((char *) newtup->t_data) + SizeofHeapTupleHeader,
-								newtup->t_data->t_hoff - SizeofHeapTupleHeader);
+						   ((char *) newtup->t_data) + SizeofHeapTupleHeader,
+							 newtup->t_data->t_hoff - SizeofHeapTupleHeader);
 		}
 
 		/* data after common prefix */
@@ -7289,8 +7289,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *
 		{
 			/*
 			 * The OID column can appear in an index definition, but that's
-			 * OK, because we always copy the OID if present (see below). Other
-			 * system columns may not.
+			 * OK, because we always copy the OID if present (see below).
+			 * Other system columns may not.
 			 */
 			if (attno == ObjectIdAttributeNumber)
 				continue;
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index a9f0ca35e49..6db73bf9d00 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -60,9 +60,9 @@ RelationPutHeapTuple(Relation relation,
 	ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
 
 	/*
-	 * Insert the correct position into CTID of the stored tuple, too
-	 * (unless this is a speculative insertion, in which case the token is
-	 * held in CTID field instead)
+	 * Insert the correct position into CTID of the stored tuple, too (unless
+	 * this is a speculative insertion, in which case the token is held in
+	 * CTID field instead)
 	 */
 	if (!token)
 	{
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index e6e4d28b74f..1043362f914 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -185,11 +185,11 @@ BuildIndexValueDescription(Relation indexRelation,
 	 * Check permissions- if the user does not have access to view all of the
 	 * key columns then return NULL to avoid leaking data.
 	 *
-	 * First check if RLS is enabled for the relation.  If so, return NULL
-	 * to avoid leaking data.
+	 * First check if RLS is enabled for the relation.  If so, return NULL to
+	 * avoid leaking data.
 	 *
-	 * Next we need to check table-level SELECT access and then, if
-	 * there is no access there, check column-level permissions.
+	 * Next we need to check table-level SELECT access and then, if there is
+	 * no access there, check column-level permissions.
 	 */
 
 	/*
@@ -215,18 +215,18 @@ BuildIndexValueDescription(Relation indexRelation,
 	if (aclresult != ACLCHECK_OK)
 	{
 		/*
-		 * No table-level access, so step through the columns in the
-		 * index and make sure the user has SELECT rights on all of them.
+		 * No table-level access, so step through the columns in the index and
+		 * make sure the user has SELECT rights on all of them.
 		 */
 		for (keyno = 0; keyno < idxrec->indnatts; keyno++)
 		{
 			AttrNumber	attnum = idxrec->indkey.values[keyno];
 
 			/*
-			 * Note that if attnum == InvalidAttrNumber, then this is an
-			 * index based on an expression and we return no detail rather
-			 * than try to figure out what column(s) the expression includes
-			 * and if the user has SELECT rights on them.
+			 * Note that if attnum == InvalidAttrNumber, then this is an index
+			 * based on an expression and we return no detail rather than try
+			 * to figure out what column(s) the expression includes and if the
+			 * user has SELECT rights on them.
 			 */
 			if (attnum == InvalidAttrNumber ||
 				pg_attribute_aclcheck(indrelid, attnum, GetUserId(),
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 4a60c5fa2c8..77c2fdf90b4 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -160,8 +160,8 @@ top:
 	 */
 	if (checkUnique != UNIQUE_CHECK_NO)
 	{
-		TransactionId	xwait;
-		uint32			speculativeToken;
+		TransactionId xwait;
+		uint32		speculativeToken;
 
 		offset = _bt_binsrch(rel, buf, natts, itup_scankey, false);
 		xwait = _bt_check_unique(rel, itup, heapRel, buf, offset, itup_scankey,
@@ -171,9 +171,10 @@ top:
 		{
 			/* Have to wait for the other guy ... */
 			_bt_relbuf(rel, buf);
+
 			/*
-			 * If it's a speculative insertion, wait for it to finish (ie.
-			 * to go ahead with the insertion, or kill the tuple).  Otherwise
+			 * If it's a speculative insertion, wait for it to finish (ie. to
+			 * go ahead with the insertion, or kill the tuple).  Otherwise
 			 * wait for the transaction to finish as usual.
 			 */
 			if (speculativeToken)
@@ -417,8 +418,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 								(errcode(ERRCODE_UNIQUE_VIOLATION),
 								 errmsg("duplicate key value violates unique constraint \"%s\"",
 										RelationGetRelationName(rel)),
-								 key_desc ? errdetail("Key %s already exists.",
-													  key_desc) : 0,
+							   key_desc ? errdetail("Key %s already exists.",
+													key_desc) : 0,
 								 errtableconstraint(heapRel,
 											 RelationGetRelationName(rel))));
 					}
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 0f4128253f4..6e65db91eb5 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -1233,6 +1233,7 @@ _bt_pagedel(Relation rel, Buffer buf)
 					lbuf = _bt_getbuf(rel, leftsib, BT_READ);
 					lpage = BufferGetPage(lbuf);
 					lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
+
 					/*
 					 * If the left sibling is split again by another backend,
 					 * after we released the lock, we know that the first
@@ -1345,11 +1346,11 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
 	leafrightsib = opaque->btpo_next;
 
 	/*
-	 * Before attempting to lock the parent page, check that the right
-	 * sibling is not in half-dead state.  A half-dead right sibling would
-	 * have no downlink in the parent, which would be highly confusing later
-	 * when we delete the downlink that follows the current page's downlink.
-	 * (I believe the deletion would work correctly, but it would fail the
+	 * Before attempting to lock the parent page, check that the right sibling
+	 * is not in half-dead state.  A half-dead right sibling would have no
+	 * downlink in the parent, which would be highly confusing later when we
+	 * delete the downlink that follows the current page's downlink. (I
+	 * believe the deletion would work correctly, but it would fail the
 	 * cross-check we make that the following downlink points to the right
 	 * sibling of the delete page.)
 	 */
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index c2d52faa960..9431ab5d042 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -40,9 +40,8 @@ typedef struct
 	BTSpool    *spool;
 
 	/*
-	 * spool2 is needed only when the index is a unique index. Dead tuples
-	 * are put into spool2 instead of spool in order to avoid uniqueness
-	 * check.
+	 * spool2 is needed only when the index is a unique index. Dead tuples are
+	 * put into spool2 instead of spool in order to avoid uniqueness check.
 	 */
 	BTSpool    *spool2;
 	double		indtuples;
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index cfb1d64f86a..d69a0577a87 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -1027,10 +1027,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 		offnum = OffsetNumberPrev(offnum);
 
 	/*
-	 * By here the scan position is now set for the first key.  If all
-	 * further tuples are expected to match we set the SK_BT_MATCHED flag
-	 * to avoid re-checking the scan key later.  This is a big win for
-	 * slow key matches though is still significant even for fast datatypes.
+	 * By here the scan position is now set for the first key.  If all further
+	 * tuples are expected to match we set the SK_BT_MATCHED flag to avoid
+	 * re-checking the scan key later.  This is a big win for slow key matches
+	 * though is still significant even for fast datatypes.
 	 */
 	switch (startKeys[0]->sk_strategy)
 	{
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 625f490af80..f95f67ad4b5 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -742,7 +742,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
 			{
 				for (i = 1; i <= keysz; i++)
 				{
-					SortSupport	entry;
+					SortSupport entry;
 					Datum		attrDatum1,
 								attrDatum2;
 					bool		isNull1,
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index d1589f05eff..91331bad651 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -1430,8 +1430,8 @@ _bt_checkkeys(IndexScanDesc scan,
 		Datum		test;
 
 		/*
-		 * If the scan key has already matched we can skip this key, as
-		 * long as the index tuple does not contain NULL values.
+		 * If the scan key has already matched we can skip this key, as long
+		 * as the index tuple does not contain NULL values.
 		 */
 		if (key->sk_flags & SK_BT_MATCHED && !IndexTupleHasNulls(tuple))
 			continue;
@@ -1740,7 +1740,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
  * any items from the page, and so there is no need to search left from the
  * recorded offset.  (This observation also guarantees that the item is still
  * the right one to delete, which might otherwise be questionable since heap
- * TIDs can get recycled.)  This holds true even if the page has been modified
+ * TIDs can get recycled.)	This holds true even if the page has been modified
  * by inserts and page splits, so there is no need to consult the LSN.
  *
  * If the pin was released after reading the page, then we re-read it.  If it
diff --git a/src/backend/access/rmgrdesc/committsdesc.c b/src/backend/access/rmgrdesc/committsdesc.c
index 088fd1bc8b6..59975eae9a6 100644
--- a/src/backend/access/rmgrdesc/committsdesc.c
+++ b/src/backend/access/rmgrdesc/committsdesc.c
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * committsdesc.c
- *    rmgr descriptor routines for access/transam/commit_ts.c
+ *	  rmgr descriptor routines for access/transam/commit_ts.c
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/committsdesc.c
+ *	  src/backend/access/rmgrdesc/committsdesc.c
  *
  *-------------------------------------------------------------------------
  */
@@ -41,7 +41,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
 	else if (info == COMMIT_TS_SETTS)
 	{
 		xl_commit_ts_set *xlrec = (xl_commit_ts_set *) rec;
-		int		nsubxids;
+		int			nsubxids;
 
 		appendStringInfo(buf, "set %s/%d for: %u",
 						 timestamptz_to_str(xlrec->timestamp),
@@ -51,7 +51,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
 					sizeof(TransactionId));
 		if (nsubxids > 0)
 		{
-			int		i;
+			int			i;
 			TransactionId *subxids;
 
 			subxids = palloc(sizeof(TransactionId) * nsubxids);
diff --git a/src/backend/access/rmgrdesc/replorigindesc.c b/src/backend/access/rmgrdesc/replorigindesc.c
index 19bae9a0f84..60cf0f679db 100644
--- a/src/backend/access/rmgrdesc/replorigindesc.c
+++ b/src/backend/access/rmgrdesc/replorigindesc.c
@@ -1,13 +1,13 @@
 /*-------------------------------------------------------------------------
  *
  * replorigindesc.c
- *    rmgr descriptor routines for replication/logical/replication_origin.c
+ *	  rmgr descriptor routines for replication/logical/replication_origin.c
  *
  * Portions Copyright (c) 2015, PostgreSQL Global Development Group
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/replorigindesc.c
+ *	  src/backend/access/rmgrdesc/replorigindesc.c
  *
  *-------------------------------------------------------------------------
  */
@@ -26,6 +26,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
 		case XLOG_REPLORIGIN_SET:
 			{
 				xl_replorigin_set *xlrec;
+
 				xlrec = (xl_replorigin_set *) rec;
 
 				appendStringInfo(buf, "set %u; lsn %X/%X; force: %d",
@@ -38,6 +39,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
 		case XLOG_REPLORIGIN_DROP:
 			{
 				xl_replorigin_drop *xlrec;
+
 				xlrec = (xl_replorigin_drop *) rec;
 
 				appendStringInfo(buf, "drop %u", xlrec->node_id);
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 793f9bb51fa..7b5f9830507 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -37,7 +37,8 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
 
 	memset(parsed, 0, sizeof(*parsed));
 
-	parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
+	parsed->xinfo = 0;			/* default, if no XLOG_XACT_HAS_INFO is
+								 * present */
 
 	parsed->xact_time = xlrec->xact_time;
 
@@ -62,7 +63,7 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
 
 	if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
 	{
-		xl_xact_subxacts   *xl_subxacts = (xl_xact_subxacts *) data;
+		xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
 
 		parsed->nsubxacts = xl_subxacts->nsubxacts;
 		parsed->subxacts = xl_subxacts->subxacts;
@@ -123,7 +124,8 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
 
 	memset(parsed, 0, sizeof(*parsed));
 
-	parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
+	parsed->xinfo = 0;			/* default, if no XLOG_XACT_HAS_INFO is
+								 * present */
 
 	parsed->xact_time = xlrec->xact_time;
 
@@ -138,7 +140,7 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
 
 	if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
 	{
-		xl_xact_subxacts   *xl_subxacts = (xl_xact_subxacts *) data;
+		xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
 
 		parsed->nsubxacts = xl_subxacts->nsubxacts;
 		parsed->subxacts = xl_subxacts->subxacts;
@@ -236,8 +238,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
 	{
 		appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
 						 origin_id,
-						 (uint32)(parsed.origin_lsn >> 32),
-						 (uint32)parsed.origin_lsn,
+						 (uint32) (parsed.origin_lsn >> 32),
+						 (uint32) parsed.origin_lsn,
 						 timestamptz_to_str(parsed.origin_timestamp));
 	}
 }
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index 06c6944fc72..8a0d9098c5e 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -658,6 +658,7 @@ Datum
 spgcanreturn(PG_FUNCTION_ARGS)
 {
 	Relation	index = (Relation) PG_GETARG_POINTER(0);
+
 	/* int			i = PG_GETARG_INT32(1); */
 	SpGistCache *cache;
 
diff --git a/src/backend/access/tablesample/bernoulli.c b/src/backend/access/tablesample/bernoulli.c
index c91f3f593e5..563a9168f0f 100644
--- a/src/backend/access/tablesample/bernoulli.c
+++ b/src/backend/access/tablesample/bernoulli.c
@@ -27,13 +27,15 @@
 /* tsdesc */
 typedef struct
 {
-	uint32 seed;				/* random seed */
-	BlockNumber startblock;		/* starting block, we use ths for syncscan support */
+	uint32		seed;			/* random seed */
+	BlockNumber startblock;		/* starting block, we use this for syncscan
+								 * support */
 	BlockNumber nblocks;		/* number of blocks */
 	BlockNumber blockno;		/* current block */
-	float4 probability;			/* probabilty that tuple will be returned (0.0-1.0) */
+	float4		probability;	/* probability that tuple will be returned
+								 * (0.0-1.0) */
 	OffsetNumber lt;			/* last tuple returned from current block */
-	SamplerRandomState randstate; /* random generator tsdesc */
+	SamplerRandomState randstate;		/* random generator tsdesc */
 } BernoulliSamplerData;
 
 /*
@@ -42,10 +44,10 @@ typedef struct
 Datum
 tsm_bernoulli_init(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	uint32				seed = PG_GETARG_UINT32(1);
-	float4				percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
-	HeapScanDesc		scan = tsdesc->heapScan;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	uint32		seed = PG_GETARG_UINT32(1);
+	float4		percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
+	HeapScanDesc scan = tsdesc->heapScan;
 	BernoulliSamplerData *sampler;
 
 	if (percent < 0 || percent > 100)
@@ -77,14 +79,13 @@ tsm_bernoulli_init(PG_FUNCTION_ARGS)
 Datum
 tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc		   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	BernoulliSamplerData   *sampler =
-		(BernoulliSamplerData *) tsdesc->tsmdata;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	BernoulliSamplerData *sampler =
+	(BernoulliSamplerData *) tsdesc->tsmdata;
 
 	/*
-	 * Bernoulli sampling scans all blocks on the table and supports
-	 * syncscan so loop from startblock to startblock instead of
-	 * from 0 to nblocks.
+	 * Bernoulli sampling scans all blocks on the table and supports syncscan
+	 * so loop from startblock to startblock instead of from 0 to nblocks.
 	 */
 	if (sampler->blockno == InvalidBlockNumber)
 		sampler->blockno = sampler->startblock;
@@ -116,7 +117,7 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
  * tuples have same probability of being returned the visible and invisible
  * tuples will be returned in same ratio as they have in the actual table.
  * This means that there is no skew towards either visible or invisible tuples
- * and the  number returned visible tuples to from the executor node is the
+ * and the	number returned visible tuples to from the executor node is the
  * fraction of visible tuples which was specified in input.
  *
  * This is faster than doing the coinflip in the examinetuple because we don't
@@ -128,12 +129,12 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
 Datum
 tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc		   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	OffsetNumber			maxoffset = PG_GETARG_UINT16(2);
-	BernoulliSamplerData   *sampler =
-		(BernoulliSamplerData *) tsdesc->tsmdata;
-	OffsetNumber			tupoffset = sampler->lt;
-	float4					probability = sampler->probability;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+	BernoulliSamplerData *sampler =
+	(BernoulliSamplerData *) tsdesc->tsmdata;
+	OffsetNumber tupoffset = sampler->lt;
+	float4		probability = sampler->probability;
 
 	if (tupoffset == InvalidOffsetNumber)
 		tupoffset = FirstOffsetNumber;
@@ -142,8 +143,8 @@ tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
 
 	/*
 	 * Loop over tuple offsets until the random generator returns value that
-	 * is within the probability of returning the tuple or until we reach
-	 * end of the block.
+	 * is within the probability of returning the tuple or until we reach end
+	 * of the block.
 	 *
 	 * (This is our implementation of bernoulli trial)
 	 */
@@ -183,9 +184,9 @@ tsm_bernoulli_end(PG_FUNCTION_ARGS)
 Datum
 tsm_bernoulli_reset(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc		   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	BernoulliSamplerData   *sampler =
-		(BernoulliSamplerData *) tsdesc->tsmdata;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	BernoulliSamplerData *sampler =
+	(BernoulliSamplerData *) tsdesc->tsmdata;
 
 	sampler->blockno = InvalidBlockNumber;
 	sampler->lt = InvalidOffsetNumber;
@@ -200,14 +201,14 @@ tsm_bernoulli_reset(PG_FUNCTION_ARGS)
 Datum
 tsm_bernoulli_cost(PG_FUNCTION_ARGS)
 {
-	PlannerInfo	   *root = (PlannerInfo *) PG_GETARG_POINTER(0);
-	Path		   *path = (Path *) PG_GETARG_POINTER(1);
-	RelOptInfo	   *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
-	List		   *args = (List *) PG_GETARG_POINTER(3);
-	BlockNumber	   *pages = (BlockNumber *) PG_GETARG_POINTER(4);
-	double		   *tuples = (double *) PG_GETARG_POINTER(5);
-	Node		   *pctnode;
-	float4			samplesize;
+	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+	Path	   *path = (Path *) PG_GETARG_POINTER(1);
+	RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+	List	   *args = (List *) PG_GETARG_POINTER(3);
+	BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+	double	   *tuples = (double *) PG_GETARG_POINTER(5);
+	Node	   *pctnode;
+	float4		samplesize;
 
 	*pages = baserel->pages;
 
diff --git a/src/backend/access/tablesample/system.c b/src/backend/access/tablesample/system.c
index 1412e511faf..1d834369a4b 100644
--- a/src/backend/access/tablesample/system.c
+++ b/src/backend/access/tablesample/system.c
@@ -31,9 +31,9 @@
 typedef struct
 {
 	BlockSamplerData bs;
-	uint32 seed;				/* random seed */
+	uint32		seed;			/* random seed */
 	BlockNumber nblocks;		/* number of block in relation */
-	int samplesize;				/* number of blocks to return */
+	int			samplesize;		/* number of blocks to return */
 	OffsetNumber lt;			/* last tuple returned from current block */
 } SystemSamplerData;
 
@@ -44,11 +44,11 @@ typedef struct
 Datum
 tsm_system_init(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	uint32				seed = PG_GETARG_UINT32(1);
-	float4				percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
-	HeapScanDesc		scan = tsdesc->heapScan;
-	SystemSamplerData  *sampler;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	uint32		seed = PG_GETARG_UINT32(1);
+	float4		percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
+	HeapScanDesc scan = tsdesc->heapScan;
+	SystemSamplerData *sampler;
 
 	if (percent < 0 || percent > 100)
 		ereport(ERROR,
@@ -80,9 +80,9 @@ tsm_system_init(PG_FUNCTION_ARGS)
 Datum
 tsm_system_nextblock(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
-	BlockNumber			blockno;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+	BlockNumber blockno;
 
 	if (!BlockSampler_HasMore(&sampler->bs))
 		PG_RETURN_UINT32(InvalidBlockNumber);
@@ -99,10 +99,10 @@ tsm_system_nextblock(PG_FUNCTION_ARGS)
 Datum
 tsm_system_nexttuple(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	OffsetNumber		maxoffset = PG_GETARG_UINT16(2);
-	SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
-	OffsetNumber		tupoffset = sampler->lt;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+	SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+	OffsetNumber tupoffset = sampler->lt;
 
 	if (tupoffset == InvalidOffsetNumber)
 		tupoffset = FirstOffsetNumber;
@@ -136,8 +136,8 @@ tsm_system_end(PG_FUNCTION_ARGS)
 Datum
 tsm_system_reset(PG_FUNCTION_ARGS)
 {
-	TableSampleDesc	   *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-	SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+	TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+	SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
 	sampler->lt = InvalidOffsetNumber;
 	BlockSampler_Init(&sampler->bs, sampler->nblocks, sampler->samplesize,
@@ -152,14 +152,14 @@ tsm_system_reset(PG_FUNCTION_ARGS)
 Datum
 tsm_system_cost(PG_FUNCTION_ARGS)
 {
-	PlannerInfo	   *root = (PlannerInfo *) PG_GETARG_POINTER(0);
-	Path		   *path = (Path *) PG_GETARG_POINTER(1);
-	RelOptInfo	   *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
-	List		   *args = (List *) PG_GETARG_POINTER(3);
-	BlockNumber	   *pages = (BlockNumber *) PG_GETARG_POINTER(4);
-	double		   *tuples = (double *) PG_GETARG_POINTER(5);
-	Node		   *pctnode;
-	float4			samplesize;
+	PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+	Path	   *path = (Path *) PG_GETARG_POINTER(1);
+	RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+	List	   *args = (List *) PG_GETARG_POINTER(3);
+	BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+	double	   *tuples = (double *) PG_GETARG_POINTER(5);
+	Node	   *pctnode;
+	float4		samplesize;
 
 	pctnode = linitial(args);
 	pctnode = estimate_expression_value(root, pctnode);
diff --git a/src/backend/access/tablesample/tablesample.c b/src/backend/access/tablesample/tablesample.c
index ef55d062e75..3398d02f854 100644
--- a/src/backend/access/tablesample/tablesample.c
+++ b/src/backend/access/tablesample/tablesample.c
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * tablesample.c
- *        TABLESAMPLE internal API
+ *		  TABLESAMPLE internal API
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *        src/backend/access/tablesample/tablesample.c
+ *		  src/backend/access/tablesample/tablesample.c
  *
  * TABLESAMPLE is the SQL standard clause for sampling the relations.
  *
@@ -53,7 +53,7 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
 	List	   *args = tablesample->args;
 	ListCell   *arg;
 	ExprContext *econtext = scanstate->ss.ps.ps_ExprContext;
-	TableSampleDesc	*tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
+	TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
 
 	/* Load functions */
 	fmgr_info(tablesample->tsminit, &(tsdesc->tsminit));
@@ -78,21 +78,21 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
 	fcinfo.argnull[0] = false;
 
 	/*
-	 * Second arg for init function is always REPEATABLE
-	 * When tablesample->repeatable is NULL then REPEATABLE clause was not
-	 * specified.
-	 * When specified, the expression cannot evaluate to NULL.
+	 * Second arg for init function is always REPEATABLE When
+	 * tablesample->repeatable is NULL then REPEATABLE clause was not
+	 * specified. When specified, the expression cannot evaluate to NULL.
 	 */
 	if (tablesample->repeatable)
 	{
 		ExprState  *argstate = ExecInitExpr((Expr *) tablesample->repeatable,
 											(PlanState *) scanstate);
+
 		fcinfo.arg[1] = ExecEvalExpr(argstate, econtext,
 									 &fcinfo.argnull[1], NULL);
 		if (fcinfo.argnull[1])
 			ereport(ERROR,
 					(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
-					 errmsg("REPEATABLE clause must be NOT NULL numeric value")));
+				errmsg("REPEATABLE clause must be NOT NULL numeric value")));
 	}
 	else
 	{
@@ -130,15 +130,15 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
 HeapTuple
 tablesample_getnext(TableSampleDesc *desc)
 {
-	HeapScanDesc	scan = desc->heapScan;
-	HeapTuple		tuple = &(scan->rs_ctup);
-	bool			pagemode = scan->rs_pageatatime;
-	BlockNumber		blockno;
-	Page			page;
-	bool			page_all_visible;
-	ItemId			itemid;
-	OffsetNumber	tupoffset,
-					maxoffset;
+	HeapScanDesc scan = desc->heapScan;
+	HeapTuple	tuple = &(scan->rs_ctup);
+	bool		pagemode = scan->rs_pageatatime;
+	BlockNumber blockno;
+	Page		page;
+	bool		page_all_visible;
+	ItemId		itemid;
+	OffsetNumber tupoffset,
+				maxoffset;
 
 	if (!scan->rs_inited)
 	{
@@ -152,7 +152,7 @@ tablesample_getnext(TableSampleDesc *desc)
 			return NULL;
 		}
 		blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
-											PointerGetDatum(desc)));
+											  PointerGetDatum(desc)));
 		if (!BlockNumberIsValid(blockno))
 		{
 			tuple->t_data = NULL;
@@ -184,14 +184,14 @@ tablesample_getnext(TableSampleDesc *desc)
 		CHECK_FOR_INTERRUPTS();
 
 		tupoffset = DatumGetUInt16(FunctionCall3(&desc->tsmnexttuple,
-											 PointerGetDatum(desc),
-											 UInt32GetDatum(blockno),
-											 UInt16GetDatum(maxoffset)));
+												 PointerGetDatum(desc),
+												 UInt32GetDatum(blockno),
+												 UInt16GetDatum(maxoffset)));
 
 		if (OffsetNumberIsValid(tupoffset))
 		{
-			bool	visible;
-			bool	found;
+			bool		visible;
+			bool		found;
 
 			/* Skip invalid tuple pointers. */
 			itemid = PageGetItemId(page, tupoffset);
@@ -208,8 +208,8 @@ tablesample_getnext(TableSampleDesc *desc)
 				visible = SampleTupleVisible(tuple, tupoffset, scan);
 
 			/*
-			 * Let the sampling method examine the actual tuple and decide if we
-			 * should return it.
+			 * Let the sampling method examine the actual tuple and decide if
+			 * we should return it.
 			 *
 			 * Note that we let it examine even invisible tuples for
 			 * statistical purposes, but not return them since user should
@@ -218,10 +218,10 @@ tablesample_getnext(TableSampleDesc *desc)
 			if (OidIsValid(desc->tsmexaminetuple.fn_oid))
 			{
 				found = DatumGetBool(FunctionCall4(&desc->tsmexaminetuple,
-											   PointerGetDatum(desc),
-											   UInt32GetDatum(blockno),
-											   PointerGetDatum(tuple),
-											   BoolGetDatum(visible)));
+												   PointerGetDatum(desc),
+												   UInt32GetDatum(blockno),
+												   PointerGetDatum(tuple),
+												   BoolGetDatum(visible)));
 				/* Should not happen if sampling method is well written. */
 				if (found && !visible)
 					elog(ERROR, "Sampling method wanted to return invisible tuple");
@@ -248,19 +248,19 @@ tablesample_getnext(TableSampleDesc *desc)
 			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 
 		blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
-										  PointerGetDatum(desc)));
+											  PointerGetDatum(desc)));
 
 		/*
-		 * Report our new scan position for synchronization purposes. We
-		 * don't do that when moving backwards, however. That would just
-		 * mess up any other forward-moving scanners.
+		 * Report our new scan position for synchronization purposes. We don't
+		 * do that when moving backwards, however. That would just mess up any
+		 * other forward-moving scanners.
 		 *
-		 * Note: we do this before checking for end of scan so that the
-		 * final state of the position hint is back at the start of the
-		 * rel.  That's not strictly necessary, but otherwise when you run
-		 * the same query multiple times the starting position would shift
-		 * a little bit backwards on every invocation, which is confusing.
-		 * We don't guarantee any specific ordering in general, though.
+		 * Note: we do this before checking for end of scan so that the final
+		 * state of the position hint is back at the start of the rel.  That's
+		 * not strictly necessary, but otherwise when you run the same query
+		 * multiple times the starting position would shift a little bit
+		 * backwards on every invocation, which is confusing. We don't
+		 * guarantee any specific ordering in general, though.
 		 */
 		if (scan->rs_syncscan)
 			ss_report_location(scan->rs_rd, BlockNumberIsValid(blockno) ?
@@ -321,25 +321,25 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
 {
 	/*
 	 * If this scan is reading whole pages at a time, there is already
-	 * visibility info present in rs_vistuples so we can just search it
-	 * for the tupoffset.
+	 * visibility info present in rs_vistuples so we can just search it for
+	 * the tupoffset.
 	 */
 	if (scan->rs_pageatatime)
 	{
-		int start = 0,
-			end = scan->rs_ntuples - 1;
+		int			start = 0,
+					end = scan->rs_ntuples - 1;
 
 		/*
 		 * Do the binary search over rs_vistuples, it's already sorted by
 		 * OffsetNumber so we don't need to do any sorting ourselves here.
 		 *
-		 * We could use bsearch() here but it's slower for integers because
-		 * of the function call overhead and because it needs boiler plate code
+		 * We could use bsearch() here but it's slower for integers because of
+		 * the function call overhead and because it needs boiler plate code
 		 * it would not save us anything code-wise anyway.
 		 */
 		while (start <= end)
 		{
-			int mid = start + (end - start) / 2;
+			int			mid = start + (end - start) / 2;
 			OffsetNumber curoffset = scan->rs_vistuples[mid];
 
 			if (curoffset == tupoffset)
@@ -358,7 +358,7 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
 		Snapshot	snapshot = scan->rs_snapshot;
 		Buffer		buffer = scan->rs_cbuf;
 
-		bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
+		bool		visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
 
 		CheckForSerializableConflictOut(visible, scan->rs_rd, tuple, buffer,
 										snapshot);
diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c
index 63344327e3d..5ad35c0d7f8 100644
--- a/src/backend/access/transam/commit_ts.c
+++ b/src/backend/access/transam/commit_ts.c
@@ -55,8 +55,8 @@
  */
 typedef struct CommitTimestampEntry
 {
-	TimestampTz		time;
-	RepOriginId		nodeid;
+	TimestampTz time;
+	RepOriginId nodeid;
 } CommitTimestampEntry;
 
 #define SizeOfCommitTimestampEntry (offsetof(CommitTimestampEntry, nodeid) + \
@@ -65,7 +65,7 @@ typedef struct CommitTimestampEntry
 #define COMMIT_TS_XACTS_PER_PAGE \
 	(BLCKSZ / SizeOfCommitTimestampEntry)
 
-#define TransactionIdToCTsPage(xid)	\
+#define TransactionIdToCTsPage(xid) \
 	((xid) / (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
 #define TransactionIdToCTsEntry(xid)	\
 	((xid) % (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
@@ -83,21 +83,21 @@ static SlruCtlData CommitTsCtlData;
  */
 typedef struct CommitTimestampShared
 {
-	TransactionId	xidLastCommit;
+	TransactionId xidLastCommit;
 	CommitTimestampEntry dataLastCommit;
 } CommitTimestampShared;
 
-CommitTimestampShared	*commitTsShared;
+CommitTimestampShared *commitTsShared;
 
 
 /* GUC variable */
-bool	track_commit_timestamp;
+bool		track_commit_timestamp;
 
 static void SetXidCommitTsInPage(TransactionId xid, int nsubxids,
 					 TransactionId *subxids, TimestampTz ts,
 					 RepOriginId nodeid, int pageno);
 static void TransactionIdSetCommitTs(TransactionId xid, TimestampTz ts,
-						  RepOriginId nodeid, int slotno);
+						 RepOriginId nodeid, int slotno);
 static int	ZeroCommitTsPage(int pageno, bool writeXlog);
 static bool CommitTsPagePrecedes(int page1, int page2);
 static void WriteZeroPageXlogRec(int pageno);
@@ -141,8 +141,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
 		return;
 
 	/*
-	 * Comply with the WAL-before-data rule: if caller specified it wants
-	 * this value to be recorded in WAL, do so before touching the data.
+	 * Comply with the WAL-before-data rule: if caller specified it wants this
+	 * value to be recorded in WAL, do so before touching the data.
 	 */
 	if (do_xlog)
 		WriteSetTimestampXlogRec(xid, nsubxids, subxids, timestamp, nodeid);
@@ -159,9 +159,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
 	/*
 	 * We split the xids to set the timestamp to in groups belonging to the
 	 * same SLRU page; the first element in each such set is its head.  The
-	 * first group has the main XID as the head; subsequent sets use the
-	 * first subxid not on the previous page as head.  This way, we only have
-	 * to lock/modify each SLRU page once.
+	 * first group has the main XID as the head; subsequent sets use the first
+	 * subxid not on the previous page as head.  This way, we only have to
+	 * lock/modify each SLRU page once.
 	 */
 	for (i = 0, headxid = xid;;)
 	{
@@ -183,8 +183,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
 			break;
 
 		/*
-		 * Set the new head and skip over it, as well as over the subxids
-		 * we just wrote.
+		 * Set the new head and skip over it, as well as over the subxids we
+		 * just wrote.
 		 */
 		headxid = subxids[j];
 		i += j - i + 1;
@@ -271,14 +271,14 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 				 errmsg("could not get commit timestamp data"),
-				 errhint("Make sure the configuration parameter \"%s\" is set.",
-						 "track_commit_timestamp")));
+			  errhint("Make sure the configuration parameter \"%s\" is set.",
+					  "track_commit_timestamp")));
 
 	/* error if the given Xid doesn't normally commit */
 	if (!TransactionIdIsNormal(xid))
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				 errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
+		errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
 
 	/*
 	 * Return empty if the requested value is outside our valid range.
@@ -350,15 +350,15 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
 TransactionId
 GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
 {
-	TransactionId	xid;
+	TransactionId xid;
 
 	/* Error if module not enabled */
 	if (!track_commit_timestamp)
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 				 errmsg("could not get commit timestamp data"),
-				 errhint("Make sure the configuration parameter \"%s\" is set.",
-						 "track_commit_timestamp")));
+			  errhint("Make sure the configuration parameter \"%s\" is set.",
+					  "track_commit_timestamp")));
 
 	LWLockAcquire(CommitTsLock, LW_SHARED);
 	xid = commitTsShared->xidLastCommit;
@@ -377,9 +377,9 @@ GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
 Datum
 pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
 {
-	TransactionId	xid = PG_GETARG_UINT32(0);
-	TimestampTz		ts;
-	bool			found;
+	TransactionId xid = PG_GETARG_UINT32(0);
+	TimestampTz ts;
+	bool		found;
 
 	found = TransactionIdGetCommitTsData(xid, &ts, NULL);
 
@@ -393,11 +393,11 @@ pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
 Datum
 pg_last_committed_xact(PG_FUNCTION_ARGS)
 {
-	TransactionId	xid;
-	TimestampTz		ts;
-	Datum       values[2];
-	bool        nulls[2];
-	TupleDesc   tupdesc;
+	TransactionId xid;
+	TimestampTz ts;
+	Datum		values[2];
+	bool		nulls[2];
+	TupleDesc	tupdesc;
 	HeapTuple	htup;
 
 	/* and construct a tuple with our data */
@@ -462,7 +462,7 @@ CommitTsShmemSize(void)
 void
 CommitTsShmemInit(void)
 {
-	bool	found;
+	bool		found;
 
 	CommitTsCtl->PagePrecedes = CommitTsPagePrecedes;
 	SimpleLruInit(CommitTsCtl, "CommitTs Ctl", CommitTsShmemBuffers(), 0,
@@ -495,8 +495,8 @@ BootStrapCommitTs(void)
 {
 	/*
 	 * Nothing to do here at present, unlike most other SLRU modules; segments
-	 * are created when the server is started with this module enabled.
-	 * See StartupCommitTs.
+	 * are created when the server is started with this module enabled. See
+	 * StartupCommitTs.
 	 */
 }
 
@@ -561,9 +561,9 @@ CompleteCommitTsInitialization(void)
 
 /*
  * Activate this module whenever necessary.
- * 		This must happen during postmaster or standalong-backend startup,
- * 		or during WAL replay anytime the track_commit_timestamp setting is
- * 		changed in the master.
+ *		This must happen during postmaster or standalong-backend startup,
+ *		or during WAL replay anytime the track_commit_timestamp setting is
+ *		changed in the master.
  *
  * The reason why this SLRU needs separate activation/deactivation functions is
  * that it can be enabled/disabled during start and the activation/deactivation
@@ -612,7 +612,7 @@ ActivateCommitTs(void)
 	/* Finally, create the current segment file, if necessary */
 	if (!SimpleLruDoesPhysicalPageExist(CommitTsCtl, pageno))
 	{
-		int		slotno;
+		int			slotno;
 
 		LWLockAcquire(CommitTsControlLock, LW_EXCLUSIVE);
 		slotno = ZeroCommitTsPage(pageno, false);
@@ -834,7 +834,7 @@ WriteSetTimestampXlogRec(TransactionId mainxid, int nsubxids,
 						 TransactionId *subxids, TimestampTz timestamp,
 						 RepOriginId nodeid)
 {
-	xl_commit_ts_set	record;
+	xl_commit_ts_set record;
 
 	record.timestamp = timestamp;
 	record.nodeid = nodeid;
@@ -907,7 +907,7 @@ commit_ts_redo(XLogReaderState *record)
 			subxids = NULL;
 
 		TransactionTreeSetCommitTsData(setts->mainxid, nsubxids, subxids,
-									   setts->timestamp, setts->nodeid, false);
+									 setts->timestamp, setts->nodeid, false);
 		if (subxids)
 			pfree(subxids);
 	}
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 0218378ccb5..9568ff1ddb7 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -965,7 +965,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
 	 */
 	if (!MultiXactIdPrecedes(result, MultiXactState->multiVacLimit) ||
 		(MultiXactState->nextOffset - MultiXactState->oldestOffset
-			> MULTIXACT_MEMBER_SAFE_THRESHOLD))
+		 > MULTIXACT_MEMBER_SAFE_THRESHOLD))
 	{
 		/*
 		 * For safety's sake, we release MultiXactGenLock while sending
@@ -1190,9 +1190,9 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
 	MultiXactIdSetOldestVisible();
 
 	/*
-	 * If we know the multi is used only for locking and not for updates,
-	 * then we can skip checking if the value is older than our oldest
-	 * visible multi.  It cannot possibly still be running.
+	 * If we know the multi is used only for locking and not for updates, then
+	 * we can skip checking if the value is older than our oldest visible
+	 * multi.  It cannot possibly still be running.
 	 */
 	if (onlyLock &&
 		MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId]))
@@ -1207,14 +1207,14 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
 	 *
 	 * An ID older than MultiXactState->oldestMultiXactId cannot possibly be
 	 * useful; it has already been removed, or will be removed shortly, by
-	 * truncation.  Returning the wrong values could lead
-	 * to an incorrect visibility result.  However, to support pg_upgrade we
-	 * need to allow an empty set to be returned regardless, if the caller is
-	 * willing to accept it; the caller is expected to check that it's an
-	 * allowed condition (such as ensuring that the infomask bits set on the
-	 * tuple are consistent with the pg_upgrade scenario).  If the caller is
-	 * expecting this to be called only on recently created multis, then we
-	 * raise an error.
+	 * truncation.  Returning the wrong values could lead to an incorrect
+	 * visibility result.  However, to support pg_upgrade we need to allow an
+	 * empty set to be returned regardless, if the caller is willing to accept
+	 * it; the caller is expected to check that it's an allowed condition
+	 * (such as ensuring that the infomask bits set on the tuple are
+	 * consistent with the pg_upgrade scenario).  If the caller is expecting
+	 * this to be called only on recently created multis, then we raise an
+	 * error.
 	 *
 	 * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
 	 * seen, it implies undetected ID wraparound has occurred.  This raises a
@@ -2123,11 +2123,11 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
 	 * enough to contain the next value that would be created.
 	 *
 	 * We need to do this pretty early during the first startup in binary
-	 * upgrade mode: before StartupMultiXact() in fact, because this routine is
-	 * called even before that by StartupXLOG().  And we can't do it earlier
-	 * than at this point, because during that first call of this routine we
-	 * determine the MultiXactState->nextMXact value that MaybeExtendOffsetSlru
-	 * needs.
+	 * upgrade mode: before StartupMultiXact() in fact, because this routine
+	 * is called even before that by StartupXLOG().  And we can't do it
+	 * earlier than at this point, because during that first call of this
+	 * routine we determine the MultiXactState->nextMXact value that
+	 * MaybeExtendOffsetSlru needs.
 	 */
 	if (IsBinaryUpgrade)
 		MaybeExtendOffsetSlru();
@@ -2202,11 +2202,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
 
 	/*
 	 * Determine the offset of the oldest multixact that might still be
-	 * referenced.  Normally, we can read the offset from the multixact itself,
-	 * but there's an important special case: if there are no multixacts in
-	 * existence at all, oldest_datminmxid obviously can't point to one.  It
-	 * will instead point to the multixact ID that will be assigned the next
-	 * time one is needed.
+	 * referenced.  Normally, we can read the offset from the multixact
+	 * itself, but there's an important special case: if there are no
+	 * multixacts in existence at all, oldest_datminmxid obviously can't point
+	 * to one.  It will instead point to the multixact ID that will be
+	 * assigned the next time one is needed.
 	 *
 	 * NB: oldest_dataminmxid is the oldest multixact that might still be
 	 * referenced from a table, unlike in DetermineSafeOldestOffset, where we
@@ -2520,10 +2520,9 @@ DetermineSafeOldestOffset(MultiXactId oldestMXact)
 	 * obviously can't point to one.  It will instead point to the multixact
 	 * ID that will be assigned the next time one is needed.
 	 *
-	 * NB: oldestMXact should be the oldest multixact that still exists in
-	 * the SLRU, unlike in SetMultiXactIdLimit, where we do this same
-	 * computation based on the oldest value that might be referenced in a
-	 * table.
+	 * NB: oldestMXact should be the oldest multixact that still exists in the
+	 * SLRU, unlike in SetMultiXactIdLimit, where we do this same computation
+	 * based on the oldest value that might be referenced in a table.
 	 */
 	LWLockAcquire(MultiXactGenLock, LW_SHARED);
 	if (MultiXactState->nextMXact == oldestMXact)
@@ -2679,9 +2678,9 @@ int
 MultiXactMemberFreezeThreshold(void)
 {
 	MultiXactOffset members;
-	uint32 multixacts;
-	uint32 victim_multixacts;
-	double fraction;
+	uint32		multixacts;
+	uint32		victim_multixacts;
+	double		fraction;
 
 	ReadMultiXactCounts(&multixacts, &members);
 
@@ -2800,7 +2799,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
 void
 TruncateMultiXact(void)
 {
-	MultiXactId		oldestMXact;
+	MultiXactId oldestMXact;
 	MultiXactOffset oldestOffset;
 	MultiXactOffset nextOffset;
 	mxtruncinfo trunc;
diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c
index 8d6a3606794..f4ba8518b12 100644
--- a/src/backend/access/transam/parallel.c
+++ b/src/backend/access/transam/parallel.c
@@ -39,7 +39,7 @@
  * without blocking.  That way, a worker that errors out can write the whole
  * message into the queue and terminate without waiting for the user backend.
  */
-#define	PARALLEL_ERROR_QUEUE_SIZE			16384
+#define PARALLEL_ERROR_QUEUE_SIZE			16384
 
 /* Magic number for parallel context TOC. */
 #define PARALLEL_MAGIC						0x50477c7c
@@ -71,7 +71,7 @@ typedef struct FixedParallelState
 	BackendId	parallel_master_backend_id;
 
 	/* Entrypoint for parallel workers. */
-	parallel_worker_main_type	entrypoint;
+	parallel_worker_main_type entrypoint;
 
 	/* Mutex protects remaining fields. */
 	slock_t		mutex;
@@ -90,10 +90,10 @@ typedef struct FixedParallelState
  * and < the number of workers before any user code is invoked; each parallel
  * worker will get a different parallel worker number.
  */
-int ParallelWorkerNumber = -1;
+int			ParallelWorkerNumber = -1;
 
 /* Is there a parallel message pending which we need to receive? */
-bool ParallelMessagePending = false;
+bool		ParallelMessagePending = false;
 
 /* Pointer to our fixed parallel state. */
 static FixedParallelState *MyFixedParallelState;
@@ -115,8 +115,8 @@ static void ParallelWorkerMain(Datum main_arg);
 ParallelContext *
 CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
 {
-	MemoryContext	oldcontext;
-	ParallelContext	*pcxt;
+	MemoryContext oldcontext;
+	ParallelContext *pcxt;
 
 	/* It is unsafe to create a parallel context if not in parallel mode. */
 	Assert(IsInParallelMode());
@@ -159,7 +159,7 @@ CreateParallelContextForExternalFunction(char *library_name,
 										 char *function_name,
 										 int nworkers)
 {
-	MemoryContext	oldcontext;
+	MemoryContext oldcontext;
 	ParallelContext *pcxt;
 
 	/* We might be running in a very short-lived memory context. */
@@ -184,15 +184,15 @@ CreateParallelContextForExternalFunction(char *library_name,
 void
 InitializeParallelDSM(ParallelContext *pcxt)
 {
-	MemoryContext	oldcontext;
-	Size	library_len = 0;
-	Size	guc_len = 0;
-	Size	combocidlen = 0;
-	Size	tsnaplen = 0;
-	Size	asnaplen = 0;
-	Size	tstatelen = 0;
-	Size	segsize = 0;
-	int		i;
+	MemoryContext oldcontext;
+	Size		library_len = 0;
+	Size		guc_len = 0;
+	Size		combocidlen = 0;
+	Size		tsnaplen = 0;
+	Size		asnaplen = 0;
+	Size		tstatelen = 0;
+	Size		segsize = 0;
+	int			i;
 	FixedParallelState *fps;
 	Snapshot	transaction_snapshot = GetTransactionSnapshot();
 	Snapshot	active_snapshot = GetActiveSnapshot();
@@ -205,8 +205,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 
 	/*
-	 * Normally, the user will have requested at least one worker process,
-	 * but if by chance they have not, we can skip a bunch of things here.
+	 * Normally, the user will have requested at least one worker process, but
+	 * if by chance they have not, we can skip a bunch of things here.
 	 */
 	if (pcxt->nworkers > 0)
 	{
@@ -228,8 +228,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
 
 		/* Estimate space need for error queues. */
 		StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
-			PARALLEL_ERROR_QUEUE_SIZE,
-			"parallel error queue size not buffer-aligned");
+						 PARALLEL_ERROR_QUEUE_SIZE,
+						 "parallel error queue size not buffer-aligned");
 		shm_toc_estimate_chunk(&pcxt->estimator,
 							   PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
 		shm_toc_estimate_keys(&pcxt->estimator, 1);
@@ -251,9 +251,9 @@ InitializeParallelDSM(ParallelContext *pcxt)
 	 * memory segment; instead, just use backend-private memory.
 	 *
 	 * Also, if we can't create a dynamic shared memory segment because the
-	 * maximum number of segments have already been created, then fall back
-	 * to backend-private memory, and plan not to use any workers.  We hope
-	 * this won't happen very often, but it's better to abandon the use of
+	 * maximum number of segments have already been created, then fall back to
+	 * backend-private memory, and plan not to use any workers.  We hope this
+	 * won't happen very often, but it's better to abandon the use of
 	 * parallelism than to fail outright.
 	 */
 	segsize = shm_toc_estimate(&pcxt->estimator);
@@ -290,13 +290,13 @@ InitializeParallelDSM(ParallelContext *pcxt)
 	/* We can skip the rest of this if we're not budgeting for any workers. */
 	if (pcxt->nworkers > 0)
 	{
-		char   *libraryspace;
-		char   *gucspace;
-		char   *combocidspace;
-		char   *tsnapspace;
-		char   *asnapspace;
-		char   *tstatespace;
-		char   *error_queue_space;
+		char	   *libraryspace;
+		char	   *gucspace;
+		char	   *combocidspace;
+		char	   *tsnapspace;
+		char	   *asnapspace;
+		char	   *tstatespace;
+		char	   *error_queue_space;
 
 		/* Serialize shared libraries we have loaded. */
 		libraryspace = shm_toc_allocate(pcxt->toc, library_len);
@@ -338,12 +338,12 @@ InitializeParallelDSM(ParallelContext *pcxt)
 		 * should be transmitted via separate (possibly larger?) queues.
 		 */
 		error_queue_space =
-		   shm_toc_allocate(pcxt->toc,
-							PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+			shm_toc_allocate(pcxt->toc,
+							 PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
 		for (i = 0; i < pcxt->nworkers; ++i)
 		{
-			char *start;
-			shm_mq *mq;
+			char	   *start;
+			shm_mq	   *mq;
 
 			start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
 			mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
@@ -355,8 +355,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
 		/* Serialize extension entrypoint information. */
 		if (pcxt->library_name != NULL)
 		{
-			Size	lnamelen = strlen(pcxt->library_name);
-			char *extensionstate;
+			Size		lnamelen = strlen(pcxt->library_name);
+			char	   *extensionstate;
 
 			extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
 										  + strlen(pcxt->function_name) + 2);
@@ -377,10 +377,10 @@ InitializeParallelDSM(ParallelContext *pcxt)
 void
 LaunchParallelWorkers(ParallelContext *pcxt)
 {
-	MemoryContext	oldcontext;
-	BackgroundWorker	worker;
-	int		i;
-	bool	any_registrations_failed = false;
+	MemoryContext oldcontext;
+	BackgroundWorker worker;
+	int			i;
+	bool		any_registrations_failed = false;
 
 	/* Skip this if we have no workers. */
 	if (pcxt->nworkers == 0)
@@ -408,8 +408,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
 	 *
 	 * The caller must be able to tolerate ending up with fewer workers than
 	 * expected, so there is no need to throw an error here if registration
-	 * fails.  It wouldn't help much anyway, because registering the worker
-	 * in no way guarantees that it will start up and initialize successfully.
+	 * fails.  It wouldn't help much anyway, because registering the worker in
+	 * no way guarantees that it will start up and initialize successfully.
 	 */
 	for (i = 0; i < pcxt->nworkers; ++i)
 	{
@@ -421,8 +421,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
 		else
 		{
 			/*
-			 * If we weren't able to register the worker, then we've bumped
-			 * up against the max_worker_processes limit, and future
+			 * If we weren't able to register the worker, then we've bumped up
+			 * against the max_worker_processes limit, and future
 			 * registrations will probably fail too, so arrange to skip them.
 			 * But we still have to execute this code for the remaining slots
 			 * to make sure that we forget about the error queues we budgeted
@@ -455,13 +455,13 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
 {
 	for (;;)
 	{
-		bool	anyone_alive = false;
-		int		i;
+		bool		anyone_alive = false;
+		int			i;
 
 		/*
-		 * This will process any parallel messages that are pending, which
-		 * may change the outcome of the loop that follows.  It may also
-		 * throw an error propagated from a worker.
+		 * This will process any parallel messages that are pending, which may
+		 * change the outcome of the loop that follows.  It may also throw an
+		 * error propagated from a worker.
 		 */
 		CHECK_FOR_INTERRUPTS();
 
@@ -502,7 +502,7 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
 void
 DestroyParallelContext(ParallelContext *pcxt)
 {
-	int		i;
+	int			i;
 
 	/*
 	 * Be careful about order of operations here!  We remove the parallel
@@ -548,7 +548,7 @@ DestroyParallelContext(ParallelContext *pcxt)
 	/* Wait until the workers actually die. */
 	for (i = 0; i < pcxt->nworkers; ++i)
 	{
-		BgwHandleStatus	status;
+		BgwHandleStatus status;
 
 		if (pcxt->worker[i].bgwhandle == NULL)
 			continue;
@@ -626,9 +626,9 @@ HandleParallelMessages(void)
 	dlist_foreach(iter, &pcxt_list)
 	{
 		ParallelContext *pcxt;
-		int		i;
-		Size	nbytes;
-		void   *data;
+		int			i;
+		Size		nbytes;
+		void	   *data;
 
 		pcxt = dlist_container(ParallelContext, node, iter.cur);
 		if (pcxt->worker == NULL)
@@ -637,14 +637,14 @@ HandleParallelMessages(void)
 		for (i = 0; i < pcxt->nworkers; ++i)
 		{
 			/*
-			 * Read as many messages as we can from each worker, but stop
-			 * when either (1) the error queue goes away, which can happen if
-			 * we receive a Terminate message from the worker; or (2) no more
+			 * Read as many messages as we can from each worker, but stop when
+			 * either (1) the error queue goes away, which can happen if we
+			 * receive a Terminate message from the worker; or (2) no more
 			 * messages can be read from the worker without blocking.
 			 */
 			while (pcxt->worker[i].error_mqh != NULL)
 			{
-				shm_mq_result	res;
+				shm_mq_result res;
 
 				res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
 									 &data, true);
@@ -652,7 +652,7 @@ HandleParallelMessages(void)
 					break;
 				else if (res == SHM_MQ_SUCCESS)
 				{
-					StringInfoData	msg;
+					StringInfoData msg;
 
 					initStringInfo(&msg);
 					appendBinaryStringInfo(&msg, data, nbytes);
@@ -661,7 +661,7 @@ HandleParallelMessages(void)
 				}
 				else
 					ereport(ERROR,
-							(errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */
+							(errcode(ERRCODE_INTERNAL_ERROR),	/* XXX: wrong errcode? */
 							 errmsg("lost connection to parallel worker")));
 
 				/* This might make the error queue go away. */
@@ -677,23 +677,24 @@ HandleParallelMessages(void)
 static void
 HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
 {
-	char	msgtype;
+	char		msgtype;
 
 	msgtype = pq_getmsgbyte(msg);
 
 	switch (msgtype)
 	{
-		case 'K':			/* BackendKeyData */
+		case 'K':				/* BackendKeyData */
 			{
-				int32	pid = pq_getmsgint(msg, 4);
+				int32		pid = pq_getmsgint(msg, 4);
+
 				(void) pq_getmsgint(msg, 4);	/* discard cancel key */
 				(void) pq_getmsgend(msg);
 				pcxt->worker[i].pid = pid;
 				break;
 			}
 
-		case 'E':			/* ErrorResponse */
-		case 'N':			/* NoticeResponse */
+		case 'E':				/* ErrorResponse */
+		case 'N':				/* NoticeResponse */
 			{
 				ErrorData	edata;
 				ErrorContextCallback errctx;
@@ -725,14 +726,14 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
 				break;
 			}
 
-		case 'A':		/* NotifyResponse */
+		case 'A':				/* NotifyResponse */
 			{
 				/* Propagate NotifyResponse. */
 				pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1);
 				break;
 			}
 
-		case 'X':		/* Terminate, indicating clean exit */
+		case 'X':				/* Terminate, indicating clean exit */
 			{
 				pfree(pcxt->worker[i].bgwhandle);
 				pfree(pcxt->worker[i].error_mqh);
@@ -797,18 +798,18 @@ static void
 ParallelWorkerMain(Datum main_arg)
 {
 	dsm_segment *seg;
-	shm_toc *toc;
+	shm_toc    *toc;
 	FixedParallelState *fps;
-	char   *error_queue_space;
-	shm_mq *mq;
+	char	   *error_queue_space;
+	shm_mq	   *mq;
 	shm_mq_handle *mqh;
-	char   *libraryspace;
-	char   *gucspace;
-	char   *combocidspace;
-	char   *tsnapspace;
-	char   *asnapspace;
-	char   *tstatespace;
-	StringInfoData	msgbuf;
+	char	   *libraryspace;
+	char	   *gucspace;
+	char	   *combocidspace;
+	char	   *tsnapspace;
+	char	   *asnapspace;
+	char	   *tstatespace;
+	StringInfoData msgbuf;
 
 	/* Establish signal handlers. */
 	pqsignal(SIGTERM, die);
@@ -824,8 +825,8 @@ ParallelWorkerMain(Datum main_arg)
 												 ALLOCSET_DEFAULT_MAXSIZE);
 
 	/*
-	 * Now that we have a resource owner, we can attach to the dynamic
-	 * shared memory segment and read the table of contents.
+	 * Now that we have a resource owner, we can attach to the dynamic shared
+	 * memory segment and read the table of contents.
 	 */
 	seg = dsm_attach(DatumGetUInt32(main_arg));
 	if (seg == NULL)
@@ -836,7 +837,7 @@ ParallelWorkerMain(Datum main_arg)
 	if (toc == NULL)
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-				 errmsg("bad magic number in dynamic shared memory segment")));
+			   errmsg("bad magic number in dynamic shared memory segment")));
 
 	/* Determine and set our worker number. */
 	fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
@@ -860,7 +861,7 @@ ParallelWorkerMain(Datum main_arg)
 	 */
 	error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
 	mq = (shm_mq *) (error_queue_space +
-		ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
+					 ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
 	shm_mq_set_sender(mq, MyProc);
 	mqh = shm_mq_attach(mq, seg, NULL);
 	pq_redirect_to_shm_mq(mq, mqh);
@@ -870,9 +871,9 @@ ParallelWorkerMain(Datum main_arg)
 	/*
 	 * Send a BackendKeyData message to the process that initiated parallelism
 	 * so that it has access to our PID before it receives any other messages
-	 * from us.  Our cancel key is sent, too, since that's the way the protocol
-	 * message is defined, but it won't actually be used for anything in this
-	 * case.
+	 * from us.  Our cancel key is sent, too, since that's the way the
+	 * protocol message is defined, but it won't actually be used for anything
+	 * in this case.
 	 */
 	pq_beginmessage(&msgbuf, 'K');
 	pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
@@ -880,13 +881,13 @@ ParallelWorkerMain(Datum main_arg)
 	pq_endmessage(&msgbuf);
 
 	/*
-	 * Hooray! Primary initialization is complete.  Now, we need to set up
-	 * our backend-local state to match the original backend.
+	 * Hooray! Primary initialization is complete.  Now, we need to set up our
+	 * backend-local state to match the original backend.
 	 */
 
 	/*
-	 * Load libraries that were loaded by original backend.  We want to do this
-	 * before restoring GUCs, because the libraries might define custom
+	 * Load libraries that were loaded by original backend.  We want to do
+	 * this before restoring GUCs, because the libraries might define custom
 	 * variables.
 	 */
 	libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
@@ -928,7 +929,8 @@ ParallelWorkerMain(Datum main_arg)
 	SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
 
 	/*
-	 * We've initialized all of our state now; nothing should change hereafter.
+	 * We've initialized all of our state now; nothing should change
+	 * hereafter.
 	 */
 	EnterParallelMode();
 
@@ -965,9 +967,9 @@ ParallelWorkerMain(Datum main_arg)
 static void
 ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
 {
-	char   *extensionstate;
-	char   *library_name;
-	char   *function_name;
+	char	   *extensionstate;
+	char	   *library_name;
+	char	   *function_name;
 	parallel_worker_main_type entrypt;
 
 	extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
@@ -988,7 +990,7 @@ ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
 static void
 ParallelErrorContext(void *arg)
 {
-	errcontext("parallel worker, pid %d", * (int32 *) arg);
+	errcontext("parallel worker, pid %d", *(int32 *) arg);
 }
 
 /*
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 4743cacefe6..177d1e1432e 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -117,7 +117,7 @@ typedef struct GlobalTransactionData
 	TimestampTz prepared_at;	/* time of preparation */
 	XLogRecPtr	prepare_lsn;	/* XLOG offset of prepare record */
 	Oid			owner;			/* ID of user that executed the xact */
-	BackendId	locking_backend; /* backend currently working on the xact */
+	BackendId	locking_backend;	/* backend currently working on the xact */
 	bool		valid;			/* TRUE if PGPROC entry is in proc array */
 	char		gid[GIDSIZE];	/* The GID assigned to the prepared xact */
 }	GlobalTransactionData;
@@ -256,24 +256,24 @@ AtAbort_Twophase(void)
 		return;
 
 	/*
-	 * What to do with the locked global transaction entry?  If we were in
-	 * the process of preparing the transaction, but haven't written the WAL
+	 * What to do with the locked global transaction entry?  If we were in the
+	 * process of preparing the transaction, but haven't written the WAL
 	 * record and state file yet, the transaction must not be considered as
 	 * prepared.  Likewise, if we are in the process of finishing an
-	 * already-prepared transaction, and fail after having already written
-	 * the 2nd phase commit or rollback record to the WAL, the transaction
-	 * should not be considered as prepared anymore.  In those cases, just
-	 * remove the entry from shared memory.
+	 * already-prepared transaction, and fail after having already written the
+	 * 2nd phase commit or rollback record to the WAL, the transaction should
+	 * not be considered as prepared anymore.  In those cases, just remove the
+	 * entry from shared memory.
 	 *
-	 * Otherwise, the entry must be left in place so that the transaction
-	 * can be finished later, so just unlock it.
+	 * Otherwise, the entry must be left in place so that the transaction can
+	 * be finished later, so just unlock it.
 	 *
 	 * If we abort during prepare, after having written the WAL record, we
 	 * might not have transferred all locks and other state to the prepared
 	 * transaction yet.  Likewise, if we abort during commit or rollback,
-	 * after having written the WAL record, we might not have released
-	 * all the resources held by the transaction yet.  In those cases, the
-	 * in-memory state can be wrong, but it's too late to back out.
+	 * after having written the WAL record, we might not have released all the
+	 * resources held by the transaction yet.  In those cases, the in-memory
+	 * state can be wrong, but it's too late to back out.
 	 */
 	if (!MyLockedGxact->valid)
 	{
@@ -408,8 +408,8 @@ MarkAsPreparing(TransactionId xid, const char *gid,
 	TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact;
 
 	/*
-	 * Remember that we have this GlobalTransaction entry locked for us.
-	 * If we abort after this, we must release it.
+	 * Remember that we have this GlobalTransaction entry locked for us. If we
+	 * abort after this, we must release it.
 	 */
 	MyLockedGxact = gxact;
 
@@ -499,8 +499,8 @@ LockGXact(const char *gid, Oid user)
 		if (gxact->locking_backend != InvalidBackendId)
 			ereport(ERROR,
 					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-					 errmsg("prepared transaction with identifier \"%s\" is busy",
-							gid)));
+				errmsg("prepared transaction with identifier \"%s\" is busy",
+					   gid)));
 
 		if (user != gxact->owner && !superuser_arg(user))
 			ereport(ERROR,
@@ -1423,8 +1423,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
 
 	/*
 	 * In case we fail while running the callbacks, mark the gxact invalid so
-	 * no one else will try to commit/rollback, and so it will be recycled
-	 * if we fail after this point.  It is still locked by our backend so it
+	 * no one else will try to commit/rollback, and so it will be recycled if
+	 * we fail after this point.  It is still locked by our backend so it
 	 * won't go away yet.
 	 *
 	 * (We assume it's safe to do this without taking TwoPhaseStateLock.)
@@ -2055,8 +2055,9 @@ RecoverPreparedTransactions(void)
 				StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
 
 			/*
-			 * We're done with recovering this transaction. Clear MyLockedGxact,
-			 * like we do in PrepareTransaction() during normal operation.
+			 * We're done with recovering this transaction. Clear
+			 * MyLockedGxact, like we do in PrepareTransaction() during normal
+			 * operation.
 			 */
 			PostPrepare_Twophase();
 
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 23401057e2c..b53d95faf86 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -102,9 +102,9 @@ int			synchronous_commit = SYNCHRONOUS_COMMIT_ON;
  * The XIDs are stored sorted in numerical order (not logical order) to make
  * lookups as fast as possible.
  */
-TransactionId	XactTopTransactionId = InvalidTransactionId;
-int				nParallelCurrentXids = 0;
-TransactionId  *ParallelCurrentXids;
+TransactionId XactTopTransactionId = InvalidTransactionId;
+int			nParallelCurrentXids = 0;
+TransactionId *ParallelCurrentXids;
 
 /*
  * MyXactAccessedTempRel is set when a temporary relation is accessed.
@@ -142,7 +142,7 @@ typedef enum TBlockState
 	/* transaction block states */
 	TBLOCK_BEGIN,				/* starting transaction block */
 	TBLOCK_INPROGRESS,			/* live transaction */
-	TBLOCK_PARALLEL_INPROGRESS,	/* live transaction inside parallel worker */
+	TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */
 	TBLOCK_END,					/* COMMIT received */
 	TBLOCK_ABORT,				/* failed xact, awaiting ROLLBACK */
 	TBLOCK_ABORT_END,			/* failed xact, ROLLBACK received */
@@ -184,7 +184,7 @@ typedef struct TransactionStateData
 	bool		prevXactReadOnly;		/* entry-time xact r/o state */
 	bool		startedInRecovery;		/* did we start in recovery? */
 	bool		didLogXid;		/* has xid been included in WAL record? */
-	int			parallelModeLevel;	/* Enter/ExitParallelMode counter */
+	int			parallelModeLevel;		/* Enter/ExitParallelMode counter */
 	struct TransactionStateData *parent;		/* back link to parent */
 } TransactionStateData;
 
@@ -494,8 +494,8 @@ AssignTransactionId(TransactionState s)
 	Assert(s->state == TRANS_INPROGRESS);
 
 	/*
-	 * Workers synchronize transaction state at the beginning of each
-	 * parallel operation, so we can't account for new XIDs at this point.
+	 * Workers synchronize transaction state at the beginning of each parallel
+	 * operation, so we can't account for new XIDs at this point.
 	 */
 	if (IsInParallelMode())
 		elog(ERROR, "cannot assign XIDs during a parallel operation");
@@ -788,10 +788,10 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
 		return false;
 
 	/*
-	 * In parallel workers, the XIDs we must consider as current are stored
-	 * in ParallelCurrentXids rather than the transaction-state stack.  Note
-	 * that the XIDs in this array are sorted numerically rather than
-	 * according to transactionIdPrecedes order.
+	 * In parallel workers, the XIDs we must consider as current are stored in
+	 * ParallelCurrentXids rather than the transaction-state stack.  Note that
+	 * the XIDs in this array are sorted numerically rather than according to
+	 * transactionIdPrecedes order.
 	 */
 	if (nParallelCurrentXids > 0)
 	{
@@ -1204,7 +1204,7 @@ RecordTransactionCommit(void)
 							nchildren, children, nrels, rels,
 							nmsgs, invalMessages,
 							RelcacheInitFileInval, forceSyncCommit,
-							InvalidTransactionId /* plain commit */);
+							InvalidTransactionId /* plain commit */ );
 
 		/*
 		 * Record plain commit ts if not replaying remote actions, or if no
@@ -1505,7 +1505,7 @@ RecordTransactionAbort(bool isSubXact)
 	RelFileNode *rels;
 	int			nchildren;
 	TransactionId *children;
-	TimestampTz	xact_time;
+	TimestampTz xact_time;
 
 	/*
 	 * If we haven't been assigned an XID, nobody will care whether we aborted
@@ -2316,8 +2316,8 @@ PrepareTransaction(void)
 
 	/*
 	 * In normal commit-processing, this is all non-critical post-transaction
-	 * cleanup.  When the transaction is prepared, however, it's important that
-	 * the locks and other per-backend resources are transferred to the
+	 * cleanup.  When the transaction is prepared, however, it's important
+	 * that the locks and other per-backend resources are transferred to the
 	 * prepared transaction's PGPROC entry.  Note that if an error is raised
 	 * here, it's too late to abort the transaction. XXX: This probably should
 	 * be in a critical section, to force a PANIC if any of this fails, but
@@ -2358,9 +2358,8 @@ PrepareTransaction(void)
 
 	/*
 	 * Allow another backend to finish the transaction.  After
-	 * PostPrepare_Twophase(), the transaction is completely detached from
-	 * our backend.  The rest is just non-critical cleanup of backend-local
-	 * state.
+	 * PostPrepare_Twophase(), the transaction is completely detached from our
+	 * backend.  The rest is just non-critical cleanup of backend-local state.
 	 */
 	PostPrepare_Twophase();
 
@@ -2417,7 +2416,7 @@ AbortTransaction(void)
 {
 	TransactionState s = CurrentTransactionState;
 	TransactionId latestXid;
-	bool	is_parallel_worker;
+	bool		is_parallel_worker;
 
 	/* Prevent cancel/die interrupt while cleaning up */
 	HOLD_INTERRUPTS();
@@ -2520,9 +2519,9 @@ AbortTransaction(void)
 		latestXid = InvalidTransactionId;
 
 		/*
-		 * Since the parallel master won't get our value of XactLastRecEnd in this
-		 * case, we nudge WAL-writer ourselves in this case.  See related comments in
-		 * RecordTransactionAbort for why this matters.
+		 * Since the parallel master won't get our value of XactLastRecEnd in
+		 * this case, we nudge WAL-writer ourselves in this case.  See related
+		 * comments in RecordTransactionAbort for why this matters.
 		 */
 		XLogSetAsyncXactLSN(XactLastRecEnd);
 	}
@@ -3720,7 +3719,7 @@ DefineSavepoint(char *name)
 	if (IsInParallelMode())
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-				 errmsg("cannot define savepoints during a parallel operation")));
+			errmsg("cannot define savepoints during a parallel operation")));
 
 	switch (s->blockState)
 	{
@@ -3787,7 +3786,7 @@ ReleaseSavepoint(List *options)
 	if (IsInParallelMode())
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-				 errmsg("cannot release savepoints during a parallel operation")));
+		   errmsg("cannot release savepoints during a parallel operation")));
 
 	switch (s->blockState)
 	{
@@ -3900,7 +3899,7 @@ RollbackToSavepoint(List *options)
 	if (IsInParallelMode())
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-				 errmsg("cannot rollback to savepoints during a parallel operation")));
+		errmsg("cannot rollback to savepoints during a parallel operation")));
 
 	switch (s->blockState)
 	{
@@ -4017,17 +4016,18 @@ BeginInternalSubTransaction(char *name)
 
 	/*
 	 * Workers synchronize transaction state at the beginning of each parallel
-	 * operation, so we can't account for new subtransactions after that point.
-	 * We might be able to make an exception for the type of subtransaction
-	 * established by this function, which is typically used in contexts where
-	 * we're going to release or roll back the subtransaction before proceeding
-	 * further, so that no enduring change to the transaction state occurs.
-	 * For now, however, we prohibit this case along with all the others.
+	 * operation, so we can't account for new subtransactions after that
+	 * point. We might be able to make an exception for the type of
+	 * subtransaction established by this function, which is typically used in
+	 * contexts where we're going to release or roll back the subtransaction
+	 * before proceeding further, so that no enduring change to the
+	 * transaction state occurs. For now, however, we prohibit this case along
+	 * with all the others.
 	 */
 	if (IsInParallelMode())
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-				 errmsg("cannot start subtransactions during a parallel operation")));
+		errmsg("cannot start subtransactions during a parallel operation")));
 
 	switch (s->blockState)
 	{
@@ -4094,7 +4094,7 @@ ReleaseCurrentSubTransaction(void)
 	if (IsInParallelMode())
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-				 errmsg("cannot commit subtransactions during a parallel operation")));
+		errmsg("cannot commit subtransactions during a parallel operation")));
 
 	if (s->blockState != TBLOCK_SUBINPROGRESS)
 		elog(ERROR, "ReleaseCurrentSubTransaction: unexpected state %s",
@@ -4773,7 +4773,8 @@ Size
 EstimateTransactionStateSpace(void)
 {
 	TransactionState s;
-	Size	nxids = 5; /* iso level, deferrable, top & current XID, XID count */
+	Size		nxids = 5;		/* iso level, deferrable, top & current XID,
+								 * XID count */
 
 	for (s = CurrentTransactionState; s != NULL; s = s->parent)
 	{
@@ -4804,8 +4805,8 @@ void
 SerializeTransactionState(Size maxsize, char *start_address)
 {
 	TransactionState s;
-	Size	nxids = 0;
-	Size	i = 0;
+	Size		nxids = 0;
+	Size		i = 0;
 	TransactionId *workspace;
 	TransactionId *result = (TransactionId *) start_address;
 
@@ -4830,8 +4831,8 @@ SerializeTransactionState(Size maxsize, char *start_address)
 	}
 
 	/*
-	 * OK, we need to generate a sorted list of XIDs that our workers
-	 * should view as current.  First, figure out how many there are.
+	 * OK, we need to generate a sorted list of XIDs that our workers should
+	 * view as current.  First, figure out how many there are.
 	 */
 	for (s = CurrentTransactionState; s != NULL; s = s->parent)
 	{
@@ -5060,22 +5061,22 @@ xactGetCommittedChildren(TransactionId **ptr)
  */
 XLogRecPtr
 XactLogCommitRecord(TimestampTz commit_time,
-					 int nsubxacts, TransactionId *subxacts,
-					 int nrels, RelFileNode *rels,
-					 int nmsgs, SharedInvalidationMessage *msgs,
-					 bool relcacheInval, bool forceSync,
-					 TransactionId twophase_xid)
+					int nsubxacts, TransactionId *subxacts,
+					int nrels, RelFileNode *rels,
+					int nmsgs, SharedInvalidationMessage *msgs,
+					bool relcacheInval, bool forceSync,
+					TransactionId twophase_xid)
 {
-	xl_xact_commit		xlrec;
-	xl_xact_xinfo		xl_xinfo;
-	xl_xact_dbinfo		xl_dbinfo;
-	xl_xact_subxacts	xl_subxacts;
+	xl_xact_commit xlrec;
+	xl_xact_xinfo xl_xinfo;
+	xl_xact_dbinfo xl_dbinfo;
+	xl_xact_subxacts xl_subxacts;
 	xl_xact_relfilenodes xl_relfilenodes;
-	xl_xact_invals		xl_invals;
-	xl_xact_twophase	xl_twophase;
-	xl_xact_origin		xl_origin;
+	xl_xact_invals xl_invals;
+	xl_xact_twophase xl_twophase;
+	xl_xact_origin xl_origin;
 
-	uint8				info;
+	uint8		info;
 
 	Assert(CritSectionCount > 0);
 
@@ -5198,17 +5199,17 @@ XactLogCommitRecord(TimestampTz commit_time,
  */
 XLogRecPtr
 XactLogAbortRecord(TimestampTz abort_time,
-					int nsubxacts, TransactionId *subxacts,
-					int nrels, RelFileNode *rels,
-					TransactionId twophase_xid)
+				   int nsubxacts, TransactionId *subxacts,
+				   int nrels, RelFileNode *rels,
+				   TransactionId twophase_xid)
 {
-	xl_xact_abort		xlrec;
-	xl_xact_xinfo		xl_xinfo;
-	xl_xact_subxacts	xl_subxacts;
+	xl_xact_abort xlrec;
+	xl_xact_xinfo xl_xinfo;
+	xl_xact_subxacts xl_subxacts;
 	xl_xact_relfilenodes xl_relfilenodes;
-	xl_xact_twophase	xl_twophase;
+	xl_xact_twophase xl_twophase;
 
-	uint8				info;
+	uint8		info;
 
 	Assert(CritSectionCount > 0);
 
@@ -5289,7 +5290,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
 {
 	TransactionId max_xid;
 	int			i;
-	TimestampTz	commit_time;
+	TimestampTz commit_time;
 
 	max_xid = TransactionIdLatest(xid, parsed->nsubxacts, parsed->subxacts);
 
@@ -5351,13 +5352,13 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
 		 * recovered. It's unlikely but it's good to be safe.
 		 */
 		TransactionIdAsyncCommitTree(
-			xid, parsed->nsubxacts, parsed->subxacts, lsn);
+							  xid, parsed->nsubxacts, parsed->subxacts, lsn);
 
 		/*
 		 * We must mark clog before we update the ProcArray.
 		 */
 		ExpireTreeKnownAssignedTransactionIds(
-			xid, parsed->nsubxacts, parsed->subxacts, max_xid);
+						  xid, parsed->nsubxacts, parsed->subxacts, max_xid);
 
 		/*
 		 * Send any cache invalidations attached to the commit. We must
@@ -5365,9 +5366,9 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
 		 * occurs in CommitTransaction().
 		 */
 		ProcessCommittedInvalidationMessages(
-			parsed->msgs, parsed->nmsgs,
-			XactCompletionRelcacheInitFileInval(parsed->xinfo),
-			parsed->dbId, parsed->tsId);
+											 parsed->msgs, parsed->nmsgs,
+						  XactCompletionRelcacheInitFileInval(parsed->xinfo),
+											 parsed->dbId, parsed->tsId);
 
 		/*
 		 * Release locks, if any. We do this for both two phase and normal one
@@ -5383,7 +5384,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
 	{
 		/* recover apply progress */
 		replorigin_advance(origin_id, parsed->origin_lsn, lsn,
-						   false /* backward */, false /* WAL */);
+						   false /* backward */ , false /* WAL */ );
 	}
 
 	/* Make sure files supposed to be dropped are dropped */
@@ -5447,8 +5448,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
 static void
 xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
 {
-	int				i;
-	TransactionId	max_xid;
+	int			i;
+	TransactionId max_xid;
 
 	/*
 	 * Make sure nextXid is beyond any XID mentioned in the record.
@@ -5495,7 +5496,7 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
 		 * We must update the ProcArray after we have marked clog.
 		 */
 		ExpireTreeKnownAssignedTransactionIds(
-			xid, parsed->nsubxacts, parsed->subxacts, max_xid);
+						  xid, parsed->nsubxacts, parsed->subxacts, max_xid);
 
 		/*
 		 * There are no flat files that need updating, nor invalidation
@@ -5557,7 +5558,7 @@ xact_redo(XLogReaderState *record)
 		xl_xact_parsed_abort parsed;
 
 		ParseAbortRecord(XLogRecGetInfo(record), xlrec,
-						  &parsed);
+						 &parsed);
 
 		if (info == XLOG_XACT_ABORT)
 		{
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index b913bf3ebcb..087b6be084d 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -81,8 +81,8 @@ extern uint32 bootstrap_data_checksum_version;
 
 
 /* User-settable parameters */
-int			max_wal_size = 64;		/* 1 GB */
-int			min_wal_size = 5;		/* 80 MB */
+int			max_wal_size = 64;	/* 1 GB */
+int			min_wal_size = 5;	/* 80 MB */
 int			wal_keep_segments = 0;
 int			XLOGbuffers = -1;
 int			XLogArchiveTimeout = 0;
@@ -951,14 +951,14 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
 	/*
 	 * Check to see if my copy of RedoRecPtr or doPageWrites is out of date.
 	 * If so, may have to go back and have the caller recompute everything.
-	 * This can only happen just after a checkpoint, so it's better to be
-	 * slow in this case and fast otherwise.
+	 * This can only happen just after a checkpoint, so it's better to be slow
+	 * in this case and fast otherwise.
 	 *
 	 * If we aren't doing full-page writes then RedoRecPtr doesn't actually
 	 * affect the contents of the XLOG record, so we'll update our local copy
 	 * but not force a recomputation.  (If doPageWrites was just turned off,
-	 * we could recompute the record without full pages, but we choose not
-	 * to bother.)
+	 * we could recompute the record without full pages, but we choose not to
+	 * bother.)
 	 */
 	if (RedoRecPtr != Insert->RedoRecPtr)
 	{
@@ -970,8 +970,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
 	if (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr && doPageWrites)
 	{
 		/*
-		 * Oops, some buffer now needs to be backed up that the caller
-		 * didn't back up.  Start over.
+		 * Oops, some buffer now needs to be backed up that the caller didn't
+		 * back up.  Start over.
 		 */
 		WALInsertLockRelease();
 		END_CRIT_SECTION();
@@ -1100,8 +1100,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
 		{
 			appendStringInfo(&buf, "error decoding record: out of memory");
 		}
-		else if	(!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
-							  &errormsg))
+		else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
+								   &errormsg))
 		{
 			appendStringInfo(&buf, "error decoding record: %s",
 							 errormsg ? errormsg : "no error message");
@@ -1932,11 +1932,11 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
 		/*
 		 * Fill the new page's header
 		 */
-		NewPage   ->xlp_magic = XLOG_PAGE_MAGIC;
+		NewPage->xlp_magic = XLOG_PAGE_MAGIC;
 
 		/* NewPage->xlp_info = 0; */	/* done by memset */
-		NewPage   ->xlp_tli = ThisTimeLineID;
-		NewPage   ->xlp_pageaddr = NewPageBeginPtr;
+		NewPage->xlp_tli = ThisTimeLineID;
+		NewPage->xlp_pageaddr = NewPageBeginPtr;
 
 		/* NewPage->xlp_rem_len = 0; */	/* done by memset */
 
@@ -1954,7 +1954,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
 		 * compress a few records.
 		 */
 		if (!Insert->forcePageWrites)
-			NewPage   ->xlp_info |= XLP_BKP_REMOVABLE;
+			NewPage->xlp_info |= XLP_BKP_REMOVABLE;
 
 		/*
 		 * If first page of an XLOG segment file, make it a long header.
@@ -1966,7 +1966,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
 			NewLongPage->xlp_sysid = ControlFile->system_identifier;
 			NewLongPage->xlp_seg_size = XLogSegSize;
 			NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ;
-			NewPage   ->xlp_info |= XLP_LONG_HEADER;
+			NewPage->xlp_info |= XLP_LONG_HEADER;
 		}
 
 		/*
@@ -2008,10 +2008,10 @@ CalculateCheckpointSegments(void)
 	 *
 	 * a) we keep WAL for two checkpoint cycles, back to the "prev" checkpoint.
 	 * b) during checkpoint, we consume checkpoint_completion_target *
-	 *    number of segments consumed between checkpoints.
+	 *	  number of segments consumed between checkpoints.
 	 *-------
 	 */
-	target = (double ) max_wal_size / (2.0 + CheckPointCompletionTarget);
+	target = (double) max_wal_size / (2.0 + CheckPointCompletionTarget);
 
 	/* round down */
 	CheckPointSegments = (int) target;
@@ -2052,15 +2052,15 @@ XLOGfileslop(XLogRecPtr PriorRedoPtr)
 	 * remove enough segments to stay below the maximum.
 	 */
 	minSegNo = PriorRedoPtr / XLOG_SEG_SIZE + min_wal_size - 1;
-	maxSegNo =  PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1;
+	maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1;
 
 	/*
 	 * Between those limits, recycle enough segments to get us through to the
 	 * estimated end of next checkpoint.
 	 *
 	 * To estimate where the next checkpoint will finish, assume that the
-	 * system runs steadily consuming CheckPointDistanceEstimate
-	 * bytes between every checkpoint.
+	 * system runs steadily consuming CheckPointDistanceEstimate bytes between
+	 * every checkpoint.
 	 *
 	 * The reason this calculation is done from the prior checkpoint, not the
 	 * one that just finished, is that this behaves better if some checkpoint
@@ -3005,11 +3005,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
 	/*
 	 * XXX: What should we use as max_segno? We used to use XLOGfileslop when
 	 * that was a constant, but that was always a bit dubious: normally, at a
-	 * checkpoint, XLOGfileslop was the offset from the checkpoint record,
-	 * but here, it was the offset from the insert location. We can't do the
+	 * checkpoint, XLOGfileslop was the offset from the checkpoint record, but
+	 * here, it was the offset from the insert location. We can't do the
 	 * normal XLOGfileslop calculation here because we don't have access to
-	 * the prior checkpoint's redo location. So somewhat arbitrarily, just
-	 * use CheckPointSegments.
+	 * the prior checkpoint's redo location. So somewhat arbitrarily, just use
+	 * CheckPointSegments.
 	 */
 	max_segno = logsegno + CheckPointSegments;
 	if (!InstallXLogFileSegment(&installed_segno, tmppath,
@@ -3098,7 +3098,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
 		nread = upto - nbytes;
 
 		/*
-		 * The part that is not read from the source file is filled with zeros.
+		 * The part that is not read from the source file is filled with
+		 * zeros.
 		 */
 		if (nread < sizeof(buffer))
 			memset(buffer, 0, sizeof(buffer));
@@ -3153,8 +3154,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
 
 	/*
 	 * Now move the segment into place with its final name.  (Or just return
-	 * the path to the file we created, if the caller wants to handle the
-	 * rest on its own.)
+	 * the path to the file we created, if the caller wants to handle the rest
+	 * on its own.)
 	 */
 	if (dstfname)
 	{
@@ -3690,8 +3691,8 @@ RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI)
 
 		/*
 		 * Remove files that are on a timeline older than the new one we're
-		 * switching to, but with a segment number >= the first segment on
-		 * the new timeline.
+		 * switching to, but with a segment number >= the first segment on the
+		 * new timeline.
 		 */
 		if (strncmp(xlde->d_name, switchseg, 8) < 0 &&
 			strcmp(xlde->d_name + 8, switchseg + 8) > 0)
@@ -3768,12 +3769,13 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
 						segname)));
 
 #ifdef WIN32
+
 		/*
 		 * On Windows, if another process (e.g another backend) holds the file
 		 * open in FILE_SHARE_DELETE mode, unlink will succeed, but the file
 		 * will still show up in directory listing until the last handle is
-		 * closed. To avoid confusing the lingering deleted file for a live WAL
-		 * file that needs to be archived, rename it before deleting it.
+		 * closed. To avoid confusing the lingering deleted file for a live
+		 * WAL file that needs to be archived, rename it before deleting it.
 		 *
 		 * If another process holds the file open without FILE_SHARE_DELETE
 		 * flag, rename will fail. We'll try again at the next checkpoint.
@@ -3783,8 +3785,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
 		{
 			ereport(LOG,
 					(errcode_for_file_access(),
-				 errmsg("could not rename old transaction log file \"%s\": %m",
-							path)));
+			   errmsg("could not rename old transaction log file \"%s\": %m",
+					  path)));
 			return;
 		}
 		rc = unlink(newpath);
@@ -3795,8 +3797,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
 		{
 			ereport(LOG,
 					(errcode_for_file_access(),
-				 errmsg("could not remove old transaction log file \"%s\": %m",
-							path)));
+			   errmsg("could not remove old transaction log file \"%s\": %m",
+					  path)));
 			return;
 		}
 		CheckpointStats.ckpt_segs_removed++;
@@ -4609,11 +4611,11 @@ XLOGShmemInit(void)
 	int			i;
 
 #ifdef WAL_DEBUG
+
 	/*
-	 * Create a memory context for WAL debugging that's exempt from the
-	 * normal "no pallocs in critical section" rule. Yes, that can lead to a
-	 * PANIC if an allocation fails, but wal_debug is not for production use
-	 * anyway.
+	 * Create a memory context for WAL debugging that's exempt from the normal
+	 * "no pallocs in critical section" rule. Yes, that can lead to a PANIC if
+	 * an allocation fails, but wal_debug is not for production use anyway.
 	 */
 	if (walDebugCxt == NULL)
 	{
@@ -5044,7 +5046,7 @@ readRecoveryCommandFile(void)
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						 errmsg("invalid value for recovery parameter \"recovery_target\""),
-						 errhint("The only allowed value is \"immediate\".")));
+					   errhint("The only allowed value is \"immediate\".")));
 			ereport(DEBUG2,
 					(errmsg_internal("recovery_target = '%s'",
 									 item->value)));
@@ -5135,9 +5137,9 @@ readRecoveryCommandFile(void)
 	}
 
 	/*
-	 * Override any inconsistent requests. Not that this is a change
-	 * of behaviour in 9.5; prior to this we simply ignored a request
-	 * to pause if hot_standby = off, which was surprising behaviour.
+	 * Override any inconsistent requests. Note that this is a change of
+	 * behaviour in 9.5; prior to this we simply ignored a request to pause if
+	 * hot_standby = off, which was surprising behaviour.
 	 */
 	if (recoveryTargetAction == RECOVERY_TARGET_ACTION_PAUSE &&
 		recoveryTargetActionSet &&
@@ -6043,7 +6045,7 @@ StartupXLOG(void)
 	if (read_backup_label(&checkPointLoc, &backupEndRequired,
 						  &backupFromStandby))
 	{
-		List	*tablespaces = NIL;
+		List	   *tablespaces = NIL;
 
 		/*
 		 * Archive recovery was requested, and thanks to the backup label
@@ -6099,7 +6101,7 @@ StartupXLOG(void)
 			foreach(lc, tablespaces)
 			{
 				tablespaceinfo *ti = lfirst(lc);
-				char	*linkloc;
+				char	   *linkloc;
 
 				linkloc = psprintf("pg_tblspc/%s", ti->oid);
 
@@ -6112,26 +6114,26 @@ StartupXLOG(void)
 				 */
 				if (lstat(linkloc, &st) == 0 && S_ISDIR(st.st_mode))
 				{
-					if (!rmtree(linkloc,true))
+					if (!rmtree(linkloc, true))
 						ereport(ERROR,
 								(errcode_for_file_access(),
-								 errmsg("could not remove directory \"%s\": %m",
-										linkloc)));
+							  errmsg("could not remove directory \"%s\": %m",
+									 linkloc)));
 				}
 				else
 				{
 					if (unlink(linkloc) < 0 && errno != ENOENT)
 						ereport(ERROR,
 								(errcode_for_file_access(),
-								 errmsg("could not remove symbolic link \"%s\": %m",
-										linkloc)));
+						  errmsg("could not remove symbolic link \"%s\": %m",
+								 linkloc)));
 				}
 
 				if (symlink(ti->path, linkloc) < 0)
 					ereport(ERROR,
 							(errcode_for_file_access(),
-							 errmsg("could not create symbolic link \"%s\": %m",
-									linkloc)));
+						  errmsg("could not create symbolic link \"%s\": %m",
+								 linkloc)));
 
 				pfree(ti->oid);
 				pfree(ti->path);
@@ -6222,9 +6224,9 @@ StartupXLOG(void)
 	 * in place if the database had been cleanly shut down, but it seems
 	 * safest to just remove them always and let them be rebuilt during the
 	 * first backend startup.  These files needs to be removed from all
-	 * directories including pg_tblspc, however the symlinks are created
-	 * only after reading tablesapce_map file in case of archive recovery
-	 * from backup, so needs to clear old relcache files here after creating
+	 * directories including pg_tblspc, however the symlinks are created only
+	 * after reading tablespace_map file in case of archive recovery from
+	 * backup, so needs to clear old relcache files here after creating
 	 * symlinks.
 	 */
 	RelationCacheInitFileRemove();
@@ -6442,9 +6444,9 @@ StartupXLOG(void)
 		 * Also set backupEndPoint and use minRecoveryPoint as the backup end
 		 * location if we're starting recovery from a base backup which was
 		 * taken from a standby. In this case, the database system status in
-		 * pg_control must indicate that the database was already in
-		 * recovery. Usually that will be DB_IN_ARCHIVE_RECOVERY but also can
-		 * be DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
+		 * pg_control must indicate that the database was already in recovery.
+		 * Usually that will be DB_IN_ARCHIVE_RECOVERY but also can be
+		 * DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
 		 * before reaching this point; e.g. because restore_command or
 		 * primary_conninfo were faulty.
 		 *
@@ -6500,10 +6502,10 @@ StartupXLOG(void)
 
 		/*
 		 * If there was a tablespace_map file, it's done its job and the
-		 * symlinks have been created.  We must get rid of the map file
-		 * so that if we crash during recovery, we don't create symlinks
-		 * again.  It seems prudent though to just rename the file out of
-		 * the way rather than delete it completely.
+		 * symlinks have been created.  We must get rid of the map file so
+		 * that if we crash during recovery, we don't create symlinks again.
+		 * It seems prudent though to just rename the file out of the way
+		 * rather than delete it completely.
 		 */
 		if (haveTblspcMap)
 		{
@@ -6859,7 +6861,8 @@ StartupXLOG(void)
 				{
 					/*
 					 * Before we continue on the new timeline, clean up any
-					 * (possibly bogus) future WAL segments on the old timeline.
+					 * (possibly bogus) future WAL segments on the old
+					 * timeline.
 					 */
 					RemoveNonParentXlogFiles(EndRecPtr, ThisTimeLineID);
 
@@ -6890,32 +6893,33 @@ StartupXLOG(void)
 			{
 				if (!reachedConsistency)
 					ereport(FATAL,
-						(errmsg("requested recovery stop point is before consistent recovery point")));
+							(errmsg("requested recovery stop point is before consistent recovery point")));
 
 				/*
 				 * This is the last point where we can restart recovery with a
 				 * new recovery target, if we shutdown and begin again. After
-				 * this, Resource Managers may choose to do permanent corrective
-				 * actions at end of recovery.
+				 * this, Resource Managers may choose to do permanent
+				 * corrective actions at end of recovery.
 				 */
 				switch (recoveryTargetAction)
 				{
 					case RECOVERY_TARGET_ACTION_SHUTDOWN:
-							/*
-							 * exit with special return code to request shutdown
-							 * of postmaster.  Log messages issued from
-							 * postmaster.
-							 */
-							proc_exit(3);
+
+						/*
+						 * exit with special return code to request shutdown
+						 * of postmaster.  Log messages issued from
+						 * postmaster.
+						 */
+						proc_exit(3);
 
 					case RECOVERY_TARGET_ACTION_PAUSE:
-							SetRecoveryPause(true);
-							recoveryPausesHere();
+						SetRecoveryPause(true);
+						recoveryPausesHere();
 
-							/* drop into promote */
+						/* drop into promote */
 
 					case RECOVERY_TARGET_ACTION_PROMOTE:
-							break;
+						break;
 				}
 			}
 
@@ -7259,8 +7263,8 @@ StartupXLOG(void)
 		 * too.
 		 *
 		 * If a .done or .ready file already exists for the old timeline,
-		 * however, we had already determined that the segment is complete,
-		 * so we can let it be archived normally. (In particular, if it was
+		 * however, we had already determined that the segment is complete, so
+		 * we can let it be archived normally. (In particular, if it was
 		 * restored from the archive to begin with, it's expected to have a
 		 * .done file).
 		 */
@@ -7291,8 +7295,8 @@ StartupXLOG(void)
 				if (rename(origpath, partialpath) != 0)
 					ereport(ERROR,
 							(errcode_for_file_access(),
-							 errmsg("could not rename file \"%s\" to \"%s\": %m",
-									origpath, partialpath)));
+						 errmsg("could not rename file \"%s\" to \"%s\": %m",
+								origpath, partialpath)));
 				XLogArchiveNotify(partialfname);
 			}
 		}
@@ -7366,8 +7370,8 @@ StartupXLOG(void)
 	XLogReportParameters();
 
 	/*
-	 * Local WAL inserts enabled, so it's time to finish initialization
-	 * of commit timestamp.
+	 * Local WAL inserts enabled, so it's time to finish initialization of
+	 * commit timestamp.
 	 */
 	CompleteCommitTsInitialization();
 
@@ -7961,7 +7965,7 @@ LogCheckpointStart(int flags, bool restartpoint)
 		 (flags & CHECKPOINT_WAIT) ? " wait" : "",
 		 (flags & CHECKPOINT_CAUSE_XLOG) ? " xlog" : "",
 		 (flags & CHECKPOINT_CAUSE_TIME) ? " time" : "",
-		 (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" :"");
+		 (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : "");
 }
 
 /*
@@ -8056,8 +8060,8 @@ static void
 UpdateCheckPointDistanceEstimate(uint64 nbytes)
 {
 	/*
-	 * To estimate the number of segments consumed between checkpoints, keep
-	 * a moving average of the amount of WAL generated in previous checkpoint
+	 * To estimate the number of segments consumed between checkpoints, keep a
+	 * moving average of the amount of WAL generated in previous checkpoint
 	 * cycles. However, if the load is bursty, with quiet periods and busy
 	 * periods, we want to cater for the peak load. So instead of a plain
 	 * moving average, let the average decline slowly if the previous cycle
@@ -9473,8 +9477,8 @@ xlog_redo(XLogReaderState *record)
 		}
 
 		/*
-		 * Update the commit timestamp tracking. If there was a change
-		 * it needs to be activated or deactivated accordingly.
+		 * Update the commit timestamp tracking. If there was a change it
+		 * needs to be activated or deactivated accordingly.
 		 */
 		if (track_commit_timestamp != xlrec.track_commit_timestamp)
 		{
@@ -9483,6 +9487,7 @@ xlog_redo(XLogReaderState *record)
 			if (track_commit_timestamp)
 				ActivateCommitTs();
 			else
+
 				/*
 				 * We can't create a new WAL record here, but that's OK as
 				 * master did the WAL logging already and we will replay the
@@ -9996,7 +10001,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
 			char	   *relpath = NULL;
 			int			rllen;
 			StringInfoData buflinkpath;
-			char    *s = linkpath;
+			char	   *s = linkpath;
 
 			/* Skip special stuff */
 			if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
@@ -10023,10 +10028,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
 			linkpath[rllen] = '\0';
 
 			/*
-			 * Add the escape character '\\' before newline in a string
-			 * to ensure that we can distinguish between the newline in
-			 * the tablespace path and end of line while reading
-			 * tablespace_map file during archive recovery.
+			 * Add the escape character '\\' before newline in a string to
+			 * ensure that we can distinguish between the newline in the
+			 * tablespace path and end of line while reading tablespace_map
+			 * file during archive recovery.
 			 */
 			initStringInfo(&buflinkpath);
 
@@ -10054,8 +10059,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
 			ti->rpath = relpath ? pstrdup(relpath) : NULL;
 			ti->size = infotbssize ? sendTablespace(fullpath, true) : -1;
 
-			if(tablespaces)
-			   *tablespaces = lappend(*tablespaces, ti);
+			if (tablespaces)
+				*tablespaces = lappend(*tablespaces, ti);
 
 			appendStringInfo(&tblspc_mapfbuf, "%s %s\n", ti->oid, ti->path);
 
@@ -10150,10 +10155,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
 				}
 				else
 					ereport(ERROR,
-							(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-							 errmsg("a backup is already in progress"),
-							 errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
-									 TABLESPACE_MAP)));
+						  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+						   errmsg("a backup is already in progress"),
+						   errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
+								   TABLESPACE_MAP)));
 
 				fp = AllocateFile(TABLESPACE_MAP, "w");
 
@@ -10353,8 +10358,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 							BACKUP_LABEL_FILE)));
 
 		/*
-		 * Remove tablespace_map file if present, it is created
-		 * only if there are tablespaces.
+		 * Remove tablespace_map file if present; it is created only if there
+		 * are tablespaces.
 		 */
 		unlink(TABLESPACE_MAP);
 	}
@@ -10773,10 +10778,12 @@ read_tablespace_map(List **tablespaces)
 	tablespaceinfo *ti;
 	FILE	   *lfp;
 	char		tbsoid[MAXPGPATH];
-	char		*tbslinkpath;
+	char	   *tbslinkpath;
 	char		str[MAXPGPATH];
-	int			ch, prev_ch = -1,
-				i = 0, n;
+	int			ch,
+				prev_ch = -1,
+				i = 0,
+				n;
 
 	/*
 	 * See if tablespace_map file is present
@@ -10794,9 +10801,9 @@ read_tablespace_map(List **tablespaces)
 
 	/*
 	 * Read and parse the link name and path lines from tablespace_map file
-	 * (this code is pretty crude, but we are not expecting any variability
-	 * in the file format).  While taking backup we embed escape character
-	 * '\\' before newline in tablespace path, so that during reading of
+	 * (this code is pretty crude, but we are not expecting any variability in
+	 * the file format).  While taking backup we embed escape character '\\'
+	 * before newline in tablespace path, so that during reading of
 	 * tablespace_map file, we could distinguish newline in tablespace path
 	 * and end of line.  Now while reading tablespace_map file, remove the
 	 * escape character that has been added in tablespace path during backup.
@@ -10808,8 +10815,8 @@ read_tablespace_map(List **tablespaces)
 			str[i] = '\0';
 			if (sscanf(str, "%s %n", tbsoid, &n) != 1)
 				ereport(FATAL,
-					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-						errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
+						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+					 errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
 			tbslinkpath = str + n;
 			i = 0;
 
@@ -10821,7 +10828,7 @@ read_tablespace_map(List **tablespaces)
 			continue;
 		}
 		else if ((ch == '\n' || ch == '\r') && prev_ch == '\\')
-			str[i-1] = ch;
+			str[i - 1] = ch;
 		else
 			str[i++] = ch;
 		prev_ch = ch;
@@ -10868,7 +10875,7 @@ BackupInProgress(void)
 
 /*
  * CancelBackup: rename the "backup_label" and "tablespace_map"
- *               files to cancel backup mode
+ *				 files to cancel backup mode
  *
  * If the "backup_label" file exists, it will be renamed to "backup_label.old".
  * Similarly, if the "tablespace_map" file exists, it will be renamed to
@@ -11115,8 +11122,8 @@ static bool
 WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
 							bool fetching_ckpt, XLogRecPtr tliRecPtr)
 {
-	static TimestampTz	last_fail_time = 0;
-	TimestampTz	now;
+	static TimestampTz last_fail_time = 0;
+	TimestampTz now;
 
 	/*-------
 	 * Standby mode is implemented by a state machine:
@@ -11270,9 +11277,10 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
 					 */
 					now = GetCurrentTimestamp();
 					if (!TimestampDifferenceExceeds(last_fail_time, now,
-													wal_retrieve_retry_interval))
+												wal_retrieve_retry_interval))
 					{
-						long		secs, wait_time;
+						long		secs,
+									wait_time;
 						int			usecs;
 
 						TimestampDifference(last_fail_time, now, &secs, &usecs);
@@ -11280,7 +11288,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
 							(secs * 1000 + usecs / 1000);
 
 						WaitLatch(&XLogCtl->recoveryWakeupLatch,
-								  WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+							 WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
 								  wait_time);
 						ResetLatch(&XLogCtl->recoveryWakeupLatch);
 						now = GetCurrentTimestamp();
@@ -11605,8 +11613,8 @@ fsync_pgdata(char *datadir)
 		return;
 
 	/*
-	 * If possible, hint to the kernel that we're soon going to fsync
-	 * the data directory and its contents.
+	 * If possible, hint to the kernel that we're soon going to fsync the data
+	 * directory and its contents.
 	 */
 #if defined(HAVE_SYNC_FILE_RANGE) || \
 	(defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED))
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 419736da310..b96c39ac657 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -33,7 +33,7 @@
 #include "pg_trace.h"
 
 /* Buffer size required to store a compressed version of backup block image */
-#define PGLZ_MAX_BLCKSZ	PGLZ_MAX_OUTPUT(BLCKSZ)
+#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
 
 /*
  * For each block reference registered with XLogRegisterBuffer, we fill in
@@ -58,7 +58,7 @@ typedef struct
 
 	/* buffer to store a compressed version of backup block image */
 	char		compressed_page[PGLZ_MAX_BLCKSZ];
-}	registered_buffer;
+} registered_buffer;
 
 static registered_buffer *registered_buffers;
 static int	max_registered_buffers;		/* allocated size */
@@ -110,7 +110,7 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
 				   XLogRecPtr RedoRecPtr, bool doPageWrites,
 				   XLogRecPtr *fpw_lsn);
 static bool XLogCompressBackupBlock(char *page, uint16 hole_offset,
-									uint16 hole_length, char *dest, uint16 *dlen);
+						uint16 hole_length, char *dest, uint16 *dlen);
 
 /*
  * Begin constructing a WAL record. This must be called before the
@@ -602,7 +602,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
 											&compressed_len);
 			}
 
-			/* Fill in the remaining fields in the XLogRecordBlockHeader struct */
+			/*
+			 * Fill in the remaining fields in the XLogRecordBlockHeader
+			 * struct
+			 */
 			bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
 
 			/*
@@ -762,7 +765,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
  * the length of compressed block image.
  */
 static bool
-XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
+XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length,
 						char *dest, uint16 *dlen)
 {
 	int32		orig_len = BLCKSZ - hole_length;
@@ -790,16 +793,15 @@ XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
 		source = page;
 
 	/*
-	 * We recheck the actual size even if pglz_compress() reports success
-	 * and see if the number of bytes saved by compression is larger than
-	 * the length of extra data needed for the compressed version of block
-	 * image.
+	 * We recheck the actual size even if pglz_compress() reports success and
+	 * see if the number of bytes saved by compression is larger than the
+	 * length of extra data needed for the compressed version of block image.
 	 */
 	len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default);
 	if (len >= 0 &&
 		len + extra_bytes < orig_len)
 	{
-		*dlen = (uint16) len;		/* successful compression */
+		*dlen = (uint16) len;	/* successful compression */
 		return true;
 	}
 	return false;
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 3661e7229aa..a9e926c5a28 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -1086,50 +1086,53 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg)
 					 blk->bimg_len == BLCKSZ))
 				{
 					report_invalid_record(state,
-					  "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
+										  "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
 										  (unsigned int) blk->hole_offset,
 										  (unsigned int) blk->hole_length,
 										  (unsigned int) blk->bimg_len,
 										  (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
 					goto err;
 				}
+
 				/*
-				 * cross-check that hole_offset == 0 and hole_length == 0
-				 * if the HAS_HOLE flag is not set.
+				 * cross-check that hole_offset == 0 and hole_length == 0 if
+				 * the HAS_HOLE flag is not set.
 				 */
 				if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
 					(blk->hole_offset != 0 || blk->hole_length != 0))
 				{
 					report_invalid_record(state,
-					  "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
+										  "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
 										  (unsigned int) blk->hole_offset,
 										  (unsigned int) blk->hole_length,
 										  (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
 					goto err;
 				}
+
 				/*
-				 * cross-check that bimg_len < BLCKSZ
-				 * if the IS_COMPRESSED flag is set.
+				 * cross-check that bimg_len < BLCKSZ if the IS_COMPRESSED
+				 * flag is set.
 				 */
 				if ((blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
 					blk->bimg_len == BLCKSZ)
 				{
 					report_invalid_record(state,
-					  "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X",
+										  "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X",
 										  (unsigned int) blk->bimg_len,
 										  (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
 					goto err;
 				}
+
 				/*
-				 * cross-check that bimg_len = BLCKSZ if neither
-				 * HAS_HOLE nor IS_COMPRESSED flag is set.
+				 * cross-check that bimg_len = BLCKSZ if neither HAS_HOLE nor
+				 * IS_COMPRESSED flag is set.
 				 */
 				if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
 					!(blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
 					blk->bimg_len != BLCKSZ)
 				{
 					report_invalid_record(state,
-					  "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X",
+										  "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X",
 										  (unsigned int) blk->data_len,
 										  (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
 					goto err;
@@ -1294,8 +1297,8 @@ bool
 RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
 {
 	DecodedBkpBlock *bkpb;
-	char   *ptr;
-	char	tmp[BLCKSZ];
+	char	   *ptr;
+	char		tmp[BLCKSZ];
 
 	if (!record->blocks[block_id].in_use)
 		return false;
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index e42187a7d5d..95d6c146fa5 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -401,6 +401,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
 			proc_exit(1);		/* should never return */
 
 		case BootstrapProcess:
+
 			/*
 			 * There was a brief instant during which mode was Normal; this is
 			 * okay.  We need to be in bootstrap mode during BootStrapXLOG for
diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm
index ac1dafb0411..5e704181ecc 100644
--- a/src/backend/catalog/Catalog.pm
+++ b/src/backend/catalog/Catalog.pm
@@ -189,7 +189,8 @@ sub Catalogs
 						}
 						else
 						{
-							die "unknown column option $attopt on column $attname"
+							die
+"unknown column option $attopt on column $attname";
 						}
 					}
 					push @{ $catalog{columns} }, \%row;
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 943909c8225..50a00cf8c8a 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -397,14 +397,14 @@ ExecuteGrantStmt(GrantStmt *stmt)
 	istmt.behavior = stmt->behavior;
 
 	/*
-	 * Convert the RoleSpec list into an Oid list.  Note that at this point
-	 * we insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
+	 * Convert the RoleSpec list into an Oid list.  Note that at this point we
+	 * insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
 	 * there shouldn't be any additional work needed to support this case.
 	 */
 	foreach(cell, stmt->grantees)
 	{
-		RoleSpec *grantee = (RoleSpec *) lfirst(cell);
-		Oid grantee_uid;
+		RoleSpec   *grantee = (RoleSpec *) lfirst(cell);
+		Oid			grantee_uid;
 
 		switch (grantee->roletype)
 		{
@@ -892,14 +892,14 @@ ExecAlterDefaultPrivilegesStmt(AlterDefaultPrivilegesStmt *stmt)
 	iacls.behavior = action->behavior;
 
 	/*
-	 * Convert the RoleSpec list into an Oid list.  Note that at this point
-	 * we insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
+	 * Convert the RoleSpec list into an Oid list.  Note that at this point we
+	 * insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
 	 * there shouldn't be any additional work needed to support this case.
 	 */
 	foreach(cell, action->grantees)
 	{
-		RoleSpec *grantee = (RoleSpec *) lfirst(cell);
-		Oid grantee_uid;
+		RoleSpec   *grantee = (RoleSpec *) lfirst(cell);
+		Oid			grantee_uid;
 
 		switch (grantee->roletype)
 		{
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index ec4ba397c71..c1212e9075a 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -213,8 +213,8 @@ deleteObjectsInList(ObjectAddresses *targetObjects, Relation *depRel,
 		{
 			const ObjectAddress *thisobj = &targetObjects->refs[i];
 			const ObjectAddressExtra *extra = &targetObjects->extras[i];
-			bool	original = false;
-			bool	normal = false;
+			bool		original = false;
+			bool		normal = false;
 
 			if (extra->flags & DEPFLAG_ORIGINAL)
 				original = true;
@@ -1611,10 +1611,10 @@ find_expr_references_walker(Node *node,
 										   context->addrs);
 					break;
 
-				/*
-				 * Dependencies for regrole should be shared among all
-				 * databases, so explicitly inhibit to have dependencies.
-				 */
+					/*
+					 * Dependencies for regrole should be shared among all
+					 * databases, so explicitly inhibit to have dependencies.
+					 */
 				case REGROLEOID:
 					ereport(ERROR,
 							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl
index a5c78eed493..d06eae019ad 100644
--- a/src/backend/catalog/genbki.pl
+++ b/src/backend/catalog/genbki.pl
@@ -147,7 +147,7 @@ foreach my $catname (@{ $catalogs->{names} })
 	}
 	print BKI "\n )\n";
 
-	# open it, unless bootstrap case (create bootstrap does this automatically)
+   # open it, unless bootstrap case (create bootstrap does this automatically)
 	if ($catalog->{bootstrap} eq '')
 	{
 		print BKI "open $catname\n";
@@ -242,12 +242,12 @@ foreach my $catname (@{ $catalogs->{names} })
 			{
 				$attnum = 0;
 				my @SYS_ATTRS = (
-					{ name => 'ctid', type => 'tid' },
-					{ name => 'oid', type => 'oid' },
-					{ name => 'xmin', type => 'xid' },
-					{ name => 'cmin', type=> 'cid' },
-					{ name => 'xmax', type=> 'xid' },
-					{ name => 'cmax', type => 'cid' },
+					{ name => 'ctid',     type => 'tid' },
+					{ name => 'oid',      type => 'oid' },
+					{ name => 'xmin',     type => 'xid' },
+					{ name => 'cmin',     type => 'cid' },
+					{ name => 'xmax',     type => 'xid' },
+					{ name => 'cmax',     type => 'cid' },
 					{ name => 'tableoid', type => 'oid' });
 				foreach my $attr (@SYS_ATTRS)
 				{
@@ -384,6 +384,7 @@ sub emit_pgattr_row
 			}
 			elsif ($priornotnull)
 			{
+
 				# attnotnull will automatically be set if the type is
 				# fixed-width and prior columns are all NOT NULL ---
 				# compare DefineAttr in bootstrap.c. oidvector and
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index bac9fbe7eb3..4246554d19d 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1709,8 +1709,8 @@ BuildSpeculativeIndexInfo(Relation index, IndexInfo *ii)
 	ii->ii_UniqueStrats = (uint16 *) palloc(sizeof(uint16) * ncols);
 
 	/*
-	 * We have to look up the operator's strategy number.  This
-	 * provides a cross-check that the operator does match the index.
+	 * We have to look up the operator's strategy number.  This provides a
+	 * cross-check that the operator does match the index.
 	 */
 	/* We need the func OIDs and strategy numbers too */
 	for (i = 0; i < ncols; i++)
@@ -3186,7 +3186,7 @@ IndexGetRelation(Oid indexId, bool missing_ok)
  */
 void
 reindex_index(Oid indexId, bool skip_constraint_checks, char persistence,
-				int options)
+			  int options)
 {
 	Relation	iRel,
 				heapRelation;
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index 8d98c5d9a63..c37e38fa3bd 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -453,89 +453,188 @@ static const struct object_type_map
 	const char *tm_name;
 	ObjectType	tm_type;
 }
-ObjectTypeMap[] =
+
+			ObjectTypeMap[] =
 {
 	/* OCLASS_CLASS, all kinds of relations */
-	{ "table", OBJECT_TABLE },
-	{ "index", OBJECT_INDEX },
-	{ "sequence", OBJECT_SEQUENCE },
-	{ "toast table", -1 },		/* unmapped */
-	{ "view", OBJECT_VIEW },
-	{ "materialized view", OBJECT_MATVIEW },
-	{ "composite type", -1 },	/* unmapped */
-	{ "foreign table", OBJECT_FOREIGN_TABLE },
-	{ "table column", OBJECT_COLUMN },
-	{ "index column", -1 },		/* unmapped */
-	{ "sequence column", -1 },	/* unmapped */
-	{ "toast table column", -1 },	/* unmapped */
-	{ "view column", -1 },		/* unmapped */
-	{ "materialized view column", -1 },	/* unmapped */
-	{ "composite type column", -1 },	/* unmapped */
-	{ "foreign table column", OBJECT_COLUMN },
+	{
+		"table", OBJECT_TABLE
+	},
+	{
+		"index", OBJECT_INDEX
+	},
+	{
+		"sequence", OBJECT_SEQUENCE
+	},
+	{
+		"toast table", -1
+	},							/* unmapped */
+	{
+		"view", OBJECT_VIEW
+	},
+	{
+		"materialized view", OBJECT_MATVIEW
+	},
+	{
+		"composite type", -1
+	},							/* unmapped */
+	{
+		"foreign table", OBJECT_FOREIGN_TABLE
+	},
+	{
+		"table column", OBJECT_COLUMN
+	},
+	{
+		"index column", -1
+	},							/* unmapped */
+	{
+		"sequence column", -1
+	},							/* unmapped */
+	{
+		"toast table column", -1
+	},							/* unmapped */
+	{
+		"view column", -1
+	},							/* unmapped */
+	{
+		"materialized view column", -1
+	},							/* unmapped */
+	{
+		"composite type column", -1
+	},							/* unmapped */
+	{
+		"foreign table column", OBJECT_COLUMN
+	},
 	/* OCLASS_PROC */
-	{ "aggregate", OBJECT_AGGREGATE },
-	{ "function", OBJECT_FUNCTION },
+	{
+		"aggregate", OBJECT_AGGREGATE
+	},
+	{
+		"function", OBJECT_FUNCTION
+	},
 	/* OCLASS_TYPE */
-	{ "type", OBJECT_TYPE },
+	{
+		"type", OBJECT_TYPE
+	},
 	/* OCLASS_CAST */
-	{ "cast", OBJECT_CAST },
+	{
+		"cast", OBJECT_CAST
+	},
 	/* OCLASS_COLLATION */
-	{ "collation", OBJECT_COLLATION },
+	{
+		"collation", OBJECT_COLLATION
+	},
 	/* OCLASS_CONSTRAINT */
-	{ "table constraint", OBJECT_TABCONSTRAINT },
-	{ "domain constraint", OBJECT_DOMCONSTRAINT },
+	{
+		"table constraint", OBJECT_TABCONSTRAINT
+	},
+	{
+		"domain constraint", OBJECT_DOMCONSTRAINT
+	},
 	/* OCLASS_CONVERSION */
-	{ "conversion", OBJECT_CONVERSION },
+	{
+		"conversion", OBJECT_CONVERSION
+	},
 	/* OCLASS_DEFAULT */
-	{ "default value", OBJECT_DEFAULT },
+	{
+		"default value", OBJECT_DEFAULT
+	},
 	/* OCLASS_LANGUAGE */
-	{ "language", OBJECT_LANGUAGE },
+	{
+		"language", OBJECT_LANGUAGE
+	},
 	/* OCLASS_LARGEOBJECT */
-	{ "large object", OBJECT_LARGEOBJECT },
+	{
+		"large object", OBJECT_LARGEOBJECT
+	},
 	/* OCLASS_OPERATOR */
-	{ "operator", OBJECT_OPERATOR },
+	{
+		"operator", OBJECT_OPERATOR
+	},
 	/* OCLASS_OPCLASS */
-	{ "operator class", OBJECT_OPCLASS },
+	{
+		"operator class", OBJECT_OPCLASS
+	},
 	/* OCLASS_OPFAMILY */
-	{ "operator family", OBJECT_OPFAMILY },
+	{
+		"operator family", OBJECT_OPFAMILY
+	},
 	/* OCLASS_AMOP */
-	{ "operator of access method", OBJECT_AMOP },
+	{
+		"operator of access method", OBJECT_AMOP
+	},
 	/* OCLASS_AMPROC */
-	{ "function of access method", OBJECT_AMPROC },
+	{
+		"function of access method", OBJECT_AMPROC
+	},
 	/* OCLASS_REWRITE */
-	{ "rule", OBJECT_RULE },
+	{
+		"rule", OBJECT_RULE
+	},
 	/* OCLASS_TRIGGER */
-	{ "trigger", OBJECT_TRIGGER },
+	{
+		"trigger", OBJECT_TRIGGER
+	},
 	/* OCLASS_SCHEMA */
-	{ "schema", OBJECT_SCHEMA },
+	{
+		"schema", OBJECT_SCHEMA
+	},
 	/* OCLASS_TSPARSER */
-	{ "text search parser", OBJECT_TSPARSER },
+	{
+		"text search parser", OBJECT_TSPARSER
+	},
 	/* OCLASS_TSDICT */
-	{ "text search dictionary", OBJECT_TSDICTIONARY },
+	{
+		"text search dictionary", OBJECT_TSDICTIONARY
+	},
 	/* OCLASS_TSTEMPLATE */
-	{ "text search template", OBJECT_TSTEMPLATE },
+	{
+		"text search template", OBJECT_TSTEMPLATE
+	},
 	/* OCLASS_TSCONFIG */
-	{ "text search configuration", OBJECT_TSCONFIGURATION },
+	{
+		"text search configuration", OBJECT_TSCONFIGURATION
+	},
 	/* OCLASS_ROLE */
-	{ "role", OBJECT_ROLE },
+	{
+		"role", OBJECT_ROLE
+	},
 	/* OCLASS_DATABASE */
-	{ "database", OBJECT_DATABASE },
+	{
+		"database", OBJECT_DATABASE
+	},
 	/* OCLASS_TBLSPACE */
-	{ "tablespace", OBJECT_TABLESPACE },
+	{
+		"tablespace", OBJECT_TABLESPACE
+	},
 	/* OCLASS_FDW */
-	{ "foreign-data wrapper", OBJECT_FDW },
+	{
+		"foreign-data wrapper", OBJECT_FDW
+	},
 	/* OCLASS_FOREIGN_SERVER */
-	{ "server", OBJECT_FOREIGN_SERVER },
+	{
+		"server", OBJECT_FOREIGN_SERVER
+	},
 	/* OCLASS_USER_MAPPING */
-	{ "user mapping", OBJECT_USER_MAPPING },
+	{
+		"user mapping", OBJECT_USER_MAPPING
+	},
 	/* OCLASS_DEFACL */
-	{ "default acl", OBJECT_DEFACL },
+	{
+		"default acl", OBJECT_DEFACL
+	},
 	/* OCLASS_EXTENSION */
-	{ "extension", OBJECT_EXTENSION },
+	{
+		"extension", OBJECT_EXTENSION
+	},
 	/* OCLASS_EVENT_TRIGGER */
-	{ "event trigger", OBJECT_EVENT_TRIGGER },
+	{
+		"event trigger", OBJECT_EVENT_TRIGGER
+	},
 	/* OCLASS_POLICY */
-	{ "policy", OBJECT_POLICY }
+	{
+		"policy", OBJECT_POLICY
+	}
 };
 
 const ObjectAddress InvalidObjectAddress =
@@ -667,16 +766,16 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
 				break;
 			case OBJECT_DOMCONSTRAINT:
 				{
-					ObjectAddress	domaddr;
-					char		   *constrname;
+					ObjectAddress domaddr;
+					char	   *constrname;
 
 					domaddr = get_object_address_type(OBJECT_DOMAIN,
-													  list_head(objname), missing_ok);
+											 list_head(objname), missing_ok);
 					constrname = strVal(linitial(objargs));
 
 					address.classId = ConstraintRelationId;
 					address.objectId = get_domain_constraint_oid(domaddr.objectId,
-															  constrname, missing_ok);
+													 constrname, missing_ok);
 					address.objectSubId = 0;
 
 				}
@@ -1286,8 +1385,8 @@ get_object_address_attrdef(ObjectType objtype, List *objname,
 	if (attnum != InvalidAttrNumber && tupdesc->constr != NULL)
 	{
 		Relation	attrdef;
-		ScanKeyData	keys[2];
-		SysScanDesc	scan;
+		ScanKeyData keys[2];
+		SysScanDesc scan;
 		HeapTuple	tup;
 
 		attrdef = relation_open(AttrDefaultRelationId, AccessShareLock);
@@ -1419,14 +1518,14 @@ static ObjectAddress
 get_object_address_opf_member(ObjectType objtype,
 							  List *objname, List *objargs, bool missing_ok)
 {
-	ObjectAddress	famaddr;
-	ObjectAddress	address;
-	ListCell *cell;
-	List   *copy;
-	char   *typenames[2];
-	Oid		typeoids[2];
-	int		membernum;
-	int		i;
+	ObjectAddress famaddr;
+	ObjectAddress address;
+	ListCell   *cell;
+	List	   *copy;
+	char	   *typenames[2];
+	Oid			typeoids[2];
+	int			membernum;
+	int			i;
 
 	/*
 	 * The last element of the objname list contains the strategy or procedure
@@ -1441,9 +1540,9 @@ get_object_address_opf_member(ObjectType objtype,
 
 	/* find out left/right type names and OIDs */
 	i = 0;
-	foreach (cell, objargs)
+	foreach(cell, objargs)
 	{
-		ObjectAddress	typaddr;
+		ObjectAddress typaddr;
 
 		typenames[i] = strVal(lfirst(cell));
 		typaddr = get_object_address_type(OBJECT_TYPE, cell, missing_ok);
@@ -1471,9 +1570,9 @@ get_object_address_opf_member(ObjectType objtype,
 					if (!missing_ok)
 						ereport(ERROR,
 								(errcode(ERRCODE_UNDEFINED_OBJECT),
-								 errmsg("operator %d (%s, %s) of %s does not exist",
-										membernum, typenames[0], typenames[1],
-										getObjectDescription(&famaddr))));
+						  errmsg("operator %d (%s, %s) of %s does not exist",
+								 membernum, typenames[0], typenames[1],
+								 getObjectDescription(&famaddr))));
 				}
 				else
 				{
@@ -1500,9 +1599,9 @@ get_object_address_opf_member(ObjectType objtype,
 					if (!missing_ok)
 						ereport(ERROR,
 								(errcode(ERRCODE_UNDEFINED_OBJECT),
-								 errmsg("function %d (%s, %s) of %s does not exist",
-										membernum, typenames[0], typenames[1],
-										getObjectDescription(&famaddr))));
+						  errmsg("function %d (%s, %s) of %s does not exist",
+								 membernum, typenames[0], typenames[1],
+								 getObjectDescription(&famaddr))));
 				}
 				else
 				{
@@ -1636,8 +1735,8 @@ get_object_address_defacl(List *objname, List *objargs, bool missing_ok)
 		default:
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("unrecognized default ACL object type %c", objtype),
-					 errhint("Valid object types are 'r', 'S', 'f', and 'T'.")));
+				  errmsg("unrecognized default ACL object type %c", objtype),
+				 errhint("Valid object types are 'r', 'S', 'f', and 'T'.")));
 	}
 
 	/*
@@ -1688,8 +1787,8 @@ not_found:
 		else
 			ereport(ERROR,
 					(errcode(ERRCODE_UNDEFINED_OBJECT),
-					 errmsg("default ACL for user \"%s\" on %s does not exist",
-							username, objtype_str)));
+				   errmsg("default ACL for user \"%s\" on %s does not exist",
+						  username, objtype_str)));
 	}
 	return address;
 }
@@ -1701,11 +1800,11 @@ not_found:
 static List *
 textarray_to_strvaluelist(ArrayType *arr)
 {
-	Datum  *elems;
-	bool   *nulls;
-	int		nelems;
-	List   *list = NIL;
-	int		i;
+	Datum	   *elems;
+	bool	   *nulls;
+	int			nelems;
+	List	   *list = NIL;
+	int			i;
 
 	deconstruct_array(arr, TEXTOID, -1, false, 'i',
 					  &elems, &nulls, &nelems);
@@ -1728,18 +1827,18 @@ textarray_to_strvaluelist(ArrayType *arr)
 Datum
 pg_get_object_address(PG_FUNCTION_ARGS)
 {
-	char   *ttype = TextDatumGetCString(PG_GETARG_TEXT_P(0));
-	ArrayType *namearr = PG_GETARG_ARRAYTYPE_P(1);
-	ArrayType *argsarr = PG_GETARG_ARRAYTYPE_P(2);
-	int		itype;
-	ObjectType type;
-	List   *name;
-	List   *args;
+	char	   *ttype = TextDatumGetCString(PG_GETARG_TEXT_P(0));
+	ArrayType  *namearr = PG_GETARG_ARRAYTYPE_P(1);
+	ArrayType  *argsarr = PG_GETARG_ARRAYTYPE_P(2);
+	int			itype;
+	ObjectType	type;
+	List	   *name;
+	List	   *args;
 	ObjectAddress addr;
-	TupleDesc tupdesc;
-	Datum	values[3];
-	bool	nulls[3];
-	HeapTuple htup;
+	TupleDesc	tupdesc;
+	Datum		values[3];
+	bool		nulls[3];
+	HeapTuple	htup;
 	Relation	relation;
 
 	/* Decode object type, raise error if unknown */
@@ -1751,16 +1850,16 @@ pg_get_object_address(PG_FUNCTION_ARGS)
 	type = (ObjectType) itype;
 
 	/*
-	 * Convert the text array to the representation appropriate for the
-	 * given object type.  Most use a simple string Values list, but there
-	 * are some exceptions.
+	 * Convert the text array to the representation appropriate for the given
+	 * object type.  Most use a simple string Values list, but there are some
+	 * exceptions.
 	 */
 	if (type == OBJECT_TYPE || type == OBJECT_DOMAIN || type == OBJECT_CAST ||
 		type == OBJECT_DOMCONSTRAINT)
 	{
-		Datum	*elems;
-		bool	*nulls;
-		int		nelems;
+		Datum	   *elems;
+		bool	   *nulls;
+		int			nelems;
 
 		deconstruct_array(namearr, TEXTOID, -1, false, 'i',
 						  &elems, &nulls, &nelems);
@@ -1812,10 +1911,10 @@ pg_get_object_address(PG_FUNCTION_ARGS)
 		type == OBJECT_AMPROC)
 	{
 		/* in these cases, the args list must be of TypeName */
-		Datum  *elems;
-		bool   *nulls;
-		int		nelems;
-		int		i;
+		Datum	   *elems;
+		bool	   *nulls;
+		int			nelems;
+		int			i;
 
 		deconstruct_array(argsarr, TEXTOID, -1, false, 'i',
 						  &elems, &nulls, &nelems);
@@ -1826,9 +1925,9 @@ pg_get_object_address(PG_FUNCTION_ARGS)
 			if (nulls[i])
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-						 errmsg("name or argument lists may not contain nulls")));
+					errmsg("name or argument lists may not contain nulls")));
 			args = lappend(args,
-						   typeStringToTypeName(TextDatumGetCString(elems[i])));
+						typeStringToTypeName(TextDatumGetCString(elems[i])));
 		}
 	}
 	else
@@ -1850,7 +1949,7 @@ pg_get_object_address(PG_FUNCTION_ARGS)
 			if (list_length(args) != 1)
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-						 errmsg("argument list length must be exactly %d", 1)));
+					  errmsg("argument list length must be exactly %d", 1)));
 			break;
 		case OBJECT_OPFAMILY:
 		case OBJECT_OPCLASS:
@@ -1870,7 +1969,7 @@ pg_get_object_address(PG_FUNCTION_ARGS)
 			if (list_length(args) != 2)
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-						 errmsg("argument list length must be exactly %d", 2)));
+					  errmsg("argument list length must be exactly %d", 2)));
 			break;
 		default:
 			break;
@@ -2146,8 +2245,8 @@ read_objtype_from_string(const char *objtype)
 	}
 	if (i >= lengthof(ObjectTypeMap))
 		ereport(ERROR,
-			   (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				errmsg("unrecognized object type \"%s\"", objtype)));
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("unrecognized object type \"%s\"", objtype)));
 
 	return type;
 }
@@ -2693,7 +2792,7 @@ getObjectDescription(const ObjectAddress *object)
 				Form_pg_transform trfForm;
 
 				trfTup = SearchSysCache1(TRFOID,
-										  ObjectIdGetDatum(object->objectId));
+										 ObjectIdGetDatum(object->objectId));
 				if (!HeapTupleIsValid(trfTup))
 					elog(ERROR, "could not find tuple for transform %u",
 						 object->objectId);
@@ -2924,28 +3023,28 @@ getObjectDescription(const ObjectAddress *object)
 					case DEFACLOBJ_RELATION:
 						appendStringInfo(&buffer,
 										 _("default privileges on new relations belonging to role %s"),
-									  GetUserNameFromId(defacl->defaclrole, false));
+							   GetUserNameFromId(defacl->defaclrole, false));
 						break;
 					case DEFACLOBJ_SEQUENCE:
 						appendStringInfo(&buffer,
 										 _("default privileges on new sequences belonging to role %s"),
-									  GetUserNameFromId(defacl->defaclrole, false));
+							   GetUserNameFromId(defacl->defaclrole, false));
 						break;
 					case DEFACLOBJ_FUNCTION:
 						appendStringInfo(&buffer,
 										 _("default privileges on new functions belonging to role %s"),
-									  GetUserNameFromId(defacl->defaclrole, false));
+							   GetUserNameFromId(defacl->defaclrole, false));
 						break;
 					case DEFACLOBJ_TYPE:
 						appendStringInfo(&buffer,
 										 _("default privileges on new types belonging to role %s"),
-									  GetUserNameFromId(defacl->defaclrole, false));
+							   GetUserNameFromId(defacl->defaclrole, false));
 						break;
 					default:
 						/* shouldn't get here */
 						appendStringInfo(&buffer,
 								_("default privileges belonging to role %s"),
-									  GetUserNameFromId(defacl->defaclrole, false));
+							   GetUserNameFromId(defacl->defaclrole, false));
 						break;
 				}
 
@@ -2991,8 +3090,8 @@ getObjectDescription(const ObjectAddress *object)
 		case OCLASS_POLICY:
 			{
 				Relation	policy_rel;
-				ScanKeyData	skey[1];
-				SysScanDesc	sscan;
+				ScanKeyData skey[1];
+				SysScanDesc sscan;
 				HeapTuple	tuple;
 				Form_pg_policy form_policy;
 
@@ -3677,7 +3776,7 @@ getObjectIdentityParts(const ObjectAddress *object,
 
 		case OCLASS_TYPE:
 			{
-				char *typeout;
+				char	   *typeout;
 
 				typeout = format_type_be_qualified(object->objectId);
 				appendStringInfoString(&buffer, typeout);
@@ -3770,7 +3869,7 @@ getObjectIdentityParts(const ObjectAddress *object,
 
 					appendStringInfo(&buffer, "%s on %s",
 									 quote_identifier(NameStr(con->conname)),
-									 getObjectIdentityParts(&domain, objname, objargs));
+						  getObjectIdentityParts(&domain, objname, objargs));
 
 					if (objname)
 						*objargs = lappend(*objargs, pstrdup(NameStr(con->conname)));
@@ -3794,8 +3893,8 @@ getObjectIdentityParts(const ObjectAddress *object,
 				conForm = (Form_pg_conversion) GETSTRUCT(conTup);
 				schema = get_namespace_name_or_temp(conForm->connamespace);
 				appendStringInfoString(&buffer,
-								quote_qualified_identifier(schema,
-														   NameStr(conForm->conname)));
+									   quote_qualified_identifier(schema,
+												 NameStr(conForm->conname)));
 				if (objname)
 					*objname = list_make2(schema,
 										  pstrdup(NameStr(conForm->conname)));
@@ -3901,7 +4000,7 @@ getObjectIdentityParts(const ObjectAddress *object,
 
 				appendStringInfo(&buffer, "%s USING %s",
 								 quote_qualified_identifier(schema,
-															NameStr(opcForm->opcname)),
+												  NameStr(opcForm->opcname)),
 								 quote_identifier(NameStr(amForm->amname)));
 				if (objname)
 					*objname = list_make3(pstrdup(NameStr(amForm->amname)),
@@ -3956,7 +4055,7 @@ getObjectIdentityParts(const ObjectAddress *object,
 				if (objname)
 				{
 					*objname = lappend(*objname,
-									   psprintf("%d", amopForm->amopstrategy));
+									 psprintf("%d", amopForm->amopstrategy));
 					*objargs = list_make2(ltype, rtype);
 				}
 
@@ -4136,7 +4235,7 @@ getObjectIdentityParts(const ObjectAddress *object,
 											  NameStr(formParser->prsname)));
 				if (objname)
 					*objname = list_make2(schema,
-										  pstrdup(NameStr(formParser->prsname)));
+									  pstrdup(NameStr(formParser->prsname)));
 				ReleaseSysCache(tup);
 				break;
 			}
@@ -4159,7 +4258,7 @@ getObjectIdentityParts(const ObjectAddress *object,
 											   NameStr(formDict->dictname)));
 				if (objname)
 					*objname = list_make2(schema,
-										  pstrdup(NameStr(formDict->dictname)));
+									   pstrdup(NameStr(formDict->dictname)));
 				ReleaseSysCache(tup);
 				break;
 			}
@@ -4182,7 +4281,7 @@ getObjectIdentityParts(const ObjectAddress *object,
 											   NameStr(formTmpl->tmplname)));
 				if (objname)
 					*objname = list_make2(schema,
-										  pstrdup(NameStr(formTmpl->tmplname)));
+									   pstrdup(NameStr(formTmpl->tmplname)));
 				ReleaseSysCache(tup);
 				break;
 			}
@@ -4510,10 +4609,10 @@ getRelationIdentity(StringInfo buffer, Oid relid, List **objname)
 ArrayType *
 strlist_to_textarray(List *list)
 {
-	ArrayType *arr;
-	Datum	*datums;
-	int		j = 0;
-	ListCell *cell;
+	ArrayType  *arr;
+	Datum	   *datums;
+	int			j = 0;
+	ListCell   *cell;
 	MemoryContext memcxt;
 	MemoryContext oldcxt;
 
@@ -4527,7 +4626,7 @@ strlist_to_textarray(List *list)
 	datums = palloc(sizeof(text *) * list_length(list));
 	foreach(cell, list)
 	{
-		char   *name = lfirst(cell);
+		char	   *name = lfirst(cell);
 
 		datums[j++] = CStringGetTextDatum(name);
 	}
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 5f211dacde2..009ac398ee4 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -545,7 +545,7 @@ AggregateCreate(const char *aggName,
 							 parameterModes,	/* parameterModes */
 							 parameterNames,	/* parameterNames */
 							 parameterDefaults, /* parameterDefaults */
-							 PointerGetDatum(NULL),	/* trftypes */
+							 PointerGetDatum(NULL),		/* trftypes */
 							 PointerGetDatum(NULL),		/* proconfig */
 							 1, /* procost */
 							 0);	/* prorows */
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index c880486c4bc..902b0a7297c 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -346,7 +346,7 @@ restart:
 		if (!OidIsValid(binary_upgrade_next_pg_enum_oid))
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("pg_enum OID value not set when in binary upgrade mode")));
+			errmsg("pg_enum OID value not set when in binary upgrade mode")));
 
 		/*
 		 * Use binary-upgrade override for pg_enum.oid, if supplied. During
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index 122982951e2..7765be4be43 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -1158,11 +1158,11 @@ fail:
 List *
 oid_array_to_list(Datum datum)
 {
-	ArrayType *array = DatumGetArrayTypeP(datum);
-	Datum *values;
-	int nelems;
-	int i;
-	List *result = NIL;
+	ArrayType  *array = DatumGetArrayTypeP(datum);
+	Datum	   *values;
+	int			nelems;
+	int			i;
+	List	   *result = NIL;
 
 	deconstruct_array(array,
 					  OIDOID,
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 32453c3bb89..c4161b7b3fc 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -133,7 +133,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId)
 		if (!OidIsValid(binary_upgrade_next_pg_type_oid))
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("pg_type OID value not set when in binary upgrade mode")));
+			errmsg("pg_type OID value not set when in binary upgrade mode")));
 
 		HeapTupleSetOid(tup, binary_upgrade_next_pg_type_oid);
 		binary_upgrade_next_pg_type_oid = InvalidOid;
diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c
index c99d3534ced..3652d7bf51b 100644
--- a/src/backend/catalog/toasting.c
+++ b/src/backend/catalog/toasting.c
@@ -175,9 +175,9 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
 		/*
 		 * Check to see whether the table needs a TOAST table.
 		 *
-		 * If an update-in-place TOAST relfilenode is specified, force TOAST file
-		 * creation even if it seems not to need one.  This handles the case
-		 * where the old cluster needed a TOAST table but the new cluster
+		 * If an update-in-place TOAST relfilenode is specified, force TOAST
+		 * file creation even if it seems not to need one.  This handles the
+		 * case where the old cluster needed a TOAST table but the new cluster
 		 * would not normally create one.
 		 */
 
@@ -260,9 +260,9 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
 		namespaceid = PG_TOAST_NAMESPACE;
 
 	/*
-	 * Use binary-upgrade override for pg_type.oid, if supplied.  We might
-	 * be in the post-schema-restore phase where we are doing ALTER TABLE
-	 * to create TOAST tables that didn't exist in the old cluster.
+	 * Use binary-upgrade override for pg_type.oid, if supplied.  We might be
+	 * in the post-schema-restore phase where we are doing ALTER TABLE to
+	 * create TOAST tables that didn't exist in the old cluster.
 	 */
 	if (IsBinaryUpgrade && OidIsValid(binary_upgrade_next_toast_pg_type_oid))
 	{
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 65e329eab07..861048f213f 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -2150,6 +2150,7 @@ compute_scalar_stats(VacAttrStatsP stats,
 	/* We always use the default collation for statistics */
 	ssup.ssup_collation = DEFAULT_COLLATION_OID;
 	ssup.ssup_nulls_first = false;
+
 	/*
 	 * For now, don't perform abbreviated key conversion, because full values
 	 * are required for MCV slot generation.  Supporting that optimization
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 3e14c536e21..8904676609d 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -861,8 +861,8 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
 		 * RLS (returns RLS_ENABLED) or not for this COPY statement.
 		 *
 		 * If the relation has a row security policy and we are to apply it
-		 * then perform a "query" copy and allow the normal query processing to
-		 * handle the policies.
+		 * then perform a "query" copy and allow the normal query processing
+		 * to handle the policies.
 		 *
 		 * If RLS is not enabled for this, then just fall through to the
 		 * normal non-filtering relation handling.
@@ -877,7 +877,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
 			if (is_from)
 				ereport(ERROR,
 						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-						 errmsg("COPY FROM not supported with row level security."),
+				  errmsg("COPY FROM not supported with row level security."),
 						 errhint("Use direct INSERT statements instead.")));
 
 			/* Build target list */
@@ -904,7 +904,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
 			select->targetList = list_make1(target);
 			select->fromClause = list_make1(from);
 
-			query = (Node*) select;
+			query = (Node *) select;
 
 			/* Close the handle to the relation as it is no longer needed. */
 			heap_close(rel, (is_from ? RowExclusiveLock : AccessShareLock));
@@ -1408,26 +1408,27 @@ BeginCopy(bool is_from,
 
 		/*
 		 * If we were passed in a relid, make sure we got the same one back
-		 * after planning out the query.  It's possible that it changed between
-		 * when we checked the policies on the table and decided to use a query
-		 * and now.
+		 * after planning out the query.  It's possible that it changed
+		 * between when we checked the policies on the table and decided to
+		 * use a query and now.
 		 */
 		if (queryRelId != InvalidOid)
 		{
-			Oid relid = linitial_oid(plan->relationOids);
+			Oid			relid = linitial_oid(plan->relationOids);
 
 			/*
-			 * There should only be one relationOid in this case, since we will
-			 * only get here when we have changed the command for the user from
-			 * a "COPY relation TO" to "COPY (SELECT * FROM relation) TO", to
-			 * allow row level security policies to be applied.
+			 * There should only be one relationOid in this case, since we
+			 * will only get here when we have changed the command for the
+			 * user from a "COPY relation TO" to "COPY (SELECT * FROM
+			 * relation) TO", to allow row level security policies to be
+			 * applied.
 			 */
 			Assert(list_length(plan->relationOids) == 1);
 
 			if (relid != queryRelId)
 				ereport(ERROR,
 						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-						 errmsg("relation referenced by COPY statement has changed")));
+				errmsg("relation referenced by COPY statement has changed")));
 		}
 
 		/*
@@ -2439,7 +2440,7 @@ CopyFrom(CopyState cstate)
 
 				if (resultRelInfo->ri_NumIndices > 0)
 					recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
-														   estate, false, NULL,
+														 estate, false, NULL,
 														   NIL);
 
 				/* AFTER ROW INSERT Triggers */
diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c
index e8f0d793b67..41183f6ff56 100644
--- a/src/backend/commands/createas.c
+++ b/src/backend/commands/createas.c
@@ -89,7 +89,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
 
 	if (stmt->if_not_exists)
 	{
-		Oid	nspid;
+		Oid			nspid;
 
 		nspid = RangeVarGetCreationNamespace(stmt->into->rel);
 
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index a699ce3fd28..6cbe65e88a4 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -554,8 +554,8 @@ createdb(const CreatedbStmt *stmt)
 	 * Force a checkpoint before starting the copy. This will force all dirty
 	 * buffers, including those of unlogged tables, out to disk, to ensure
 	 * source database is up-to-date on disk for the copy.
-	 * FlushDatabaseBuffers() would suffice for that, but we also want
-	 * to process any pending unlink requests. Otherwise, if a checkpoint
+	 * FlushDatabaseBuffers() would suffice for that, but we also want to
+	 * process any pending unlink requests. Otherwise, if a checkpoint
 	 * happened while we're copying files, a file might be deleted just when
 	 * we're about to copy it, causing the lstat() call in copydir() to fail
 	 * with ENOENT.
@@ -841,8 +841,8 @@ dropdb(const char *dbname, bool missing_ok)
 	if (ReplicationSlotsCountDBSlots(db_id, &nslots, &nslots_active))
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_IN_USE),
-				 errmsg("database \"%s\" is used by a logical replication slot",
-						dbname),
+			  errmsg("database \"%s\" is used by a logical replication slot",
+					 dbname),
 				 errdetail_plural("There is %d slot, %d of them active.",
 								  "There are %d slots, %d of them active.",
 								  nslots,
diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c
index 78a1bf334d8..f04f4f5f31e 100644
--- a/src/backend/commands/dropcmds.c
+++ b/src/backend/commands/dropcmds.c
@@ -415,7 +415,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
 			break;
 		case OBJECT_OPCLASS:
 			{
-				List *opcname = list_copy_tail(objname, 1);
+				List	   *opcname = list_copy_tail(objname, 1);
 
 				if (!schema_does_not_exist_skipping(opcname, &msg, &name))
 				{
@@ -427,7 +427,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
 			break;
 		case OBJECT_OPFAMILY:
 			{
-				List *opfname = list_copy_tail(objname, 1);
+				List	   *opfname = list_copy_tail(objname, 1);
 
 				if (!schema_does_not_exist_skipping(opfname, &msg, &name))
 				{
diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c
index d786c7d606c..cc10c5eb1df 100644
--- a/src/backend/commands/event_trigger.c
+++ b/src/backend/commands/event_trigger.c
@@ -57,13 +57,15 @@ typedef struct EventTriggerQueryState
 	bool		in_sql_drop;
 
 	/* table_rewrite */
-	Oid			table_rewrite_oid;	/* InvalidOid, or set for table_rewrite event */
+	Oid			table_rewrite_oid;		/* InvalidOid, or set for
+										 * table_rewrite event */
 	int			table_rewrite_reason;	/* AT_REWRITE reason */
 
 	/* Support for command collection */
 	bool		commandCollectionInhibited;
 	CollectedCommand *currentCommand;
-	List	   *commandList;		/* list of CollectedCommand; see deparse_utility.h */
+	List	   *commandList;	/* list of CollectedCommand; see
+								 * deparse_utility.h */
 	struct EventTriggerQueryState *previous;
 } EventTriggerQueryState;
 
@@ -143,7 +145,7 @@ static void AlterEventTriggerOwner_internal(Relation rel,
 								Oid newOwnerId);
 static event_trigger_command_tag_check_result check_ddl_tag(const char *tag);
 static event_trigger_command_tag_check_result check_table_rewrite_ddl_tag(
-	const char *tag);
+							const char *tag);
 static void error_duplicate_filter_variable(const char *defname);
 static Datum filter_list_to_array(List *filterlist);
 static Oid insert_event_trigger_tuple(char *trigname, char *eventname,
@@ -714,7 +716,7 @@ EventTriggerCommonSetup(Node *parsetree,
 
 		dbgtag = CreateCommandTag(parsetree);
 		if (event == EVT_DDLCommandStart ||
-			event == EVT_DDLCommandEnd   ||
+			event == EVT_DDLCommandEnd ||
 			event == EVT_SQLDrop)
 		{
 			if (check_ddl_tag(dbgtag) != EVENT_TRIGGER_COMMAND_TAG_OK)
@@ -1562,8 +1564,8 @@ pg_event_trigger_table_rewrite_oid(PG_FUNCTION_ARGS)
 		currentEventTriggerState->table_rewrite_oid == InvalidOid)
 		ereport(ERROR,
 				(errcode(ERRCODE_E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED),
-		 errmsg("%s can only be called in a table_rewrite event trigger function",
-				"pg_event_trigger_table_rewrite_oid()")));
+				 errmsg("%s can only be called in a table_rewrite event trigger function",
+						"pg_event_trigger_table_rewrite_oid()")));
 
 	PG_RETURN_OID(currentEventTriggerState->table_rewrite_oid);
 }
@@ -1583,8 +1585,8 @@ pg_event_trigger_table_rewrite_reason(PG_FUNCTION_ARGS)
 		currentEventTriggerState->table_rewrite_reason == 0)
 		ereport(ERROR,
 				(errcode(ERRCODE_E_R_I_E_EVENT_TRIGGER_PROTOCOL_VIOLATED),
-		 errmsg("%s can only be called in a table_rewrite event trigger function",
-				"pg_event_trigger_table_rewrite_reason()")));
+				 errmsg("%s can only be called in a table_rewrite event trigger function",
+						"pg_event_trigger_table_rewrite_reason()")));
 
 	PG_RETURN_INT32(currentEventTriggerState->table_rewrite_reason);
 }
@@ -1672,7 +1674,7 @@ EventTriggerCollectSimpleCommand(ObjectAddress address,
 	command->parsetree = copyObject(parsetree);
 
 	currentEventTriggerState->commandList = lappend(currentEventTriggerState->commandList,
-											  command);
+													command);
 
 	MemoryContextSwitchTo(oldcxt);
 }
@@ -1687,13 +1689,13 @@ EventTriggerCollectSimpleCommand(ObjectAddress address,
  *
  * XXX -- this API isn't considering the possibility of an ALTER TABLE command
  * being called reentrantly by an event trigger function.  Do we need stackable
- * commands at this level?  Perhaps at least we should detect the condition and
+ * commands at this level?	Perhaps at least we should detect the condition and
  * raise an error.
  */
 void
 EventTriggerAlterTableStart(Node *parsetree)
 {
-	MemoryContext	oldcxt;
+	MemoryContext oldcxt;
 	CollectedCommand *command;
 
 	/* ignore if event trigger context not set, or collection disabled */
@@ -1744,7 +1746,7 @@ EventTriggerAlterTableRelid(Oid objectId)
 void
 EventTriggerCollectAlterTableSubcmd(Node *subcmd, ObjectAddress address)
 {
-	MemoryContext	oldcxt;
+	MemoryContext oldcxt;
 	CollectedATSubcmd *newsub;
 
 	/* ignore if event trigger context not set, or collection disabled */
@@ -1808,8 +1810,8 @@ EventTriggerCollectGrant(InternalGrant *istmt)
 {
 	MemoryContext oldcxt;
 	CollectedCommand *command;
-	InternalGrant  *icopy;
-	ListCell	   *cell;
+	InternalGrant *icopy;
+	ListCell   *cell;
 
 	/* ignore if event trigger context not set, or collection disabled */
 	if (!currentEventTriggerState ||
@@ -1849,9 +1851,9 @@ EventTriggerCollectGrant(InternalGrant *istmt)
  */
 void
 EventTriggerCollectAlterOpFam(AlterOpFamilyStmt *stmt, Oid opfamoid,
-							List *operators, List *procedures)
+							  List *operators, List *procedures)
 {
-	MemoryContext	oldcxt;
+	MemoryContext oldcxt;
 	CollectedCommand *command;
 
 	/* ignore if event trigger context not set, or collection disabled */
@@ -1882,9 +1884,9 @@ EventTriggerCollectAlterOpFam(AlterOpFamilyStmt *stmt, Oid opfamoid,
  */
 void
 EventTriggerCollectCreateOpClass(CreateOpClassStmt *stmt, Oid opcoid,
-							   List *operators, List *procedures)
+								 List *operators, List *procedures)
 {
-	MemoryContext	oldcxt;
+	MemoryContext oldcxt;
 	CollectedCommand *command;
 
 	/* ignore if event trigger context not set, or collection disabled */
@@ -1918,7 +1920,7 @@ void
 EventTriggerCollectAlterTSConfig(AlterTSConfigurationStmt *stmt, Oid cfgId,
 								 Oid *dictIds, int ndicts)
 {
-	MemoryContext   oldcxt;
+	MemoryContext oldcxt;
 	CollectedCommand *command;
 
 	/* ignore if event trigger context not set, or collection disabled */
@@ -1952,7 +1954,7 @@ EventTriggerCollectAlterTSConfig(AlterTSConfigurationStmt *stmt, Oid cfgId,
 void
 EventTriggerCollectAlterDefPrivs(AlterDefaultPrivilegesStmt *stmt)
 {
-	MemoryContext	oldcxt;
+	MemoryContext oldcxt;
 	CollectedCommand *command;
 
 	/* ignore if event trigger context not set, or collection disabled */
@@ -2034,10 +2036,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
 		 * object, the returned OID is Invalid.  Don't return anything.
 		 *
 		 * One might think that a viable alternative would be to look up the
-		 * Oid of the existing object and run the deparse with that.  But since
-		 * the parse tree might be different from the one that created the
-		 * object in the first place, we might not end up in a consistent state
-		 * anyway.
+		 * Oid of the existing object and run the deparse with that.  But
+		 * since the parse tree might be different from the one that created
+		 * the object in the first place, we might not end up in a consistent
+		 * state anyway.
 		 */
 		if (cmd->type == SCT_Simple &&
 			!OidIsValid(cmd->d.simple.address.objectId))
@@ -2074,10 +2076,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
 					identity = getObjectIdentity(&addr);
 
 					/*
-					 * Obtain schema name, if any ("pg_temp" if a temp object).
-					 * If the object class is not in the supported list here,
-					 * we assume it's a schema-less object type, and thus
-					 * "schema" remains set to NULL.
+					 * Obtain schema name, if any ("pg_temp" if a temp
+					 * object). If the object class is not in the supported
+					 * list here, we assume it's a schema-less object type,
+					 * and thus "schema" remains set to NULL.
 					 */
 					if (is_objectclass_supported(addr.classId))
 					{
@@ -2099,10 +2101,10 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
 									 addr.classId, addr.objectId);
 							schema_oid =
 								heap_getattr(objtup, nspAttnum,
-											 RelationGetDescr(catalog), &isnull);
+										 RelationGetDescr(catalog), &isnull);
 							if (isnull)
 								elog(ERROR,
-									 "invalid null namespace in object %u/%u/%d",
+								 "invalid null namespace in object %u/%u/%d",
 									 addr.classId, addr.objectId, addr.objectSubId);
 							/* XXX not quite get_namespace_name_or_temp */
 							if (isAnyTempNamespace(schema_oid))
@@ -2149,7 +2151,7 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
 				values[i++] = CStringGetTextDatum(CreateCommandTag(cmd->parsetree));
 				/* object_type */
 				values[i++] = CStringGetTextDatum(stringify_adefprivs_objtype(
-																			  cmd->d.defprivs.objtype));
+												   cmd->d.defprivs.objtype));
 				/* schema */
 				nulls[i++] = true;
 				/* identity */
@@ -2172,7 +2174,7 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
 												  "GRANT" : "REVOKE");
 				/* object_type */
 				values[i++] = CStringGetTextDatum(stringify_grantobjtype(
-																		 cmd->d.grant.istmt->objtype));
+											   cmd->d.grant.istmt->objtype));
 				/* schema */
 				nulls[i++] = true;
 				/* identity */
@@ -2230,7 +2232,7 @@ stringify_grantobjtype(GrantObjectType objtype)
 			return "TYPE";
 		default:
 			elog(ERROR, "unrecognized type %d", objtype);
-			return "???";	/* keep compiler quiet */
+			return "???";		/* keep compiler quiet */
 	}
 }
 
@@ -2257,6 +2259,6 @@ stringify_adefprivs_objtype(GrantObjectType objtype)
 			break;
 		default:
 			elog(ERROR, "unrecognized type %d", objtype);
-			return "???";	/* keep compiler quiet */
+			return "???";		/* keep compiler quiet */
 	}
 }
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 232f41df65a..a82c6ff7b4d 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -83,11 +83,11 @@ static void show_merge_append_keys(MergeAppendState *mstate, List *ancestors,
 static void show_agg_keys(AggState *astate, List *ancestors,
 			  ExplainState *es);
 static void show_grouping_sets(PlanState *planstate, Agg *agg,
-							   List *ancestors, ExplainState *es);
+				   List *ancestors, ExplainState *es);
 static void show_grouping_set_keys(PlanState *planstate,
-								   Agg *aggnode, Sort *sortnode,
-								   List *context, bool useprefix,
-								   List *ancestors, ExplainState *es);
+					   Agg *aggnode, Sort *sortnode,
+					   List *context, bool useprefix,
+					   List *ancestors, ExplainState *es);
 static void show_group_keys(GroupState *gstate, List *ancestors,
 				ExplainState *es);
 static void show_sort_group_keys(PlanState *planstate, const char *qlabel,
@@ -754,7 +754,7 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used)
 									((ModifyTable *) plan)->nominalRelation);
 			if (((ModifyTable *) plan)->exclRelRTI)
 				*rels_used = bms_add_member(*rels_used,
-											((ModifyTable *) plan)->exclRelRTI);
+										 ((ModifyTable *) plan)->exclRelRTI);
 			break;
 		default:
 			break;
@@ -984,6 +984,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 				 * quite messy.
 				 */
 				RangeTblEntry *rte;
+
 				rte = rt_fetch(((SampleScan *) plan)->scanrelid, es->rtable);
 				custom_name = get_tablesample_method_name(rte->tablesample->tsmid);
 				pname = psprintf("Sample Scan (%s)", custom_name);
@@ -1895,8 +1896,8 @@ show_grouping_sets(PlanState *planstate, Agg *agg,
 
 	foreach(lc, agg->chain)
 	{
-		Agg *aggnode = lfirst(lc);
-		Sort *sortnode = (Sort *) aggnode->plan.lefttree;
+		Agg		   *aggnode = lfirst(lc);
+		Sort	   *sortnode = (Sort *) aggnode->plan.lefttree;
 
 		show_grouping_set_keys(planstate, aggnode, sortnode,
 							   context, useprefix, ancestors, es);
@@ -2561,7 +2562,7 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors,
 	{
 		ExplainProperty("Conflict Resolution",
 						node->onConflictAction == ONCONFLICT_NOTHING ?
-							"NOTHING" : "UPDATE",
+						"NOTHING" : "UPDATE",
 						false, es);
 
 		/*
@@ -2582,9 +2583,9 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors,
 		/* EXPLAIN ANALYZE display of actual outcome for each tuple proposed */
 		if (es->analyze && mtstate->ps.instrument)
 		{
-			double total;
-			double insert_path;
-			double other_path;
+			double		total;
+			double		insert_path;
+			double		other_path;
 
 			InstrEndLoop(mtstate->mt_plans[0]->instrument);
 
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index c1426dc9391..3d220e9c7e6 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -921,9 +921,9 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
 	ReleaseSysCache(languageTuple);
 
 	/*
-	 * Only superuser is allowed to create leakproof functions because leakproof
-	 * functions can see tuples which have not yet been filtered out by security
-	 * barrier views or row level security policies.
+	 * Only superuser is allowed to create leakproof functions because
+	 * leakproof functions can see tuples which have not yet been filtered out
+	 * by security barrier views or row level security policies.
 	 */
 	if (isLeakProof && !superuser())
 		ereport(ERROR,
@@ -932,14 +932,15 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
 
 	if (transformDefElem)
 	{
-		ListCell *lc;
+		ListCell   *lc;
 
 		Assert(IsA(transformDefElem, List));
 
-		foreach (lc, (List *) transformDefElem)
+		foreach(lc, (List *) transformDefElem)
 		{
-			Oid typeid = typenameTypeId(NULL, lfirst(lc));
-			Oid elt = get_base_element_type(typeid);
+			Oid			typeid = typenameTypeId(NULL, lfirst(lc));
+			Oid			elt = get_base_element_type(typeid);
+
 			typeid = elt ? elt : typeid;
 
 			get_transform_oid(typeid, languageOid, false);
@@ -992,13 +993,13 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
 
 	if (list_length(trftypes_list) > 0)
 	{
-		ListCell *lc;
-		Datum *arr;
-		int i;
+		ListCell   *lc;
+		Datum	   *arr;
+		int			i;
 
 		arr = palloc(list_length(trftypes_list) * sizeof(Datum));
 		i = 0;
-		foreach (lc, trftypes_list)
+		foreach(lc, trftypes_list)
 			arr[i++] = ObjectIdGetDatum(lfirst_oid(lc));
 		trftypes = construct_array(arr, list_length(trftypes_list),
 								   OIDOID, sizeof(Oid), true, 'i');
@@ -1716,7 +1717,7 @@ check_transform_function(Form_pg_proc procstruct)
 	if (procstruct->proisagg)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-				 errmsg("transform function must not be an aggregate function")));
+			errmsg("transform function must not be an aggregate function")));
 	if (procstruct->proiswindow)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@@ -1867,9 +1868,9 @@ CreateTransform(CreateTransformStmt *stmt)
 		if (!stmt->replace)
 			ereport(ERROR,
 					(errcode(ERRCODE_DUPLICATE_OBJECT),
-					 errmsg("transform for type %s language \"%s\" already exists",
-							format_type_be(typeid),
-							stmt->lang)));
+			   errmsg("transform for type %s language \"%s\" already exists",
+					  format_type_be(typeid),
+					  stmt->lang)));
 
 		MemSet(replaces, false, sizeof(replaces));
 		replaces[Anum_pg_transform_trffromsql - 1] = true;
@@ -1958,9 +1959,9 @@ get_transform_oid(Oid type_id, Oid lang_id, bool missing_ok)
 	if (!OidIsValid(oid) && !missing_ok)
 		ereport(ERROR,
 				(errcode(ERRCODE_UNDEFINED_OBJECT),
-				 errmsg("transform for type %s language \"%s\" does not exist",
-						format_type_be(type_id),
-						get_language_name(lang_id, false))));
+			   errmsg("transform for type %s language \"%s\" does not exist",
+					  format_type_be(type_id),
+					  get_language_name(lang_id, false))));
 	return oid;
 }
 
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index eb16bb31ffc..5492e5985bf 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -66,7 +66,7 @@ static char *make_temptable_name_n(char *tempname, int n);
 static void mv_GenerateOper(StringInfo buf, Oid opoid);
 
 static void refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
-						 int save_sec_context);
+					   int save_sec_context);
 static void refresh_by_heap_swap(Oid matviewOid, Oid OIDNewHeap, char relpersistence);
 
 static void OpenMatViewIncrementalMaintenance(void);
diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c
index a3d840da5cf..6e95ba28b9d 100644
--- a/src/backend/commands/policy.c
+++ b/src/backend/commands/policy.c
@@ -45,27 +45,27 @@
 #include "utils/syscache.h"
 
 static void RangeVarCallbackForPolicy(const RangeVar *rv,
-				Oid relid, Oid oldrelid, void *arg);
+						  Oid relid, Oid oldrelid, void *arg);
 static char parse_policy_command(const char *cmd_name);
-static ArrayType* policy_role_list_to_array(List *roles);
+static ArrayType *policy_role_list_to_array(List *roles);
 
 /*
  * Callback to RangeVarGetRelidExtended().
  *
  * Checks the following:
- *  - the relation specified is a table.
- *  - current user owns the table.
- *  - the table is not a system table.
+ *	- the relation specified is a table.
+ *	- current user owns the table.
+ *	- the table is not a system table.
  *
  * If any of these checks fails then an error is raised.
  */
 static void
 RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid,
-								void *arg)
+						  void *arg)
 {
-	HeapTuple		tuple;
-	Form_pg_class	classform;
-	char			relkind;
+	HeapTuple	tuple;
+	Form_pg_class classform;
+	char		relkind;
 
 	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
 	if (!HeapTupleIsValid(tuple))
@@ -96,8 +96,8 @@ RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid,
 
 /*
  * parse_policy_command -
- *   helper function to convert full command strings to their char
- *   representation.
+ *	 helper function to convert full command strings to their char
+ *	 representation.
  *
  * cmd_name - full string command name. Valid values are 'all', 'select',
  *			  'insert', 'update' and 'delete'.
@@ -106,7 +106,7 @@ RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid,
 static char
 parse_policy_command(const char *cmd_name)
 {
-	char cmd;
+	char		cmd;
 
 	if (!cmd_name)
 		elog(ERROR, "unrecognized policy command");
@@ -129,7 +129,7 @@ parse_policy_command(const char *cmd_name)
 
 /*
  * policy_role_list_to_array
- *   helper function to convert a list of RoleSpecs to an array of role ids.
+ *	 helper function to convert a list of RoleSpecs to an array of role ids.
  */
 static ArrayType *
 policy_role_list_to_array(List *roles)
@@ -156,7 +156,7 @@ policy_role_list_to_array(List *roles)
 
 	foreach(cell, roles)
 	{
-		RoleSpec *spec = lfirst(cell);
+		RoleSpec   *spec = lfirst(cell);
 
 		/*
 		 * PUBLIC covers all roles, so it only makes sense alone.
@@ -167,7 +167,7 @@ policy_role_list_to_array(List *roles)
 				ereport(WARNING,
 						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 						 errmsg("ignoring roles specified other than public"),
-						 errhint("All roles are members of the public role.")));
+					  errhint("All roles are members of the public role.")));
 			temp_array[0] = ObjectIdGetDatum(ACL_ID_PUBLIC);
 			num_roles = 1;
 			break;
@@ -193,14 +193,14 @@ policy_role_list_to_array(List *roles)
 void
 RelationBuildRowSecurity(Relation relation)
 {
-	MemoryContext		rscxt;
-	MemoryContext		oldcxt = CurrentMemoryContext;
-	RowSecurityDesc	   * volatile rsdesc = NULL;
+	MemoryContext rscxt;
+	MemoryContext oldcxt = CurrentMemoryContext;
+	RowSecurityDesc *volatile rsdesc = NULL;
 
 	/*
 	 * Create a memory context to hold everything associated with this
-	 * relation's row security policy.  This makes it easy to clean up
-	 * during a relcache flush.
+	 * relation's row security policy.  This makes it easy to clean up during
+	 * a relcache flush.
 	 */
 	rscxt = AllocSetContextCreate(CacheMemoryContext,
 								  "row security descriptor",
@@ -209,15 +209,15 @@ RelationBuildRowSecurity(Relation relation)
 								  ALLOCSET_SMALL_MAXSIZE);
 
 	/*
-	 * Since rscxt lives under CacheMemoryContext, it is long-lived.  Use
-	 * a PG_TRY block to ensure it'll get freed if we fail partway through.
+	 * Since rscxt lives under CacheMemoryContext, it is long-lived.  Use a
+	 * PG_TRY block to ensure it'll get freed if we fail partway through.
 	 */
 	PG_TRY();
 	{
-		Relation			catalog;
-		ScanKeyData			skey;
-		SysScanDesc			sscan;
-		HeapTuple			tuple;
+		Relation	catalog;
+		ScanKeyData skey;
+		SysScanDesc sscan;
+		HeapTuple	tuple;
 
 		rsdesc = MemoryContextAllocZero(rscxt, sizeof(RowSecurityDesc));
 		rsdesc->rscxt = rscxt;
@@ -238,17 +238,17 @@ RelationBuildRowSecurity(Relation relation)
 		 */
 		while (HeapTupleIsValid(tuple = systable_getnext(sscan)))
 		{
-			Datum				value_datum;
-			char				cmd_value;
-			Datum				roles_datum;
-			char			   *qual_value;
-			Expr			   *qual_expr;
-			char			   *with_check_value;
-			Expr			   *with_check_qual;
-			char			   *policy_name_value;
-			Oid					policy_id;
-			bool				isnull;
-			RowSecurityPolicy  *policy;
+			Datum		value_datum;
+			char		cmd_value;
+			Datum		roles_datum;
+			char	   *qual_value;
+			Expr	   *qual_expr;
+			char	   *with_check_value;
+			Expr	   *with_check_qual;
+			char	   *policy_name_value;
+			Oid			policy_id;
+			bool		isnull;
+			RowSecurityPolicy *policy;
 
 			/*
 			 * Note: all the pass-by-reference data we collect here is either
@@ -259,26 +259,26 @@ RelationBuildRowSecurity(Relation relation)
 
 			/* Get policy command */
 			value_datum = heap_getattr(tuple, Anum_pg_policy_polcmd,
-								 RelationGetDescr(catalog), &isnull);
+									   RelationGetDescr(catalog), &isnull);
 			Assert(!isnull);
 			cmd_value = DatumGetChar(value_datum);
 
 			/* Get policy name */
 			value_datum = heap_getattr(tuple, Anum_pg_policy_polname,
-										RelationGetDescr(catalog), &isnull);
+									   RelationGetDescr(catalog), &isnull);
 			Assert(!isnull);
 			policy_name_value = NameStr(*(DatumGetName(value_datum)));
 
 			/* Get policy roles */
 			roles_datum = heap_getattr(tuple, Anum_pg_policy_polroles,
-										RelationGetDescr(catalog), &isnull);
+									   RelationGetDescr(catalog), &isnull);
 			/* shouldn't be null, but initdb doesn't mark it so, so check */
 			if (isnull)
 				elog(ERROR, "unexpected null value in pg_policy.polroles");
 
 			/* Get policy qual */
 			value_datum = heap_getattr(tuple, Anum_pg_policy_polqual,
-								 RelationGetDescr(catalog), &isnull);
+									   RelationGetDescr(catalog), &isnull);
 			if (!isnull)
 			{
 				qual_value = TextDatumGetCString(value_datum);
@@ -289,7 +289,7 @@ RelationBuildRowSecurity(Relation relation)
 
 			/* Get WITH CHECK qual */
 			value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck,
-										RelationGetDescr(catalog), &isnull);
+									   RelationGetDescr(catalog), &isnull);
 			if (!isnull)
 			{
 				with_check_value = TextDatumGetCString(value_datum);
@@ -311,7 +311,7 @@ RelationBuildRowSecurity(Relation relation)
 			policy->qual = copyObject(qual_expr);
 			policy->with_check_qual = copyObject(with_check_qual);
 			policy->hassublinks = checkExprHasSubLink((Node *) qual_expr) ||
-								  checkExprHasSubLink((Node *) with_check_qual);
+				checkExprHasSubLink((Node *) with_check_qual);
 
 			rsdesc->policies = lcons(policy, rsdesc->policies);
 
@@ -330,15 +330,15 @@ RelationBuildRowSecurity(Relation relation)
 		/*
 		 * Check if no policies were added
 		 *
-		 * If no policies exist in pg_policy for this relation, then we
-		 * need to create a single default-deny policy.  We use InvalidOid for
-		 * the Oid to indicate that this is the default-deny policy (we may
-		 * decide to ignore the default policy if an extension adds policies).
+		 * If no policies exist in pg_policy for this relation, then we need
+		 * to create a single default-deny policy.  We use InvalidOid for the
+		 * Oid to indicate that this is the default-deny policy (we may decide
+		 * to ignore the default policy if an extension adds policies).
 		 */
 		if (rsdesc->policies == NIL)
 		{
-			RowSecurityPolicy  *policy;
-			Datum				role;
+			RowSecurityPolicy *policy;
+			Datum		role;
 
 			MemoryContextSwitchTo(rscxt);
 
@@ -351,7 +351,7 @@ RelationBuildRowSecurity(Relation relation)
 			policy->roles = construct_array(&role, 1, OIDOID, sizeof(Oid), true,
 											'i');
 			policy->qual = (Expr *) makeConst(BOOLOID, -1, InvalidOid,
-											  sizeof(bool), BoolGetDatum(false),
+										   sizeof(bool), BoolGetDatum(false),
 											  false, true);
 			policy->with_check_qual = copyObject(policy->qual);
 			policy->hassublinks = false;
@@ -376,15 +376,15 @@ RelationBuildRowSecurity(Relation relation)
 
 /*
  * RemovePolicyById -
- *   remove a policy by its OID.  If a policy does not exist with the provided
- *   oid, then an error is raised.
+ *	 remove a policy by its OID.  If a policy does not exist with the provided
+ *	 oid, then an error is raised.
  *
  * policy_id - the oid of the policy.
  */
 void
 RemovePolicyById(Oid policy_id)
 {
-	Relation 	pg_policy_rel;
+	Relation	pg_policy_rel;
 	SysScanDesc sscan;
 	ScanKeyData skey[1];
 	HeapTuple	tuple;
@@ -435,8 +435,8 @@ RemovePolicyById(Oid policy_id)
 
 	/*
 	 * Note that, unlike some of the other flags in pg_class, relrowsecurity
-	 * is not just an indication of if policies exist.  When relrowsecurity
-	 * is set by a user, then all access to the relation must be through a
+	 * is not just an indication of if policies exist.  When relrowsecurity is
+	 * set by a user, then all access to the relation must be through a
 	 * policy.  If no policy is defined for the relation then a default-deny
 	 * policy is created and all records are filtered (except for queries from
 	 * the owner).
@@ -450,31 +450,31 @@ RemovePolicyById(Oid policy_id)
 
 /*
  * CreatePolicy -
- *   handles the execution of the CREATE POLICY command.
+ *	 handles the execution of the CREATE POLICY command.
  *
  * stmt - the CreatePolicyStmt that describes the policy to create.
  */
 ObjectAddress
 CreatePolicy(CreatePolicyStmt *stmt)
 {
-	Relation		pg_policy_rel;
-	Oid				policy_id;
-	Relation		target_table;
-	Oid				table_id;
-	char			polcmd;
-	ArrayType	   *role_ids;
-	ParseState	   *qual_pstate;
-	ParseState	   *with_check_pstate;
-	RangeTblEntry  *rte;
-	Node		   *qual;
-	Node		   *with_check_qual;
-	ScanKeyData		skey[2];
-	SysScanDesc		sscan;
-	HeapTuple		policy_tuple;
-	Datum			values[Natts_pg_policy];
-	bool			isnull[Natts_pg_policy];
-	ObjectAddress	target;
-	ObjectAddress	myself;
+	Relation	pg_policy_rel;
+	Oid			policy_id;
+	Relation	target_table;
+	Oid			table_id;
+	char		polcmd;
+	ArrayType  *role_ids;
+	ParseState *qual_pstate;
+	ParseState *with_check_pstate;
+	RangeTblEntry *rte;
+	Node	   *qual;
+	Node	   *with_check_qual;
+	ScanKeyData skey[2];
+	SysScanDesc sscan;
+	HeapTuple	policy_tuple;
+	Datum		values[Natts_pg_policy];
+	bool		isnull[Natts_pg_policy];
+	ObjectAddress target;
+	ObjectAddress myself;
 
 	/* Parse command */
 	polcmd = parse_policy_command(stmt->cmd);
@@ -506,8 +506,8 @@ CreatePolicy(CreatePolicyStmt *stmt)
 	with_check_pstate = make_parsestate(NULL);
 
 	/* zero-clear */
-	memset(values,   0, sizeof(values));
-	memset(isnull,   0, sizeof(isnull));
+	memset(values, 0, sizeof(values));
+	memset(isnull, 0, sizeof(isnull));
 
 	/* Get id of table.  Also handles permissions checks. */
 	table_id = RangeVarGetRelidExtended(stmt->table, AccessExclusiveLock,
@@ -515,7 +515,7 @@ CreatePolicy(CreatePolicyStmt *stmt)
 										RangeVarCallbackForPolicy,
 										(void *) stmt);
 
-	/* Open target_table to build quals. No lock is necessary.*/
+	/* Open target_table to build quals. No lock is necessary. */
 	target_table = relation_open(table_id, NoLock);
 
 	/* Add for the regular security quals */
@@ -534,9 +534,9 @@ CreatePolicy(CreatePolicyStmt *stmt)
 								"POLICY");
 
 	with_check_qual = transformWhereClause(with_check_pstate,
-								copyObject(stmt->with_check),
-								EXPR_KIND_WHERE,
-								"POLICY");
+										   copyObject(stmt->with_check),
+										   EXPR_KIND_WHERE,
+										   "POLICY");
 
 	/* Open pg_policy catalog */
 	pg_policy_rel = heap_open(PolicyRelationId, RowExclusiveLock);
@@ -568,7 +568,7 @@ CreatePolicy(CreatePolicyStmt *stmt)
 
 	values[Anum_pg_policy_polrelid - 1] = ObjectIdGetDatum(table_id);
 	values[Anum_pg_policy_polname - 1] = DirectFunctionCall1(namein,
-															 CStringGetDatum(stmt->policy_name));
+										 CStringGetDatum(stmt->policy_name));
 	values[Anum_pg_policy_polcmd - 1] = CharGetDatum(polcmd);
 	values[Anum_pg_policy_polroles - 1] = PointerGetDatum(role_ids);
 
@@ -625,34 +625,34 @@ CreatePolicy(CreatePolicyStmt *stmt)
 
 /*
  * AlterPolicy -
- *   handles the execution of the ALTER POLICY command.
+ *	 handles the execution of the ALTER POLICY command.
  *
  * stmt - the AlterPolicyStmt that describes the policy and how to alter it.
  */
 ObjectAddress
 AlterPolicy(AlterPolicyStmt *stmt)
 {
-	Relation		pg_policy_rel;
-	Oid				policy_id;
-	Relation		target_table;
-	Oid				table_id;
-	ArrayType	   *role_ids = NULL;
-	List		   *qual_parse_rtable = NIL;
-	List		   *with_check_parse_rtable = NIL;
-	Node		   *qual = NULL;
-	Node		   *with_check_qual = NULL;
-	ScanKeyData		skey[2];
-	SysScanDesc		sscan;
-	HeapTuple		policy_tuple;
-	HeapTuple		new_tuple;
-	Datum			values[Natts_pg_policy];
-	bool			isnull[Natts_pg_policy];
-	bool			replaces[Natts_pg_policy];
-	ObjectAddress	target;
-	ObjectAddress	myself;
-	Datum			cmd_datum;
-	char			polcmd;
-	bool			polcmd_isnull;
+	Relation	pg_policy_rel;
+	Oid			policy_id;
+	Relation	target_table;
+	Oid			table_id;
+	ArrayType  *role_ids = NULL;
+	List	   *qual_parse_rtable = NIL;
+	List	   *with_check_parse_rtable = NIL;
+	Node	   *qual = NULL;
+	Node	   *with_check_qual = NULL;
+	ScanKeyData skey[2];
+	SysScanDesc sscan;
+	HeapTuple	policy_tuple;
+	HeapTuple	new_tuple;
+	Datum		values[Natts_pg_policy];
+	bool		isnull[Natts_pg_policy];
+	bool		replaces[Natts_pg_policy];
+	ObjectAddress target;
+	ObjectAddress myself;
+	Datum		cmd_datum;
+	char		polcmd;
+	bool		polcmd_isnull;
 
 	/* Parse role_ids */
 	if (stmt->roles != NULL)
@@ -669,8 +669,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
 	/* Parse the using policy clause */
 	if (stmt->qual)
 	{
-		RangeTblEntry  *rte;
-		ParseState	   *qual_pstate = make_parsestate(NULL);
+		RangeTblEntry *rte;
+		ParseState *qual_pstate = make_parsestate(NULL);
 
 		rte = addRangeTableEntryForRelation(qual_pstate, target_table,
 											NULL, false, false);
@@ -688,8 +688,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
 	/* Parse the with-check policy clause */
 	if (stmt->with_check)
 	{
-		RangeTblEntry  *rte;
-		ParseState	   *with_check_pstate = make_parsestate(NULL);
+		RangeTblEntry *rte;
+		ParseState *with_check_pstate = make_parsestate(NULL);
 
 		rte = addRangeTableEntryForRelation(with_check_pstate, target_table,
 											NULL, false, false);
@@ -706,9 +706,9 @@ AlterPolicy(AlterPolicyStmt *stmt)
 	}
 
 	/* zero-clear */
-	memset(values,   0, sizeof(values));
+	memset(values, 0, sizeof(values));
 	memset(replaces, 0, sizeof(replaces));
-	memset(isnull,   0, sizeof(isnull));
+	memset(isnull, 0, sizeof(isnull));
 
 	/* Find policy to update. */
 	pg_policy_rel = heap_open(PolicyRelationId, RowExclusiveLock);
@@ -756,8 +756,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
 				 errmsg("only USING expression allowed for SELECT, DELETE")));
 
 	/*
-	 * If the command is INSERT then WITH CHECK should be the only
-	 * expression provided.
+	 * If the command is INSERT then WITH CHECK should be the only expression
+	 * provided.
 	 */
 	if ((polcmd == ACL_INSERT_CHR)
 		&& stmt->qual != NULL)
@@ -829,19 +829,19 @@ AlterPolicy(AlterPolicyStmt *stmt)
 
 /*
  * rename_policy -
- *   change the name of a policy on a relation
+ *	 change the name of a policy on a relation
  */
 ObjectAddress
 rename_policy(RenameStmt *stmt)
 {
-	Relation		pg_policy_rel;
-	Relation		target_table;
-	Oid				table_id;
-	Oid				opoloid;
-	ScanKeyData		skey[2];
-	SysScanDesc		sscan;
-	HeapTuple		policy_tuple;
-	ObjectAddress	address;
+	Relation	pg_policy_rel;
+	Relation	target_table;
+	Oid			table_id;
+	Oid			opoloid;
+	ScanKeyData skey[2];
+	SysScanDesc sscan;
+	HeapTuple	policy_tuple;
+	ObjectAddress address;
 
 	/* Get id of table.  Also handles permissions checks. */
 	table_id = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
@@ -875,7 +875,7 @@ rename_policy(RenameStmt *stmt)
 		ereport(ERROR,
 				(errcode(ERRCODE_DUPLICATE_OBJECT),
 				 errmsg("policy \"%s\" for table \"%s\" already exists",
-						stmt->newname, RelationGetRelationName(target_table))));
+					 stmt->newname, RelationGetRelationName(target_table))));
 
 	systable_endscan(sscan);
 
@@ -903,7 +903,7 @@ rename_policy(RenameStmt *stmt)
 		ereport(ERROR,
 				(errcode(ERRCODE_UNDEFINED_OBJECT),
 				 errmsg("policy \"%s\" for table \"%s\" does not exist",
-						stmt->subname, RelationGetRelationName(target_table))));
+					 stmt->subname, RelationGetRelationName(target_table))));
 
 	opoloid = HeapTupleGetOid(policy_tuple);
 
@@ -923,9 +923,9 @@ rename_policy(RenameStmt *stmt)
 	ObjectAddressSet(address, PolicyRelationId, opoloid);
 
 	/*
-	 * Invalidate relation's relcache entry so that other backends (and
-	 * this one too!) are sent SI message to make them rebuild relcache
-	 * entries.  (Ideally this should happen automatically...)
+	 * Invalidate relation's relcache entry so that other backends (and this
+	 * one too!) are sent SI message to make them rebuild relcache entries.
+	 * (Ideally this should happen automatically...)
 	 */
 	CacheInvalidateRelcache(target_table);
 
@@ -946,11 +946,11 @@ rename_policy(RenameStmt *stmt)
 Oid
 get_relation_policy_oid(Oid relid, const char *policy_name, bool missing_ok)
 {
-	Relation		pg_policy_rel;
-	ScanKeyData		skey[2];
-	SysScanDesc		sscan;
-	HeapTuple		policy_tuple;
-	Oid				policy_oid;
+	Relation	pg_policy_rel;
+	ScanKeyData skey[2];
+	SysScanDesc sscan;
+	HeapTuple	policy_tuple;
+	Oid			policy_oid;
 
 	pg_policy_rel = heap_open(PolicyRelationId, AccessShareLock);
 
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 5a7beff7d56..01e8612145a 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -44,7 +44,7 @@ static void AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerI
 Oid
 CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
 {
-	const char	*schemaName = stmt->schemaname;
+	const char *schemaName = stmt->schemaname;
 	Oid			namespaceId;
 	OverrideSearchPath *overridePath;
 	List	   *parsetree_list;
@@ -68,7 +68,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
 	/* fill schema name with the user name if not specified */
 	if (!schemaName)
 	{
-		HeapTuple tuple;
+		HeapTuple	tuple;
 
 		tuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(owner_uid));
 		if (!HeapTupleIsValid(tuple))
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index bb85cb9f13f..9c1037fe53f 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -566,8 +566,8 @@ nextval_internal(Oid relid)
 		PreventCommandIfReadOnly("nextval()");
 
 	/*
-	 * Forbid this during parallel operation because, to make it work,
-	 * the cooperating backends would need to share the backend-local cached
+	 * Forbid this during parallel operation because, to make it work, the
+	 * cooperating backends would need to share the backend-local cached
 	 * sequence information.  Currently, we don't support that.
 	 */
 	PreventCommandIfParallelMode("nextval()");
@@ -702,10 +702,10 @@ nextval_internal(Oid relid)
 
 	/*
 	 * If something needs to be WAL logged, acquire an xid, so this
-	 * transaction's commit will trigger a WAL flush and wait for
-	 * syncrep. It's sufficient to ensure the toplevel transaction has an xid,
-	 * no need to assign xids subxacts, that'll already trigger an appropriate
-	 * wait.  (Have to do that here, so we're outside the critical section)
+	 * transaction's commit will trigger a WAL flush and wait for syncrep.
+	 * It's sufficient to ensure the toplevel transaction has an xid, no need
+	 * to assign xids subxacts, that'll already trigger an appropriate wait.
+	 * (Have to do that here, so we're outside the critical section)
 	 */
 	if (logit && RelationNeedsWAL(seqrel))
 		GetTopTransactionId();
@@ -870,8 +870,8 @@ do_setval(Oid relid, int64 next, bool iscalled)
 		PreventCommandIfReadOnly("setval()");
 
 	/*
-	 * Forbid this during parallel operation because, to make it work,
-	 * the cooperating backends would need to share the backend-local cached
+	 * Forbid this during parallel operation because, to make it work, the
+	 * cooperating backends would need to share the backend-local cached
 	 * sequence information.  Currently, we don't support that.
 	 */
 	PreventCommandIfParallelMode("setval()");
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 5114e6f1a4d..84dbee0c411 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -306,14 +306,14 @@ static void createForeignKeyTriggers(Relation rel, Oid refRelOid,
 						 Constraint *fkconstraint,
 						 Oid constraintOid, Oid indexOid);
 static void ATController(AlterTableStmt *parsetree,
-						 Relation rel, List *cmds, bool recurse, LOCKMODE lockmode);
+			 Relation rel, List *cmds, bool recurse, LOCKMODE lockmode);
 static void ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
 		  bool recurse, bool recursing, LOCKMODE lockmode);
 static void ATRewriteCatalogs(List **wqueue, LOCKMODE lockmode);
 static void ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
 		  AlterTableCmd *cmd, LOCKMODE lockmode);
 static void ATRewriteTables(AlterTableStmt *parsetree,
-							List **wqueue, LOCKMODE lockmode);
+				List **wqueue, LOCKMODE lockmode);
 static void ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode);
 static AlteredTableInfo *ATGetQueueEntry(List **wqueue, Relation rel);
 static void ATSimplePermissions(Relation rel, int allowed_targets);
@@ -631,7 +631,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
 
 			cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint));
 			cooked->contype = CONSTR_DEFAULT;
-			cooked->conoid = InvalidOid;	/* until created */
+			cooked->conoid = InvalidOid;		/* until created */
 			cooked->name = NULL;
 			cooked->attnum = attnum;
 			cooked->expr = colDef->cooked_default;
@@ -1751,7 +1751,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
 
 					cooked = (CookedConstraint *) palloc(sizeof(CookedConstraint));
 					cooked->contype = CONSTR_CHECK;
-					cooked->conoid = InvalidOid;	/* until created */
+					cooked->conoid = InvalidOid;		/* until created */
 					cooked->name = pstrdup(name);
 					cooked->attnum = 0; /* not used for constraints */
 					cooked->expr = expr;
@@ -1781,7 +1781,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
 	 */
 	if (inhSchema != NIL)
 	{
-		int		schema_attno = 0;
+		int			schema_attno = 0;
 
 		foreach(entry, schema)
 		{
@@ -1809,14 +1809,14 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
 				 * Yes, try to merge the two column definitions. They must
 				 * have the same type, typmod, and collation.
 				 */
-				 if (exist_attno == schema_attno)
+				if (exist_attno == schema_attno)
 					ereport(NOTICE,
-					   (errmsg("merging column \"%s\" with inherited definition",
-							   attributeName)));
+					(errmsg("merging column \"%s\" with inherited definition",
+							attributeName)));
 				else
 					ereport(NOTICE,
-					   (errmsg("moving and merging column \"%s\" with inherited definition", attributeName),
-						errdetail("User-specified column moved to the position of the inherited column.")));
+							(errmsg("moving and merging column \"%s\" with inherited definition", attributeName),
+							 errdetail("User-specified column moved to the position of the inherited column.")));
 				def = (ColumnDef *) list_nth(inhSchema, exist_attno - 1);
 				typenameTypeIdAndMod(NULL, def->typeName, &defTypeId, &deftypmod);
 				typenameTypeIdAndMod(NULL, newdef->typeName, &newTypeId, &newtypmod);
@@ -3496,7 +3496,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
 			break;
 		case AT_ReAddIndex:		/* ADD INDEX */
 			address = ATExecAddIndex(tab, rel, (IndexStmt *) cmd->def, true,
-									lockmode);
+									 lockmode);
 			break;
 		case AT_AddConstraint:	/* ADD CONSTRAINT */
 			address =
@@ -3803,7 +3803,7 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode)
 			 * And fire it only once.
 			 */
 			if (parsetree)
-				EventTriggerTableRewrite((Node *)parsetree,
+				EventTriggerTableRewrite((Node *) parsetree,
 										 tab->relid,
 										 tab->rewrite);
 
@@ -5960,7 +5960,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
 									  true,		/* update pg_index */
 									  true,		/* remove old dependencies */
 									  allowSystemTableMods,
-									  false);		/* is_internal */
+									  false);	/* is_internal */
 
 	index_close(indexRel, NoLock);
 
@@ -6906,7 +6906,7 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse,
 						 HeapTupleGetOid(tuple));
 	}
 	else
-		address = InvalidObjectAddress;		/* already validated */
+		address = InvalidObjectAddress; /* already validated */
 
 	systable_endscan(scan);
 
@@ -7866,11 +7866,12 @@ ATPrepAlterColumnType(List **wqueue,
 	{
 		/*
 		 * Set up an expression to transform the old data value to the new
-		 * type. If a USING option was given, use the expression as transformed
-		 * by transformAlterTableStmt, else just take the old value and try to
-		 * coerce it.  We do this first so that type incompatibility can be
-		 * detected before we waste effort, and because we need the expression
-		 * to be parsed against the original table row type.
+		 * type. If a USING option was given, use the expression as
+		 * transformed by transformAlterTableStmt, else just take the old
+		 * value and try to coerce it.  We do this first so that type
+		 * incompatibility can be detected before we waste effort, and because
+		 * we need the expression to be parsed against the original table row
+		 * type.
 		 */
 		if (!transform)
 		{
@@ -8221,8 +8222,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
 				 * specified in the policy's USING or WITH CHECK qual
 				 * expressions.  It might be possible to rewrite and recheck
 				 * the policy expression, but punt for now.  It's certainly
-				 * easy enough to remove and recreate the policy; still,
-				 * FIXME someday.
+				 * easy enough to remove and recreate the policy; still, FIXME
+				 * someday.
 				 */
 				ereport(ERROR,
 						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -9701,9 +9702,9 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt)
 			!ConditionalLockRelationOid(relOid, AccessExclusiveLock))
 			ereport(ERROR,
 					(errcode(ERRCODE_OBJECT_IN_USE),
-			   errmsg("aborting because lock on relation \"%s\".\"%s\" is not available",
-					  get_namespace_name(relForm->relnamespace),
-					  NameStr(relForm->relname))));
+					 errmsg("aborting because lock on relation \"%s\".\"%s\" is not available",
+							get_namespace_name(relForm->relnamespace),
+							NameStr(relForm->relname))));
 		else
 			LockRelationOid(relOid, AccessExclusiveLock);
 
@@ -10923,9 +10924,9 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode
 static void
 ATExecEnableRowSecurity(Relation rel)
 {
-	Relation		pg_class;
-	Oid				relid;
-	HeapTuple		tuple;
+	Relation	pg_class;
+	Oid			relid;
+	HeapTuple	tuple;
 
 	relid = RelationGetRelid(rel);
 
@@ -10949,9 +10950,9 @@ ATExecEnableRowSecurity(Relation rel)
 static void
 ATExecDisableRowSecurity(Relation rel)
 {
-	Relation		pg_class;
-	Oid				relid;
-	HeapTuple		tuple;
+	Relation	pg_class;
+	Oid			relid;
+	HeapTuple	tuple;
 
 	relid = RelationGetRelid(rel);
 
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index d9b9587f1e6..31091ba7f3e 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -4329,7 +4329,7 @@ AfterTriggerEndSubXact(bool isCommit)
 static void
 AfterTriggerEnlargeQueryState(void)
 {
-	int		init_depth = afterTriggers.maxquerydepth;
+	int			init_depth = afterTriggers.maxquerydepth;
 
 	Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
 
@@ -4396,7 +4396,7 @@ SetConstraintStateCreate(int numalloc)
 	state = (SetConstraintState)
 		MemoryContextAllocZero(TopTransactionContext,
 							   offsetof(SetConstraintStateData, trigstates) +
-						   numalloc * sizeof(SetConstraintTriggerData));
+							   numalloc * sizeof(SetConstraintTriggerData));
 
 	state->numalloc = numalloc;
 
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index ab13be225c8..de913538910 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -575,13 +575,13 @@ DefineType(List *names, List *parameters)
 	if (typmodinOid && func_volatile(typmodinOid) == PROVOLATILE_VOLATILE)
 		ereport(WARNING,
 				(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-				 errmsg("type modifier input function %s should not be volatile",
-						NameListToString(typmodinName))));
+			 errmsg("type modifier input function %s should not be volatile",
+					NameListToString(typmodinName))));
 	if (typmodoutOid && func_volatile(typmodoutOid) == PROVOLATILE_VOLATILE)
 		ereport(WARNING,
 				(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-				 errmsg("type modifier output function %s should not be volatile",
-						NameListToString(typmodoutName))));
+			errmsg("type modifier output function %s should not be volatile",
+				   NameListToString(typmodoutName))));
 
 	/*
 	 * OK, we're done checking, time to make the type.  We must assign the
@@ -643,32 +643,32 @@ DefineType(List *names, List *parameters)
 			   array_type,		/* type name */
 			   typeNamespace,	/* namespace */
 			   InvalidOid,		/* relation oid (n/a here) */
-			   0,		/* relation kind (ditto) */
-			   GetUserId(),	/* owner's ID */
-			   -1,		/* internal size (always varlena) */
+			   0,				/* relation kind (ditto) */
+			   GetUserId(),		/* owner's ID */
+			   -1,				/* internal size (always varlena) */
 			   TYPTYPE_BASE,	/* type-type (base type) */
 			   TYPCATEGORY_ARRAY,		/* type-category (array) */
-			   false,	/* array types are never preferred */
+			   false,			/* array types are never preferred */
 			   delimiter,		/* array element delimiter */
 			   F_ARRAY_IN,		/* input procedure */
-			   F_ARRAY_OUT,	/* output procedure */
+			   F_ARRAY_OUT,		/* output procedure */
 			   F_ARRAY_RECV,	/* receive procedure */
 			   F_ARRAY_SEND,	/* send procedure */
-			   typmodinOid,	/* typmodin procedure */
+			   typmodinOid,		/* typmodin procedure */
 			   typmodoutOid,	/* typmodout procedure */
 			   F_ARRAY_TYPANALYZE,		/* analyze procedure */
-			   typoid, /* element type ID */
-			   true,	/* yes this is an array type */
+			   typoid,			/* element type ID */
+			   true,			/* yes this is an array type */
 			   InvalidOid,		/* no further array type */
 			   InvalidOid,		/* base type ID */
-			   NULL,	/* never a default type value */
-			   NULL,	/* binary default isn't sent either */
-			   false,	/* never passed by value */
+			   NULL,			/* never a default type value */
+			   NULL,			/* binary default isn't sent either */
+			   false,			/* never passed by value */
 			   alignment,		/* see above */
-			   'x',	/* ARRAY is always toastable */
-			   -1,		/* typMod (Domains only) */
-			   0,		/* Array dimensions of typbasetype */
-			   false,	/* Type NOT NULL */
+			   'x',				/* ARRAY is always toastable */
+			   -1,				/* typMod (Domains only) */
+			   0,				/* Array dimensions of typbasetype */
+			   false,			/* Type NOT NULL */
 			   collation);		/* type's collation */
 
 	pfree(array_type);
@@ -1616,7 +1616,7 @@ makeRangeConstructors(const char *name, Oid namespace,
 								 PointerGetDatum(NULL), /* parameterModes */
 								 PointerGetDatum(NULL), /* parameterNames */
 								 NIL,	/* parameterDefaults */
-								 PointerGetDatum(NULL),		/* trftypes */
+								 PointerGetDatum(NULL), /* trftypes */
 								 PointerGetDatum(NULL), /* proconfig */
 								 1.0,	/* procost */
 								 0.0);	/* prorows */
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 456c27ebe07..3b381c58353 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -87,7 +87,8 @@ CreateRole(CreateRoleStmt *stmt)
 	bool		createdb = false;		/* Can the user create databases? */
 	bool		canlogin = false;		/* Can this user login? */
 	bool		isreplication = false;	/* Is this a replication role? */
-	bool		bypassrls = false;		/* Is this a row security enabled role? */
+	bool		bypassrls = false;		/* Is this a row security enabled
+										 * role? */
 	int			connlimit = -1; /* maximum connections allowed */
 	List	   *addroleto = NIL;	/* roles to make this a member of */
 	List	   *rolemembers = NIL;		/* roles to be members of this role */
@@ -300,7 +301,7 @@ CreateRole(CreateRoleStmt *stmt)
 		if (!superuser())
 			ereport(ERROR,
 					(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-					 errmsg("must be superuser to change bypassrls attribute.")));
+				errmsg("must be superuser to change bypassrls attribute.")));
 	}
 	else
 	{
@@ -681,7 +682,7 @@ AlterRole(AlterRoleStmt *stmt)
 		if (!superuser())
 			ereport(ERROR,
 					(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-					 errmsg("must be superuser to change bypassrls attribute")));
+				 errmsg("must be superuser to change bypassrls attribute")));
 	}
 	else if (!have_createrole_privilege())
 	{
@@ -721,11 +722,11 @@ AlterRole(AlterRoleStmt *stmt)
 	 * Call the password checking hook if there is one defined
 	 */
 	if (check_password_hook && password)
-		(*check_password_hook)(rolename ,
-							   password,
-			 isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
-							   validUntil_datum,
-							   validUntil_null);
+		(*check_password_hook) (rolename,
+								password,
+			   isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
+								validUntil_datum,
+								validUntil_null);
 
 	/*
 	 * Build an updated tuple, perusing the information just obtained
@@ -1358,8 +1359,8 @@ roleSpecsToIds(List *memberNames)
 
 	foreach(l, memberNames)
 	{
-		Node   *rolespec = (Node *) lfirst(l);
-		Oid		roleid;
+		Node	   *rolespec = (Node *) lfirst(l);
+		Oid			roleid;
 
 		roleid = get_rolespec_oid(rolespec, false);
 		result = lappend_oid(result, roleid);
@@ -1455,7 +1456,7 @@ AddRoleMems(const char *rolename, Oid roleid,
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_GRANT_OPERATION),
 					 (errmsg("role \"%s\" is a member of role \"%s\"",
-							 rolename, get_rolespec_name((Node *) memberRole)))));
+						rolename, get_rolespec_name((Node *) memberRole)))));
 
 		/*
 		 * Check if entry for this role/member already exists; if so, give
@@ -1470,7 +1471,7 @@ AddRoleMems(const char *rolename, Oid roleid,
 		{
 			ereport(NOTICE,
 					(errmsg("role \"%s\" is already a member of role \"%s\"",
-							get_rolespec_name((Node *) memberRole), rolename)));
+						 get_rolespec_name((Node *) memberRole), rolename)));
 			ReleaseSysCache(authmem_tuple);
 			continue;
 		}
@@ -1581,7 +1582,7 @@ DelRoleMems(const char *rolename, Oid roleid,
 		{
 			ereport(WARNING,
 					(errmsg("role \"%s\" is not a member of role \"%s\"",
-							get_rolespec_name((Node *) memberRole), rolename)));
+						 get_rolespec_name((Node *) memberRole), rolename)));
 			continue;
 		}
 
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 34ca325a9b6..baf66f1e6c0 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -83,7 +83,7 @@ static bool vacuum_rel(Oid relid, RangeVar *relation, int options,
 void
 ExecVacuum(VacuumStmt *vacstmt, bool isTopLevel)
 {
-	VacuumParams	params;
+	VacuumParams params;
 
 	/* sanity checks on options */
 	Assert(vacstmt->options & (VACOPT_VACUUM | VACOPT_ANALYZE));
@@ -530,8 +530,8 @@ vacuum_set_xid_limits(Relation rel,
 
 	/*
 	 * Compute the multixact age for which freezing is urgent.  This is
-	 * normally autovacuum_multixact_freeze_max_age, but may be less if we
-	 * are short of multixact member space.
+	 * normally autovacuum_multixact_freeze_max_age, but may be less if we are
+	 * short of multixact member space.
 	 */
 	effective_multixact_freeze_max_age = MultiXactMemberFreezeThreshold();
 
@@ -1134,9 +1134,8 @@ vac_truncate_clog(TransactionId frozenXID,
 		return;
 
 	/*
-	 * Truncate CLOG and CommitTs to the oldest computed value.
-	 * Note we don't truncate multixacts; that will be done by the next
-	 * checkpoint.
+	 * Truncate CLOG and CommitTs to the oldest computed value. Note we don't
+	 * truncate multixacts; that will be done by the next checkpoint.
 	 */
 	TruncateCLOG(frozenXID);
 	TruncateCommitTs(frozenXID, true);
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index c94575c81ea..a01cfb4c043 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -105,7 +105,7 @@ typedef struct LVRelStats
 	BlockNumber old_rel_pages;	/* previous value of pg_class.relpages */
 	BlockNumber rel_pages;		/* total number of pages */
 	BlockNumber scanned_pages;	/* number of pages we examined */
-	BlockNumber	pinskipped_pages; /* # of pages we skipped due to a pin */
+	BlockNumber pinskipped_pages;		/* # of pages we skipped due to a pin */
 	double		scanned_tuples; /* counts only tuples on scanned pages */
 	double		old_rel_tuples; /* previous value of pg_class.reltuples */
 	double		new_rel_tuples; /* new estimated total # of tuples */
@@ -336,7 +336,8 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
 			TimestampDifferenceExceeds(starttime, endtime,
 									   params->log_min_duration))
 		{
-			StringInfoData	buf;
+			StringInfoData buf;
+
 			TimestampDifference(starttime, endtime, &secs, &usecs);
 
 			read_rate = 0;
@@ -369,7 +370,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
 							 vacrelstats->new_rel_tuples,
 							 vacrelstats->new_dead_tuples);
 			appendStringInfo(&buf,
-							 _("buffer usage: %d hits, %d misses, %d dirtied\n"),
+						 _("buffer usage: %d hits, %d misses, %d dirtied\n"),
 							 VacuumPageHit,
 							 VacuumPageMiss,
 							 VacuumPageDirty);
@@ -454,7 +455,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 	BlockNumber next_not_all_visible_block;
 	bool		skipping_all_visible_blocks;
 	xl_heap_freeze_tuple *frozen;
-	StringInfoData	buf;
+	StringInfoData buf;
 
 	pg_rusage_init(&ru0);
 
@@ -1784,7 +1785,7 @@ static bool
 heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid)
 {
 	Page		page = BufferGetPage(buf);
-	BlockNumber	blockno = BufferGetBlockNumber(buf);
+	BlockNumber blockno = BufferGetBlockNumber(buf);
 	OffsetNumber offnum,
 				maxoff;
 	bool		all_visible = true;
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 4948a265cb2..04073d3f9f9 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -405,10 +405,10 @@ ExecSupportsMarkRestore(Path *pathnode)
 			 * that does, we presently come here only for ResultPath nodes,
 			 * which represent Result plans without a child plan.  So there is
 			 * nothing to recurse to and we can just say "false".  (This means
-			 * that Result's support for mark/restore is in fact dead code.
-			 * We keep it since it's not much code, and someday the planner
-			 * might be smart enough to use it.  That would require making
-			 * this function smarter too, of course.)
+			 * that Result's support for mark/restore is in fact dead code. We
+			 * keep it since it's not much code, and someday the planner might
+			 * be smart enough to use it.  That would require making this
+			 * function smarter too, of course.)
 			 */
 			Assert(IsA(pathnode, ResultPath));
 			return false;
diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index ee1cd19f96b..bf385086c62 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -78,9 +78,9 @@
  * another in-progress tuple, it has two options:
  *
  * 1. back out the speculatively inserted tuple, then wait for the other
- *    transaction, and retry. Or,
+ *	  transaction, and retry. Or,
  * 2. wait for the other transaction, with the speculatively inserted tuple
- *    still in place.
+ *	  still in place.
  *
  * If two backends insert at the same time, and both try to wait for each
  * other, they will deadlock.  So option 2 is not acceptable.  Option 1
@@ -428,7 +428,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
 													 indexRelation, indexInfo,
 													 tupleid, values, isnull,
 													 estate, false,
-													 waitMode, violationOK, NULL);
+												waitMode, violationOK, NULL);
 		}
 
 		if ((checkUnique == UNIQUE_CHECK_PARTIAL ||
@@ -538,7 +538,7 @@ ExecCheckIndexConstraints(TupleTableSlot *slot,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 					 errmsg("ON CONFLICT does not support deferred unique constraints/exclusion constraints as arbiters"),
 					 errtableconstraint(heapRelation,
-										RelationGetRelationName(indexRelation))));
+								   RelationGetRelationName(indexRelation))));
 
 		checkedIndex = true;
 
@@ -578,7 +578,7 @@ ExecCheckIndexConstraints(TupleTableSlot *slot,
 		satisfiesConstraint =
 			check_exclusion_or_unique_constraint(heapRelation, indexRelation,
 												 indexInfo, &invalidItemPtr,
-												 values, isnull, estate, false,
+											   values, isnull, estate, false,
 												 CEOUC_WAIT, true,
 												 conflictTid);
 		if (!satisfiesConstraint)
@@ -814,9 +814,9 @@ retry:
 					 errmsg("could not create exclusion constraint \"%s\"",
 							RelationGetRelationName(index)),
 					 error_new && error_existing ?
-						errdetail("Key %s conflicts with key %s.",
-								  error_new, error_existing) :
-						errdetail("Key conflicts exist."),
+					 errdetail("Key %s conflicts with key %s.",
+							   error_new, error_existing) :
+					 errdetail("Key conflicts exist."),
 					 errtableconstraint(heap,
 										RelationGetRelationName(index))));
 		else
@@ -825,9 +825,9 @@ retry:
 					 errmsg("conflicting key value violates exclusion constraint \"%s\"",
 							RelationGetRelationName(index)),
 					 error_new && error_existing ?
-						errdetail("Key %s conflicts with existing key %s.",
-								  error_new, error_existing) :
-						errdetail("Key conflicts with existing key."),
+					 errdetail("Key %s conflicts with existing key %s.",
+							   error_new, error_existing) :
+					 errdetail("Key conflicts with existing key."),
 					 errtableconstraint(heap,
 										RelationGetRelationName(index))));
 	}
@@ -838,8 +838,8 @@ retry:
 	 * Ordinarily, at this point the search should have found the originally
 	 * inserted tuple (if any), unless we exited the loop early because of
 	 * conflict.  However, it is possible to define exclusion constraints for
-	 * which that wouldn't be true --- for instance, if the operator is <>.
-	 * So we no longer complain if found_self is still false.
+	 * which that wouldn't be true --- for instance, if the operator is <>. So
+	 * we no longer complain if found_self is still false.
 	 */
 
 	econtext->ecxt_scantuple = save_scantuple;
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 7c29b4b42ae..a1561ce0cc0 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -153,16 +153,16 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
 	 * If the transaction is read-only, we need to check if any writes are
 	 * planned to non-temporary tables.  EXPLAIN is considered read-only.
 	 *
-	 * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE would
-	 * require (a) storing the combocid hash in shared memory, rather than
-	 * synchronizing it just once at the start of parallelism, and (b) an
+	 * Don't allow writes in parallel mode.  Supporting UPDATE and DELETE
+	 * would require (a) storing the combocid hash in shared memory, rather
+	 * than synchronizing it just once at the start of parallelism, and (b) an
 	 * alternative to heap_update()'s reliance on xmax for mutual exclusion.
 	 * INSERT may have no such troubles, but we forbid it to simplify the
 	 * checks.
 	 *
 	 * We have lower-level defenses in CommandCounterIncrement and elsewhere
-	 * against performing unsafe operations in parallel mode, but this gives
-	 * a more user-friendly error message.
+	 * against performing unsafe operations in parallel mode, but this gives a
+	 * more user-friendly error message.
 	 */
 	if ((XactReadOnly || IsInParallelMode()) &&
 		!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
@@ -670,14 +670,14 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
 		 */
 		if (remainingPerms & ACL_INSERT && !ExecCheckRTEPermsModified(relOid,
 																	  userid,
-																	  rte->insertedCols,
-																	  ACL_INSERT))
+														   rte->insertedCols,
+																 ACL_INSERT))
 			return false;
 
 		if (remainingPerms & ACL_UPDATE && !ExecCheckRTEPermsModified(relOid,
 																	  userid,
-																	  rte->updatedCols,
-																	  ACL_UPDATE))
+															rte->updatedCols,
+																 ACL_UPDATE))
 			return false;
 	}
 	return true;
@@ -695,10 +695,9 @@ ExecCheckRTEPermsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
 	int			col = -1;
 
 	/*
-	 * When the query doesn't explicitly update any columns, allow the
-	 * query if we have permission on any column of the rel.  This is
-	 * to handle SELECT FOR UPDATE as well as possible corner cases in
-	 * UPDATE.
+	 * When the query doesn't explicitly update any columns, allow the query
+	 * if we have permission on any column of the rel.  This is to handle
+	 * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
 	 */
 	if (bms_is_empty(modifiedCols))
 	{
@@ -742,8 +741,8 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
 	ListCell   *l;
 
 	/*
-	 * Fail if write permissions are requested in parallel mode for
-	 * table (temp or non-temp), otherwise fail for any non-temp table.
+	 * Fail if write permissions are requested in parallel mode for table
+	 * (temp or non-temp), otherwise fail for any non-temp table.
 	 */
 	foreach(l, plannedstmt->rtable)
 	{
@@ -1665,9 +1664,9 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
 	Relation	rel = resultRelInfo->ri_RelationDesc;
 	TupleDesc	tupdesc = RelationGetDescr(rel);
 	TupleConstr *constr = tupdesc->constr;
-	Bitmapset   *modifiedCols;
-	Bitmapset   *insertedCols;
-	Bitmapset   *updatedCols;
+	Bitmapset  *modifiedCols;
+	Bitmapset  *insertedCols;
+	Bitmapset  *updatedCols;
 
 	Assert(constr);
 
@@ -1722,7 +1721,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
 					(errcode(ERRCODE_CHECK_VIOLATION),
 					 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
 							RelationGetRelationName(rel), failed),
-					 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
+			  val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
 					 errtableconstraint(rel, failed)));
 		}
 	}
@@ -1773,11 +1772,11 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
 		/*
 		 * WITH CHECK OPTION checks are intended to ensure that the new tuple
 		 * is visible (in the case of a view) or that it passes the
-		 * 'with-check' policy (in the case of row security).
-		 * If the qual evaluates to NULL or FALSE, then the new tuple won't be
-		 * included in the view or doesn't pass the 'with-check' policy for the
-		 * table.  We need ExecQual to return FALSE for NULL to handle the view
-		 * case (the opposite of what we do above for CHECK constraints).
+		 * 'with-check' policy (in the case of row security). If the qual
+		 * evaluates to NULL or FALSE, then the new tuple won't be included in
+		 * the view or doesn't pass the 'with-check' policy for the table.  We
+		 * need ExecQual to return FALSE for NULL to handle the view case (the
+		 * opposite of what we do above for CHECK constraints).
 		 */
 		if (!ExecQual((List *) wcoExpr, econtext, false))
 		{
@@ -1788,14 +1787,15 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
 
 			switch (wco->kind)
 			{
-				/*
-				 * For WITH CHECK OPTIONs coming from views, we might be able to
-				 * provide the details on the row, depending on the permissions
-				 * on the relation (that is, if the user could view it directly
-				 * anyway).  For RLS violations, we don't include the data since
-				 * we don't know if the user should be able to view the tuple as
-				 * as that depends on the USING policy.
-				 */
+					/*
+					 * For WITH CHECK OPTIONs coming from views, we might be
+					 * able to provide the details on the row, depending on
+					 * the permissions on the relation (that is, if the user
+					 * could view it directly anyway).  For RLS violations, we
+					 * don't include the data since we don't know if the user
+					 * should be able to view the tuple as as that depends on
+					 * the USING policy.
+					 */
 				case WCO_VIEW_CHECK:
 					insertedCols = GetInsertedColumns(resultRelInfo, estate);
 					updatedCols = GetUpdatedColumns(resultRelInfo, estate);
@@ -1808,8 +1808,8 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
 
 					ereport(ERROR,
 							(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
-						errmsg("new row violates WITH CHECK OPTION for \"%s\"",
-							   wco->relname),
+					  errmsg("new row violates WITH CHECK OPTION for \"%s\"",
+							 wco->relname),
 							 val_desc ? errdetail("Failing row contains %s.",
 												  val_desc) : 0));
 					break;
@@ -1817,14 +1817,14 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
 				case WCO_RLS_UPDATE_CHECK:
 					ereport(ERROR,
 							(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-						 errmsg("new row violates row level security policy for \"%s\"",
-								wco->relname)));
+							 errmsg("new row violates row level security policy for \"%s\"",
+									wco->relname)));
 					break;
 				case WCO_RLS_CONFLICT_CHECK:
 					ereport(ERROR,
 							(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-						 errmsg("new row violates row level security policy (USING expression) for \"%s\"",
-								wco->relname)));
+							 errmsg("new row violates row level security policy (USING expression) for \"%s\"",
+									wco->relname)));
 					break;
 				default:
 					elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
@@ -1915,8 +1915,8 @@ ExecBuildSlotValueDescription(Oid reloid,
 		{
 			/*
 			 * No table-level SELECT, so need to make sure they either have
-			 * SELECT rights on the column or that they have provided the
-			 * data for the column.  If not, omit this column from the error
+			 * SELECT rights on the column or that they have provided the data
+			 * for the column.  If not, omit this column from the error
 			 * message.
 			 */
 			aclresult = pg_attribute_aclcheck(reloid, tupdesc->attrs[i]->attnum,
@@ -2258,14 +2258,14 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
 						break;
 					case LockWaitSkip:
 						if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
-							return NULL; /* skip instead of waiting */
+							return NULL;		/* skip instead of waiting */
 						break;
 					case LockWaitError:
 						if (!ConditionalXactLockTableWait(SnapshotDirty.xmax))
 							ereport(ERROR,
 									(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
 									 errmsg("could not obtain lock on row in relation \"%s\"",
-											RelationGetRelationName(relation))));
+										RelationGetRelationName(relation))));
 						break;
 				}
 				continue;		/* loop back to repeat heap_fetch */
@@ -2313,9 +2313,9 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
 					 * doing so would require changing heap_update and
 					 * heap_delete to not complain about updating "invisible"
 					 * tuples, which seems pretty scary (heap_lock_tuple will
-					 * not complain, but few callers expect HeapTupleInvisible,
-					 * and we're not one of them).  So for now, treat the tuple
-					 * as deleted and do not process.
+					 * not complain, but few callers expect
+					 * HeapTupleInvisible, and we're not one of them).  So for
+					 * now, treat the tuple as deleted and do not process.
 					 */
 					ReleaseBuffer(buffer);
 					return NULL;
@@ -2563,8 +2563,8 @@ EvalPlanQualFetchRowMarks(EPQState *epqstate)
 				if (fdwroutine->RefetchForeignRow == NULL)
 					ereport(ERROR,
 							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-							 errmsg("cannot lock rows in foreign table \"%s\"",
-									RelationGetRelationName(erm->relation))));
+						   errmsg("cannot lock rows in foreign table \"%s\"",
+								  RelationGetRelationName(erm->relation))));
 				copyTuple = fdwroutine->RefetchForeignRow(epqstate->estate,
 														  erm,
 														  datum,
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index d414e20f120..0f911f210bf 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -182,8 +182,8 @@ static Datum ExecEvalArrayCoerceExpr(ArrayCoerceExprState *astate,
 static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
 					  bool *isNull, ExprDoneCond *isDone);
 static Datum ExecEvalGroupingFuncExpr(GroupingFuncExprState *gstate,
-						ExprContext *econtext,
-						bool *isNull, ExprDoneCond *isDone);
+						 ExprContext *econtext,
+						 bool *isNull, ExprDoneCond *isDone);
 
 
 /* ----------------------------------------------------------------
@@ -3034,10 +3034,10 @@ ExecEvalGroupingFuncExpr(GroupingFuncExprState *gstate,
 						 bool *isNull,
 						 ExprDoneCond *isDone)
 {
-	int result = 0;
-	int attnum = 0;
-	Bitmapset *grouped_cols = gstate->aggstate->grouped_cols;
-	ListCell *lc;
+	int			result = 0;
+	int			attnum = 0;
+	Bitmapset  *grouped_cols = gstate->aggstate->grouped_cols;
+	ListCell   *lc;
 
 	if (isDone)
 		*isDone = ExprSingleResult;
@@ -4529,7 +4529,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
 				GroupingFuncExprState *grp_state = makeNode(GroupingFuncExprState);
 				Agg		   *agg = NULL;
 
-				if (!parent || !IsA(parent, AggState) || !IsA(parent->plan, Agg))
+				if (!parent || !IsA(parent, AggState) ||!IsA(parent->plan, Agg))
 					elog(ERROR, "parent of GROUPING is not Agg node");
 
 				grp_state->aggstate = (AggState *) parent;
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 3963408b18c..7e15b797a7e 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -645,7 +645,7 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
 	 * overall targetlist's econtext.  GroupingFunc arguments are never
 	 * evaluated at all.
 	 */
-	if (IsA(node, Aggref) || IsA(node, GroupingFunc))
+	if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
 		return false;
 	if (IsA(node, WindowFunc))
 		return false;
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 01a1e67f09e..31d74e94778 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -337,11 +337,11 @@ typedef struct AggStatePerPhaseData
 {
 	int			numsets;		/* number of grouping sets (or 0) */
 	int		   *gset_lengths;	/* lengths of grouping sets */
-	Bitmapset **grouped_cols;   /* column groupings for rollup */
+	Bitmapset **grouped_cols;	/* column groupings for rollup */
 	FmgrInfo   *eqfunctions;	/* per-grouping-field equality fns */
 	Agg		   *aggnode;		/* Agg node for phase data */
 	Sort	   *sortnode;		/* Sort node for input ordering for phase */
-} AggStatePerPhaseData;
+}	AggStatePerPhaseData;
 
 /*
  * To implement hashed aggregation, we need a hashtable that stores a
@@ -380,12 +380,12 @@ static void finalize_aggregate(AggState *aggstate,
 				   AggStatePerGroup pergroupstate,
 				   Datum *resultVal, bool *resultIsNull);
 static void prepare_projection_slot(AggState *aggstate,
-									TupleTableSlot *slot,
-									int currentSet);
+						TupleTableSlot *slot,
+						int currentSet);
 static void finalize_aggregates(AggState *aggstate,
-								AggStatePerAgg peragg,
-								AggStatePerGroup pergroup,
-								int currentSet);
+					AggStatePerAgg peragg,
+					AggStatePerGroup pergroup,
+					int currentSet);
 static TupleTableSlot *project_aggregates(AggState *aggstate);
 static Bitmapset *find_unaggregated_cols(AggState *aggstate);
 static bool find_unaggregated_cols_walker(Node *node, Bitmapset **colnos);
@@ -441,12 +441,12 @@ initialize_phase(AggState *aggstate, int newphase)
 	}
 
 	/*
-	 * If this isn't the last phase, we need to sort appropriately for the next
-	 * phase in sequence.
+	 * If this isn't the last phase, we need to sort appropriately for the
+	 * next phase in sequence.
 	 */
 	if (newphase < aggstate->numphases - 1)
 	{
-		Sort	   *sortnode = aggstate->phases[newphase+1].sortnode;
+		Sort	   *sortnode = aggstate->phases[newphase + 1].sortnode;
 		PlanState  *outerNode = outerPlanState(aggstate);
 		TupleDesc	tupDesc = ExecGetResultType(outerNode);
 
@@ -540,9 +540,8 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
 	/*
 	 * (Re)set transValue to the initial value.
 	 *
-	 * Note that when the initial value is pass-by-ref, we must copy
-	 * it (into the aggcontext) since we will pfree the transValue
-	 * later.
+	 * Note that when the initial value is pass-by-ref, we must copy it (into
+	 * the aggcontext) since we will pfree the transValue later.
 	 */
 	if (peraggstate->initValueIsNull)
 		pergroupstate->transValue = peraggstate->initValue;
@@ -551,7 +550,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
 		MemoryContext oldContext;
 
 		oldContext = MemoryContextSwitchTo(
-			aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
+		aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
 		pergroupstate->transValue = datumCopy(peraggstate->initValue,
 											  peraggstate->transtypeByVal,
 											  peraggstate->transtypeLen);
@@ -560,11 +559,11 @@ initialize_aggregate(AggState *aggstate, AggStatePerAgg peraggstate,
 	pergroupstate->transValueIsNull = peraggstate->initValueIsNull;
 
 	/*
-	 * If the initial value for the transition state doesn't exist in
-	 * the pg_aggregate table then we will let the first non-NULL
-	 * value returned from the outer procNode become the initial
-	 * value. (This is useful for aggregates like max() and min().)
-	 * The noTransValue flag signals that we still need to do this.
+	 * If the initial value for the transition state doesn't exist in the
+	 * pg_aggregate table then we will let the first non-NULL value returned
+	 * from the outer procNode become the initial value. (This is useful for
+	 * aggregates like max() and min().) The noTransValue flag signals that we
+	 * still need to do this.
 	 */
 	pergroupstate->noTransValue = peraggstate->initValueIsNull;
 }
@@ -586,8 +585,8 @@ initialize_aggregates(AggState *aggstate,
 					  int numReset)
 {
 	int			aggno;
-	int         numGroupingSets = Max(aggstate->phase->numsets, 1);
-	int         setno = 0;
+	int			numGroupingSets = Max(aggstate->phase->numsets, 1);
+	int			setno = 0;
 
 	if (numReset < 1)
 		numReset = numGroupingSets;
@@ -655,7 +654,7 @@ advance_transition_function(AggState *aggstate,
 			 * do not need to pfree the old transValue, since it's NULL.
 			 */
 			oldContext = MemoryContextSwitchTo(
-				aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
+											   aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
 			pergroupstate->transValue = datumCopy(fcinfo->arg[1],
 												  peraggstate->transtypeByVal,
 												  peraggstate->transtypeLen);
@@ -730,9 +729,9 @@ static void
 advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
 {
 	int			aggno;
-	int         setno = 0;
-	int         numGroupingSets = Max(aggstate->phase->numsets, 1);
-	int         numAggs = aggstate->numaggs;
+	int			setno = 0;
+	int			numGroupingSets = Max(aggstate->phase->numsets, 1);
+	int			numAggs = aggstate->numaggs;
 
 	for (aggno = 0; aggno < numAggs; aggno++)
 	{
@@ -1134,7 +1133,7 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet
 {
 	if (aggstate->phase->grouped_cols)
 	{
-		Bitmapset *grouped_cols = aggstate->phase->grouped_cols[currentSet];
+		Bitmapset  *grouped_cols = aggstate->phase->grouped_cols[currentSet];
 
 		aggstate->grouped_cols = grouped_cols;
 
@@ -1156,7 +1155,7 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet
 
 			foreach(lc, aggstate->all_grouped_cols)
 			{
-				int attnum = lfirst_int(lc);
+				int			attnum = lfirst_int(lc);
 
 				if (!bms_is_member(attnum, grouped_cols))
 					slot->tts_isnull[attnum - 1] = true;
@@ -1225,8 +1224,7 @@ project_aggregates(AggState *aggstate)
 	ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
 
 	/*
-	 * Check the qual (HAVING clause); if the group does not match, ignore
-	 * it.
+	 * Check the qual (HAVING clause); if the group does not match, ignore it.
 	 */
 	if (ExecQual(aggstate->ss.ps.qual, econtext, false))
 	{
@@ -1286,7 +1284,7 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
 		*colnos = bms_add_member(*colnos, var->varattno);
 		return false;
 	}
-	if (IsA(node, Aggref) || IsA(node, GroupingFunc))
+	if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
 	{
 		/* do not descend into aggregate exprs */
 		return false;
@@ -1319,7 +1317,7 @@ build_hash_table(AggState *aggstate)
 											  aggstate->hashfunctions,
 											  node->numGroups,
 											  entrysize,
-											  aggstate->aggcontexts[0]->ecxt_per_tuple_memory,
+							 aggstate->aggcontexts[0]->ecxt_per_tuple_memory,
 											  tmpmem);
 }
 
@@ -1521,8 +1519,8 @@ agg_retrieve_direct(AggState *aggstate)
 	/*
 	 * get state info from node
 	 *
-	 * econtext is the per-output-tuple expression context
-	 * tmpcontext is the per-input-tuple expression context
+	 * econtext is the per-output-tuple expression context tmpcontext is the
+	 * per-input-tuple expression context
 	 */
 	econtext = aggstate->ss.ps.ps_ExprContext;
 	tmpcontext = aggstate->tmpcontext;
@@ -1615,17 +1613,17 @@ agg_retrieve_direct(AggState *aggstate)
 		 * If a subgroup for the current grouping set is present, project it.
 		 *
 		 * We have a new group if:
-		 *  - we're out of input but haven't projected all grouping sets
-		 *    (checked above)
+		 *	- we're out of input but haven't projected all grouping sets
+		 *	  (checked above)
 		 * OR
-		 *    - we already projected a row that wasn't from the last grouping
-		 *      set
-		 *    AND
-		 *    - the next grouping set has at least one grouping column (since
-		 *      empty grouping sets project only once input is exhausted)
-		 *    AND
-		 *    - the previous and pending rows differ on the grouping columns
-		 *      of the next grouping set
+		 *	  - we already projected a row that wasn't from the last grouping
+		 *		set
+		 *	  AND
+		 *	  - the next grouping set has at least one grouping column (since
+		 *		empty grouping sets project only once input is exhausted)
+		 *	  AND
+		 *	  - the previous and pending rows differ on the grouping columns
+		 *		of the next grouping set
 		 */
 		if (aggstate->input_done ||
 			(node->aggstrategy == AGG_SORTED &&
@@ -1729,7 +1727,8 @@ agg_retrieve_direct(AggState *aggstate)
 							   firstSlot,
 							   InvalidBuffer,
 							   true);
-				aggstate->grp_firstTuple = NULL;	/* don't keep two pointers */
+				aggstate->grp_firstTuple = NULL;		/* don't keep two
+														 * pointers */
 
 				/* set up for first advance_aggregates call */
 				tmpcontext->ecxt_outertuple = firstSlot;
@@ -1774,7 +1773,7 @@ agg_retrieve_direct(AggState *aggstate)
 											 node->numCols,
 											 node->grpColIdx,
 											 aggstate->phase->eqfunctions,
-											 tmpcontext->ecxt_per_tuple_memory))
+										  tmpcontext->ecxt_per_tuple_memory))
 						{
 							aggstate->grp_firstTuple = ExecCopySlotTuple(outerslot);
 							break;
@@ -1787,8 +1786,8 @@ agg_retrieve_direct(AggState *aggstate)
 			 * Use the representative input tuple for any references to
 			 * non-aggregated input columns in aggregate direct args, the node
 			 * qual, and the tlist.  (If we are not grouping, and there are no
-			 * input rows at all, we will come here with an empty firstSlot ...
-			 * but if not grouping, there can't be any references to
+			 * input rows at all, we will come here with an empty firstSlot
+			 * ... but if not grouping, there can't be any references to
 			 * non-aggregated input columns, so no problem.)
 			 */
 			econtext->ecxt_outertuple = firstSlot;
@@ -1803,8 +1802,8 @@ agg_retrieve_direct(AggState *aggstate)
 		finalize_aggregates(aggstate, peragg, pergroup, currentSet);
 
 		/*
-		 * If there's no row to project right now, we must continue rather than
-		 * returning a null since there might be more groups.
+		 * If there's no row to project right now, we must continue rather
+		 * than returning a null since there might be more groups.
 		 */
 		result = project_aggregates(aggstate);
 		if (result)
@@ -1996,7 +1995,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 
 		foreach(l, node->chain)
 		{
-			Agg	   *agg = lfirst(l);
+			Agg		   *agg = lfirst(l);
 
 			numGroupingSets = Max(numGroupingSets,
 								  list_length(agg->groupingSets));
@@ -2074,7 +2073,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 	ExecAssignScanTypeFromOuterPlan(&aggstate->ss);
 	if (node->chain)
 		ExecSetSlotDescriptor(aggstate->sort_slot,
-							  aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
+						 aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor);
 
 	/*
 	 * Initialize result tuple type and projection info.
@@ -2111,13 +2110,13 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 	for (phase = 0; phase < numPhases; ++phase)
 	{
 		AggStatePerPhase phasedata = &aggstate->phases[phase];
-		Agg *aggnode;
-		Sort *sortnode;
-		int num_sets;
+		Agg		   *aggnode;
+		Sort	   *sortnode;
+		int			num_sets;
 
 		if (phase > 0)
 		{
-			aggnode = list_nth(node->chain, phase-1);
+			aggnode = list_nth(node->chain, phase - 1);
 			sortnode = (Sort *) aggnode->plan.lefttree;
 			Assert(IsA(sortnode, Sort));
 		}
@@ -2137,8 +2136,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 			i = 0;
 			foreach(l, aggnode->groupingSets)
 			{
-				int current_length = list_length(lfirst(l));
-				Bitmapset *cols = NULL;
+				int			current_length = list_length(lfirst(l));
+				Bitmapset  *cols = NULL;
 
 				/* planner forces this to be correct */
 				for (j = 0; j < current_length; ++j)
@@ -2288,8 +2287,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 		/* Begin filling in the peraggstate data */
 		peraggstate->aggrefstate = aggrefstate;
 		peraggstate->aggref = aggref;
-		peraggstate->sortstates =(Tuplesortstate**)
-			palloc0(sizeof(Tuplesortstate*) * numGroupingSets);
+		peraggstate->sortstates = (Tuplesortstate **)
+			palloc0(sizeof(Tuplesortstate *) * numGroupingSets);
 
 		for (currentsortno = 0; currentsortno < numGroupingSets; currentsortno++)
 			peraggstate->sortstates[currentsortno] = NULL;
@@ -2643,11 +2642,11 @@ void
 ExecReScanAgg(AggState *node)
 {
 	ExprContext *econtext = node->ss.ps.ps_ExprContext;
-	PlanState	*outerPlan = outerPlanState(node);
+	PlanState  *outerPlan = outerPlanState(node);
 	Agg		   *aggnode = (Agg *) node->ss.ps.plan;
 	int			aggno;
-	int         numGroupingSets = Max(node->maxsets, 1);
-	int         setno;
+	int			numGroupingSets = Max(node->maxsets, 1);
+	int			setno;
 
 	node->agg_done = false;
 
@@ -2732,7 +2731,7 @@ ExecReScanAgg(AggState *node)
 		 * Reset the per-group state (in particular, mark transvalues null)
 		 */
 		MemSet(node->pergroup, 0,
-			   sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
+			 sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
 
 		/* reset to phase 0 */
 		initialize_phase(node, 0);
@@ -2775,8 +2774,9 @@ AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext)
 	{
 		if (aggcontext)
 		{
-			AggState    *aggstate = ((AggState *) fcinfo->context);
-			ExprContext *cxt  = aggstate->aggcontexts[aggstate->current_set];
+			AggState   *aggstate = ((AggState *) fcinfo->context);
+			ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
+
 			*aggcontext = cxt->ecxt_per_tuple_memory;
 		}
 		return AGG_CONTEXT_AGGREGATE;
@@ -2862,7 +2862,7 @@ AggRegisterCallback(FunctionCallInfo fcinfo,
 	if (fcinfo->context && IsA(fcinfo->context, AggState))
 	{
 		AggState   *aggstate = (AggState *) fcinfo->context;
-		ExprContext *cxt  = aggstate->aggcontexts[aggstate->current_set];
+		ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
 
 		RegisterExprContextCallback(cxt, func, arg);
 
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 40a06f163a6..4597437178a 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -449,7 +449,7 @@ ExecBitmapHeapScan(BitmapHeapScanState *node)
 void
 ExecReScanBitmapHeapScan(BitmapHeapScanState *node)
 {
-	PlanState	*outerPlan = outerPlanState(node);
+	PlanState  *outerPlan = outerPlanState(node);
 
 	/* rescan to release any page pin */
 	heap_rescan(node->ss.ss_currentScanDesc, NULL);
diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c
index 3f87716b8f1..5e4785423e6 100644
--- a/src/backend/executor/nodeGroup.c
+++ b/src/backend/executor/nodeGroup.c
@@ -280,7 +280,7 @@ ExecEndGroup(GroupState *node)
 void
 ExecReScanGroup(GroupState *node)
 {
-	PlanState	*outerPlan = outerPlanState(node);
+	PlanState  *outerPlan = outerPlanState(node);
 
 	node->grp_done = FALSE;
 	node->ss.ps.ps_TupFromTlist = false;
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index b1f6c824329..2a049240549 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -500,8 +500,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 	bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
 
 	/*
-	 * If there's not enough space to store the projected number of tuples
-	 * and the required bucket headers, we will need multiple batches.
+	 * If there's not enough space to store the projected number of tuples and
+	 * the required bucket headers, we will need multiple batches.
 	 */
 	if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
 	{
@@ -512,8 +512,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 		long		bucket_size;
 
 		/*
-		 * Estimate the number of buckets we'll want to have when work_mem
-		 * is entirely full.  Each bucket will contain a bucket pointer plus
+		 * Estimate the number of buckets we'll want to have when work_mem is
+		 * entirely full.  Each bucket will contain a bucket pointer plus
 		 * NTUP_PER_BUCKET tuples, whose projected size already includes
 		 * overhead for the hash code, pointer to the next tuple, etc.
 		 */
@@ -527,9 +527,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 		 * Buckets are simple pointers to hashjoin tuples, while tupsize
 		 * includes the pointer, hash code, and MinimalTupleData.  So buckets
 		 * should never really exceed 25% of work_mem (even for
-		 * NTUP_PER_BUCKET=1); except maybe * for work_mem values that are
-		 * not 2^N bytes, where we might get more * because of doubling.
-		 * So let's look for 50% here.
+		 * NTUP_PER_BUCKET=1); except maybe * for work_mem values that are not
+		 * 2^N bytes, where we might get more * because of doubling. So let's
+		 * look for 50% here.
 		 */
 		Assert(bucket_bytes <= hash_table_bytes / 2);
 
@@ -655,7 +655,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 		hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
 
 		hashtable->buckets = repalloc(hashtable->buckets,
-									  sizeof(HashJoinTuple) * hashtable->nbuckets);
+								sizeof(HashJoinTuple) * hashtable->nbuckets);
 	}
 
 	/*
@@ -671,6 +671,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 	while (oldchunks != NULL)
 	{
 		HashMemoryChunk nextchunk = oldchunks->next;
+
 		/* position within the buffer (up to oldchunks->used) */
 		size_t		idx = 0;
 
@@ -691,7 +692,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 			{
 				/* keep tuple in memory - copy it into the new chunk */
 				HashJoinTuple copyTuple =
-					(HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
+				(HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
+
 				memcpy(copyTuple, hashTuple, hashTupleSize);
 
 				/* and add it back to the appropriate bucket */
@@ -749,15 +751,15 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 static void
 ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
 {
-	HashMemoryChunk	chunk;
+	HashMemoryChunk chunk;
 
 	/* do nothing if not an increase (it's called increase for a reason) */
 	if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
 		return;
 
 	/*
-	 * We already know the optimal number of buckets, so let's just
-	 * compute the log2_nbuckets for it.
+	 * We already know the optimal number of buckets, so let's just compute
+	 * the log2_nbuckets for it.
 	 */
 	hashtable->nbuckets = hashtable->nbuckets_optimal;
 	hashtable->log2_nbuckets = my_log2(hashtable->nbuckets_optimal);
@@ -771,14 +773,14 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
 #endif
 
 	/*
-	 * Just reallocate the proper number of buckets - we don't need to
-	 * walk through them - we can walk the dense-allocated chunks
-	 * (just like in ExecHashIncreaseNumBatches, but without all the
-	 * copying into new chunks)
+	 * Just reallocate the proper number of buckets - we don't need to walk
+	 * through them - we can walk the dense-allocated chunks (just like in
+	 * ExecHashIncreaseNumBatches, but without all the copying into new
+	 * chunks)
 	 */
 	hashtable->buckets =
 		(HashJoinTuple *) repalloc(hashtable->buckets,
-								   hashtable->nbuckets * sizeof(HashJoinTuple));
+								hashtable->nbuckets * sizeof(HashJoinTuple));
 
 	memset(hashtable->buckets, 0, sizeof(void *) * hashtable->nbuckets);
 
@@ -786,12 +788,13 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
 	for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next)
 	{
 		/* process all tuples stored in this chunk */
-		size_t idx = 0;
+		size_t		idx = 0;
+
 		while (idx < chunk->used)
 		{
 			HashJoinTuple hashTuple = (HashJoinTuple) (chunk->data + idx);
-			int		bucketno;
-			int		batchno;
+			int			bucketno;
+			int			batchno;
 
 			ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
 									  &bucketno, &batchno);
@@ -869,10 +872,11 @@ ExecHashTableInsert(HashJoinTable hashtable,
 
 		/*
 		 * Increase the (optimal) number of buckets if we just exceeded the
-		 * NTUP_PER_BUCKET threshold, but only when there's still a single batch.
+		 * NTUP_PER_BUCKET threshold, but only when there's still a single
+		 * batch.
 		 */
 		if ((hashtable->nbatch == 1) &&
-			(hashtable->nbuckets_optimal <= INT_MAX/2) &&	/* overflow protection */
+			(hashtable->nbuckets_optimal <= INT_MAX / 2) &&		/* overflow protection */
 			(ntuples >= (hashtable->nbuckets_optimal * NTUP_PER_BUCKET)))
 		{
 			hashtable->nbuckets_optimal *= 2;
@@ -1636,7 +1640,7 @@ dense_alloc(HashJoinTable hashtable, Size size)
 	{
 		/* allocate new chunk and put it at the beginning of the list */
 		newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
-								  offsetof(HashMemoryChunkData, data) + size);
+								 offsetof(HashMemoryChunkData, data) + size);
 		newChunk->maxlen = size;
 		newChunk->used = 0;
 		newChunk->ntuples = 0;
@@ -1663,15 +1667,15 @@ dense_alloc(HashJoinTable hashtable, Size size)
 	}
 
 	/*
-	 * See if we have enough space for it in the current chunk (if any).
-	 * If not, allocate a fresh chunk.
+	 * See if we have enough space for it in the current chunk (if any). If
+	 * not, allocate a fresh chunk.
 	 */
 	if ((hashtable->chunks == NULL) ||
 		(hashtable->chunks->maxlen - hashtable->chunks->used) < size)
 	{
 		/* allocate new chunk and put it at the beginning of the list */
 		newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
-					   offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);
+					  offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);
 
 		newChunk->maxlen = HASH_CHUNK_SIZE;
 		newChunk->used = size;
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index 976c77b76c6..9f54c4633e8 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -106,8 +106,8 @@ IndexOnlyNext(IndexOnlyScanState *node)
 		 * away, because the tuple is still visible until the deleting
 		 * transaction commits or the statement ends (if it's our
 		 * transaction). In either case, the lock on the VM buffer will have
-		 * been released (acting as a write barrier) after clearing the
-		 * bit. And for us to have a snapshot that includes the deleting
+		 * been released (acting as a write barrier) after clearing the bit.
+		 * And for us to have a snapshot that includes the deleting
 		 * transaction (making the tuple invisible), we must have acquired
 		 * ProcArrayLock after that time, acting as a read barrier.
 		 *
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 79133e08b66..7fd90415f96 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -288,9 +288,9 @@ next_indextuple:
 		 * Can we return this tuple immediately, or does it need to be pushed
 		 * to the reorder queue?  If the ORDER BY expression values returned
 		 * by the index were inaccurate, we can't return it yet, because the
-		 * next tuple from the index might need to come before this one.
-		 * Also, we can't return it yet if there are any smaller tuples in the
-		 * queue already.
+		 * next tuple from the index might need to come before this one. Also,
+		 * we can't return it yet if there are any smaller tuples in the queue
+		 * already.
 		 */
 		if (!was_exact || (topmost && cmp_orderbyvals(lastfetched_vals,
 													  lastfetched_nulls,
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 7bcf99f4889..b9b0f06882f 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -196,11 +196,12 @@ lnext:
 				 * case, so as to avoid the "Halloween problem" of repeated
 				 * update attempts.  In the latter case it might be sensible
 				 * to fetch the updated tuple instead, but doing so would
-				 * require changing heap_update and heap_delete to not complain
-				 * about updating "invisible" tuples, which seems pretty scary
-				 * (heap_lock_tuple will not complain, but few callers expect
-				 * HeapTupleInvisible, and we're not one of them).  So for now,
-				 * treat the tuple as deleted and do not process.
+				 * require changing heap_update and heap_delete to not
+				 * complain about updating "invisible" tuples, which seems
+				 * pretty scary (heap_lock_tuple will not complain, but few
+				 * callers expect HeapTupleInvisible, and we're not one of
+				 * them).  So for now, treat the tuple as deleted and do not
+				 * process.
 				 */
 				goto lnext;
 
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 8ff4352a66a..b2b5aa7e8eb 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -317,7 +317,7 @@ ExecMaterialRestrPos(MaterialState *node)
 void
 ExecReScanMaterial(MaterialState *node)
 {
-	PlanState	*outerPlan = outerPlanState(node);
+	PlanState  *outerPlan = outerPlanState(node);
 
 	ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
 
diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c
index 0c814f0e72d..bdf76808a8a 100644
--- a/src/backend/executor/nodeMergeAppend.c
+++ b/src/backend/executor/nodeMergeAppend.c
@@ -139,10 +139,10 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
 
 		/*
 		 * It isn't feasible to perform abbreviated key conversion, since
-		 * tuples are pulled into mergestate's binary heap as needed.  It would
-		 * likely be counter-productive to convert tuples into an abbreviated
-		 * representation as they're pulled up, so opt out of that additional
-		 * optimization entirely.
+		 * tuples are pulled into mergestate's binary heap as needed.  It
+		 * would likely be counter-productive to convert tuples into an
+		 * abbreviated representation as they're pulled up, so opt out of that
+		 * additional optimization entirely.
 		 */
 		sortKey->abbreviate = false;
 
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 15742c574ad..34b6cf61e0a 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -232,8 +232,8 @@ MJExamineQuals(List *mergeclauses,
 		/*
 		 * sortsupport routine must know if abbreviation optimization is
 		 * applicable in principle.  It is never applicable for merge joins
-		 * because there is no convenient opportunity to convert to alternative
-		 * representation.
+		 * because there is no convenient opportunity to convert to
+		 * alternative representation.
 		 */
 		clause->ssup.abbreviate = false;
 
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 8112fb45b81..874ca6a69bc 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -180,7 +180,7 @@ ExecCheckHeapTupleVisible(EState *estate,
 	if (!HeapTupleSatisfiesVisibility(tuple, estate->es_snapshot, buffer))
 		ereport(ERROR,
 				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-				 errmsg("could not serialize access due to concurrent update")));
+			 errmsg("could not serialize access due to concurrent update")));
 }
 
 /*
@@ -321,8 +321,8 @@ ExecInsert(ModifyTableState *mtstate,
 		/*
 		 * Check any RLS INSERT WITH CHECK policies
 		 *
-		 * ExecWithCheckOptions() will skip any WCOs which are not of
-		 * the kind we are looking for at this point.
+		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
+		 * we are looking for at this point.
 		 */
 		if (resultRelInfo->ri_WithCheckOptions != NIL)
 			ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
@@ -383,9 +383,9 @@ ExecInsert(ModifyTableState *mtstate,
 				else
 				{
 					/*
-					 * In case of ON CONFLICT DO NOTHING, do nothing.
-					 * However, verify that the tuple is visible to the
-					 * executor's MVCC snapshot at higher isolation levels.
+					 * In case of ON CONFLICT DO NOTHING, do nothing. However,
+					 * verify that the tuple is visible to the executor's MVCC
+					 * snapshot at higher isolation levels.
 					 */
 					Assert(onconflict == ONCONFLICT_NOTHING);
 					ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid);
@@ -411,7 +411,7 @@ ExecInsert(ModifyTableState *mtstate,
 
 			/* insert index entries for tuple */
 			recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
-												   estate, true, &specConflict,
+												 estate, true, &specConflict,
 												   arbiterIndexes);
 
 			/* adjust the tuple's state accordingly */
@@ -475,17 +475,16 @@ ExecInsert(ModifyTableState *mtstate,
 	list_free(recheckIndexes);
 
 	/*
-	 * Check any WITH CHECK OPTION constraints from parent views.  We
-	 * are required to do this after testing all constraints and
-	 * uniqueness violations per the SQL spec, so we do it after actually
-	 * inserting the record into the heap and all indexes.
+	 * Check any WITH CHECK OPTION constraints from parent views.  We are
+	 * required to do this after testing all constraints and uniqueness
+	 * violations per the SQL spec, so we do it after actually inserting the
+	 * record into the heap and all indexes.
 	 *
-	 * ExecWithCheckOptions will elog(ERROR) if a violation is found, so
-	 * the tuple will never be seen, if it violates the WITH CHECK
-	 * OPTION.
+	 * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
+	 * tuple will never be seen, if it violates the WITH CHECK OPTION.
 	 *
-	 * ExecWithCheckOptions() will skip any WCOs which are not of
-	 * the kind we are looking for at this point.
+	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
+	 * are looking for at this point.
 	 */
 	if (resultRelInfo->ri_WithCheckOptions != NIL)
 		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
@@ -860,8 +859,8 @@ ExecUpdate(ItemPointer tupleid,
 		 * triggers then trigger.c will have done heap_lock_tuple to lock the
 		 * correct tuple, so there's no need to do them again.)
 		 *
-		 * ExecWithCheckOptions() will skip any WCOs which are not of
-		 * the kind we are looking for at this point.
+		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
+		 * we are looking for at this point.
 		 */
 lreplace:;
 		if (resultRelInfo->ri_WithCheckOptions != NIL)
@@ -990,13 +989,13 @@ lreplace:;
 	list_free(recheckIndexes);
 
 	/*
-	 * Check any WITH CHECK OPTION constraints from parent views.  We
-	 * are required to do this after testing all constraints and
-	 * uniqueness violations per the SQL spec, so we do it after actually
-	 * updating the record in the heap and all indexes.
+	 * Check any WITH CHECK OPTION constraints from parent views.  We are
+	 * required to do this after testing all constraints and uniqueness
+	 * violations per the SQL spec, so we do it after actually updating the
+	 * record in the heap and all indexes.
 	 *
-	 * ExecWithCheckOptions() will skip any WCOs which are not of
-	 * the kind we are looking for at this point.
+	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
+	 * are looking for at this point.
 	 */
 	if (resultRelInfo->ri_WithCheckOptions != NIL)
 		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
@@ -1143,9 +1142,9 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
 	/*
 	 * Make tuple and any needed join variables available to ExecQual and
 	 * ExecProject.  The EXCLUDED tuple is installed in ecxt_innertuple, while
-	 * the target's existing tuple is installed in the scantuple.  EXCLUDED has
-	 * been made to reference INNER_VAR in setrefs.c, but there is no other
-	 * redirection.
+	 * the target's existing tuple is installed in the scantuple.  EXCLUDED
+	 * has been made to reference INNER_VAR in setrefs.c, but there is no
+	 * other redirection.
 	 */
 	econtext->ecxt_scantuple = mtstate->mt_existing;
 	econtext->ecxt_innertuple = excludedSlot;
@@ -1430,7 +1429,7 @@ ExecModifyTable(ModifyTableState *node)
 		{
 			case CMD_INSERT:
 				slot = ExecInsert(node, slot, planSlot,
-								  node->mt_arbiterindexes, node->mt_onconflict,
+								node->mt_arbiterindexes, node->mt_onconflict,
 								  estate, node->canSetTag);
 				break;
 			case CMD_UPDATE:
diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c
index fc89d1dca03..4c1c5237b7d 100644
--- a/src/backend/executor/nodeSamplescan.c
+++ b/src/backend/executor/nodeSamplescan.c
@@ -27,7 +27,7 @@
 #include "utils/tqual.h"
 
 static void InitScanRelation(SampleScanState *node, EState *estate,
-							 int eflags, TableSampleClause *tablesample);
+				 int eflags, TableSampleClause *tablesample);
 static TupleTableSlot *SampleNext(SampleScanState *node);
 
 
@@ -45,9 +45,9 @@ static TupleTableSlot *SampleNext(SampleScanState *node);
 static TupleTableSlot *
 SampleNext(SampleScanState *node)
 {
-	TupleTableSlot	   *slot;
-	TableSampleDesc	   *tsdesc;
-	HeapTuple			tuple;
+	TupleTableSlot *slot;
+	TableSampleDesc *tsdesc;
+	HeapTuple	tuple;
 
 	/*
 	 * get information from the scan state
@@ -60,7 +60,8 @@ SampleNext(SampleScanState *node)
 	if (tuple)
 		ExecStoreTuple(tuple,	/* tuple to store */
 					   slot,	/* slot to store in */
-					   tsdesc->heapScan->rs_cbuf,	/* buffer associated with this tuple */
+					   tsdesc->heapScan->rs_cbuf,		/* buffer associated
+														 * with this tuple */
 					   false);	/* don't pfree this pointer */
 	else
 		ExecClearTuple(slot);
@@ -112,7 +113,7 @@ InitScanRelation(SampleScanState *node, EState *estate, int eflags,
 	 * open that relation and acquire appropriate lock on it.
 	 */
 	currentRelation = ExecOpenScanRelation(estate,
-										   ((SampleScan *) node->ss.ps.plan)->scanrelid,
+								((SampleScan *) node->ss.ps.plan)->scanrelid,
 										   eflags);
 
 	node->ss.ss_currentRelation = currentRelation;
diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c
index 732f3c38dbb..af1dccfb318 100644
--- a/src/backend/executor/nodeSort.c
+++ b/src/backend/executor/nodeSort.c
@@ -290,7 +290,7 @@ ExecSortRestrPos(SortState *node)
 void
 ExecReScanSort(SortState *node)
 {
-	PlanState	*outerPlan = outerPlanState(node);
+	PlanState  *outerPlan = outerPlanState(node);
 
 	/*
 	 * If we haven't sorted yet, just return. If outerplan's chgParam is not
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index bf0c98d8783..ecf96f8c193 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -2057,7 +2057,7 @@ ExecEndWindowAgg(WindowAggState *node)
 void
 ExecReScanWindowAgg(WindowAggState *node)
 {
-	PlanState	*outerPlan = outerPlanState(node);
+	PlanState  *outerPlan = outerPlanState(node);
 	ExprContext *econtext = node->ss.ps.ps_ExprContext;
 
 	node->all_done = false;
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 472de41f9b4..d544ad9c106 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -1344,11 +1344,11 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 	}
 
 	/*
-	 * If told to be read-only, or in parallel mode, verify that this query
-	 * is in fact read-only.  This can't be done earlier because we need to
-	 * look at the finished, planned queries.  (In particular, we don't want
-	 * to do it between GetCachedPlan and PortalDefineQuery, because throwing
-	 * an error between those steps would result in leaking our plancache
+	 * If told to be read-only, or in parallel mode, verify that this query is
+	 * in fact read-only.  This can't be done earlier because we need to look
+	 * at the finished, planned queries.  (In particular, we don't want to do
+	 * it between GetCachedPlan and PortalDefineQuery, because throwing an
+	 * error between those steps would result in leaking our plancache
 	 * refcount.)
 	 */
 	if (read_only || IsInParallelMode())
@@ -1365,8 +1365,8 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 					ereport(ERROR,
 							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 					/* translator: %s is a SQL statement name */
-							 errmsg("%s is not allowed in a non-volatile function",
-									CreateCommandTag(pstmt))));
+					   errmsg("%s is not allowed in a non-volatile function",
+							  CreateCommandTag(pstmt))));
 				else
 					PreventCommandIfParallelMode(CreateCommandTag(pstmt));
 			}
diff --git a/src/backend/lib/bipartite_match.c b/src/backend/lib/bipartite_match.c
index 1adba78ff34..037dd1de30b 100644
--- a/src/backend/lib/bipartite_match.c
+++ b/src/backend/lib/bipartite_match.c
@@ -51,14 +51,14 @@ BipartiteMatch(int u_size, int v_size, short **adjacency)
 
 	while (hk_breadth_search(state))
 	{
-		int		u;
+		int			u;
 
 		for (u = 1; u <= u_size; ++u)
 			if (state->pair_uv[u] == 0)
 				if (hk_depth_search(state, u, 1))
 					state->matching++;
 
-		CHECK_FOR_INTERRUPTS();		/* just in case */
+		CHECK_FOR_INTERRUPTS(); /* just in case */
 	}
 
 	return state;
@@ -108,18 +108,18 @@ hk_breadth_search(BipartiteMatchState *state)
 
 		if (distance[u] < distance[0])
 		{
-			short  *u_adj = state->adjacency[u];
-			int		i = u_adj ? u_adj[0] : 0;
+			short	   *u_adj = state->adjacency[u];
+			int			i = u_adj ? u_adj[0] : 0;
 
 			for (; i > 0; --i)
 			{
-				int	u_next = state->pair_vu[u_adj[i]];
+				int			u_next = state->pair_vu[u_adj[i]];
 
 				if (isinf(distance[u_next]))
 				{
 					distance[u_next] = 1 + distance[u];
 					queue[qhead++] = u_next;
-					Assert(qhead <= usize+2);
+					Assert(qhead <= usize + 2);
 				}
 			}
 		}
@@ -145,11 +145,11 @@ hk_depth_search(BipartiteMatchState *state, int u, int depth)
 
 	for (; i > 0; --i)
 	{
-		int		v = u_adj[i];
+		int			v = u_adj[i];
 
 		if (distance[pair_vu[v]] == distance[u] + 1)
 		{
-			if (hk_depth_search(state, pair_vu[v], depth+1))
+			if (hk_depth_search(state, pair_vu[v], depth + 1))
 			{
 				pair_vu[v] = u;
 				pair_uv[u] = v;
diff --git a/src/backend/lib/hyperloglog.c b/src/backend/lib/hyperloglog.c
index 4b37048c37b..718afb84e08 100644
--- a/src/backend/lib/hyperloglog.c
+++ b/src/backend/lib/hyperloglog.c
@@ -153,7 +153,7 @@ estimateHyperLogLog(hyperLogLogState *cState)
 	if (result <= (5.0 / 2.0) * cState->nRegisters)
 	{
 		/* Small range correction */
-		int 	zero_count = 0;
+		int			zero_count = 0;
 
 		for (i = 0; i < cState->nRegisters; i++)
 		{
@@ -183,7 +183,7 @@ estimateHyperLogLog(hyperLogLogState *cState)
 void
 mergeHyperLogLog(hyperLogLogState *cState, const hyperLogLogState *oState)
 {
-	int		r;
+	int			r;
 
 	if (cState->nRegisters != oState->nRegisters)
 		elog(ERROR, "number of registers mismatch: %zu != %zu",
@@ -216,7 +216,7 @@ mergeHyperLogLog(hyperLogLogState *cState, const hyperLogLogState *oState)
 static inline uint8
 rho(uint32 x, uint8 b)
 {
-	uint8	j = 1;
+	uint8		j = 1;
 
 	while (j <= b && !(x & 0x80000000))
 	{
diff --git a/src/backend/lib/pairingheap.c b/src/backend/lib/pairingheap.c
index 17278fde6ea..3d8a5ea5618 100644
--- a/src/backend/lib/pairingheap.c
+++ b/src/backend/lib/pairingheap.c
@@ -295,7 +295,7 @@ merge_children(pairingheap *heap, pairingheap_node *children)
 static void
 pairingheap_dump_recurse(StringInfo buf,
 						 pairingheap_node *node,
-						 void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
+	 void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
 						 void *opaque,
 						 int depth,
 						 pairingheap_node *prev_or_parent)
@@ -316,7 +316,7 @@ pairingheap_dump_recurse(StringInfo buf,
 
 char *
 pairingheap_dump(pairingheap *heap,
-				 void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
+	 void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
 				 void *opaque)
 {
 	StringInfoData buf;
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 40f30229c05..4699efacd05 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -371,7 +371,7 @@ ClientAuthentication(Port *port)
 					   (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
 						errmsg("pg_hba.conf rejects replication connection for host \"%s\", user \"%s\", %s",
 							   hostinfo, port->user_name,
-							   port->ssl_in_use ? _("SSL on") : _("SSL off"))));
+							port->ssl_in_use ? _("SSL on") : _("SSL off"))));
 #else
 					ereport(FATAL,
 					   (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
@@ -387,7 +387,7 @@ ClientAuthentication(Port *port)
 						errmsg("pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\", %s",
 							   hostinfo, port->user_name,
 							   port->database_name,
-							   port->ssl_in_use ? _("SSL on") : _("SSL off"))));
+							port->ssl_in_use ? _("SSL on") : _("SSL off"))));
 #else
 					ereport(FATAL,
 					   (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index 2646555f141..f0774fe8c95 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -77,10 +77,10 @@
 #include "utils/memutils.h"
 
 
-static int my_sock_read(BIO *h, char *buf, int size);
-static int my_sock_write(BIO *h, const char *buf, int size);
+static int	my_sock_read(BIO *h, char *buf, int size);
+static int	my_sock_write(BIO *h, const char *buf, int size);
 static BIO_METHOD *my_BIO_s_socket(void);
-static int my_SSL_set_fd(Port *port, int fd);
+static int	my_SSL_set_fd(Port *port, int fd);
 
 static DH  *load_dh_file(int keylength);
 static DH  *load_dh_buffer(const char *, size_t);
@@ -571,10 +571,9 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
 	int			err;
 
 	/*
-	 * If SSL renegotiations are enabled and we're getting close to the
-	 * limit, start one now; but avoid it if there's one already in
-	 * progress.  Request the renegotiation 1kB before the limit has
-	 * actually expired.
+	 * If SSL renegotiations are enabled and we're getting close to the limit,
+	 * start one now; but avoid it if there's one already in progress.
+	 * Request the renegotiation 1kB before the limit has actually expired.
 	 */
 	if (ssl_renegotiation_limit && !in_ssl_renegotiation &&
 		port->count > (ssl_renegotiation_limit - 1) * 1024L)
@@ -583,12 +582,12 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
 
 		/*
 		 * The way we determine that a renegotiation has completed is by
-		 * observing OpenSSL's internal renegotiation counter.  Make sure
-		 * we start out at zero, and assume that the renegotiation is
-		 * complete when the counter advances.
+		 * observing OpenSSL's internal renegotiation counter.  Make sure we
+		 * start out at zero, and assume that the renegotiation is complete
+		 * when the counter advances.
 		 *
-		 * OpenSSL provides SSL_renegotiation_pending(), but this doesn't
-		 * seem to work in testing.
+		 * OpenSSL provides SSL_renegotiation_pending(), but this doesn't seem
+		 * to work in testing.
 		 */
 		SSL_clear_num_renegotiations(port->ssl);
 
@@ -658,9 +657,9 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor)
 		}
 
 		/*
-		 * if renegotiation is still ongoing, and we've gone beyond the
-		 * limit, kill the connection now -- continuing to use it can be
-		 * considered a security problem.
+		 * if renegotiation is still ongoing, and we've gone beyond the limit,
+		 * kill the connection now -- continuing to use it can be considered a
+		 * security problem.
 		 */
 		if (in_ssl_renegotiation &&
 			port->count > ssl_renegotiation_limit * 1024L)
@@ -700,7 +699,7 @@ my_sock_read(BIO *h, char *buf, int size)
 
 	if (buf != NULL)
 	{
-		res = secure_raw_read(((Port *)h->ptr), buf, size);
+		res = secure_raw_read(((Port *) h->ptr), buf, size);
 		BIO_clear_retry_flags(h);
 		if (res <= 0)
 		{
@@ -1044,7 +1043,7 @@ SSLerrmessage(void)
 int
 be_tls_get_cipher_bits(Port *port)
 {
-	int bits;
+	int			bits;
 
 	if (port->ssl)
 	{
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index 4e7acbe0804..4a650cc0012 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -51,7 +51,7 @@ char	   *ssl_crl_file;
 int			ssl_renegotiation_limit;
 
 #ifdef USE_SSL
-bool ssl_loaded_verify_locations = false;
+bool		ssl_loaded_verify_locations = false;
 #endif
 
 /* GUC variable controlling SSL cipher list */
@@ -146,7 +146,7 @@ retry:
 	/* In blocking mode, wait until the socket is ready */
 	if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN))
 	{
-		int		w;
+		int			w;
 
 		Assert(waitfor);
 
@@ -162,8 +162,8 @@ retry:
 
 			/*
 			 * We'll retry the read. Most likely it will return immediately
-			 * because there's still no data available, and we'll wait
-			 * for the socket to become ready again.
+			 * because there's still no data available, and we'll wait for the
+			 * socket to become ready again.
 			 */
 		}
 		goto retry;
@@ -225,7 +225,7 @@ retry:
 
 	if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN))
 	{
-		int		w;
+		int			w;
 
 		Assert(waitfor);
 
@@ -241,8 +241,8 @@ retry:
 
 			/*
 			 * We'll retry the write. Most likely it will return immediately
-			 * because there's still no data available, and we'll wait
-			 * for the socket to become ready again.
+			 * because there's still no data available, and we'll wait for the
+			 * socket to become ready again.
 			 */
 		}
 		goto retry;
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index c23938580b9..7a935f34b58 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -1382,8 +1382,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
 	 * situations and is generally considered bad practice.  We keep the
 	 * capability around for backwards compatibility, but we might want to
 	 * remove it at some point in the future.  Users who still need to strip
-	 * the realm off would be better served by using an appropriate regex in
-	 * a pg_ident.conf mapping.
+	 * the realm off would be better served by using an appropriate regex in a
+	 * pg_ident.conf mapping.
 	 */
 	if (hbaline->auth_method == uaGSS ||
 		hbaline->auth_method == uaSSPI)
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 6667cf94c64..a4b37ed5a26 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -1125,7 +1125,7 @@ pq_getstring(StringInfo s)
 
 
 /* --------------------------------
- *		pq_startmsgread	- begin reading a message from the client.
+ *		pq_startmsgread - begin reading a message from the client.
  *
  *		This must be called before any of the pq_get* functions.
  * --------------------------------
@@ -1140,7 +1140,7 @@ pq_startmsgread(void)
 	if (PqCommReadingMsg)
 		ereport(FATAL,
 				(errcode(ERRCODE_PROTOCOL_VIOLATION),
-				 errmsg("terminating connection because protocol sync was lost")));
+		   errmsg("terminating connection because protocol sync was lost")));
 
 	PqCommReadingMsg = true;
 }
diff --git a/src/backend/libpq/pqmq.c b/src/backend/libpq/pqmq.c
index f12f2d582e8..9ca6b7ce0d1 100644
--- a/src/backend/libpq/pqmq.c
+++ b/src/backend/libpq/pqmq.c
@@ -107,17 +107,16 @@ mq_is_send_pending(void)
 static int
 mq_putmessage(char msgtype, const char *s, size_t len)
 {
-	shm_mq_iovec	iov[2];
-	shm_mq_result	result;
+	shm_mq_iovec iov[2];
+	shm_mq_result result;
 
 	/*
-	 * If we're sending a message, and we have to wait because the
-	 * queue is full, and then we get interrupted, and that interrupt
-	 * results in trying to send another message, we respond by detaching
-	 * the queue.  There's no way to return to the original context, but
-	 * even if there were, just queueing the message would amount to
-	 * indefinitely postponing the response to the interrupt.  So we do
-	 * this instead.
+	 * If we're sending a message, and we have to wait because the queue is
+	 * full, and then we get interrupted, and that interrupt results in trying
+	 * to send another message, we respond by detaching the queue.  There's no
+	 * way to return to the original context, but even if there were, just
+	 * queueing the message would amount to indefinitely postponing the
+	 * response to the interrupt.  So we do this instead.
 	 */
 	if (pq_mq_busy)
 	{
@@ -166,10 +165,10 @@ mq_putmessage_noblock(char msgtype, const char *s, size_t len)
 {
 	/*
 	 * While the shm_mq machinery does support sending a message in
-	 * non-blocking mode, there's currently no way to try sending beginning
-	 * to send the message that doesn't also commit us to completing the
-	 * transmission.  This could be improved in the future, but for now
-	 * we don't need it.
+	 * non-blocking mode, there's currently no way to try sending beginning to
+	 * send the message that doesn't also commit us to completing the
+	 * transmission.  This could be improved in the future, but for now we
+	 * don't need it.
 	 */
 	elog(ERROR, "not currently supported");
 }
@@ -201,7 +200,7 @@ pq_parse_errornotice(StringInfo msg, ErrorData *edata)
 	/* Loop over fields and extract each one. */
 	for (;;)
 	{
-		char	code = pq_getmsgbyte(msg);
+		char		code = pq_getmsgbyte(msg);
 		const char *value;
 
 		if (code == '\0')
@@ -215,9 +214,9 @@ pq_parse_errornotice(StringInfo msg, ErrorData *edata)
 		{
 			case PG_DIAG_SEVERITY:
 				if (strcmp(value, "DEBUG") == 0)
-					edata->elevel = DEBUG1;	/* or some other DEBUG level */
+					edata->elevel = DEBUG1;		/* or some other DEBUG level */
 				else if (strcmp(value, "LOG") == 0)
-					edata->elevel = LOG;	/* can't be COMMERROR */
+					edata->elevel = LOG;		/* can't be COMMERROR */
 				else if (strcmp(value, "INFO") == 0)
 					edata->elevel = INFO;
 				else if (strcmp(value, "NOTICE") == 0)
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index cab93725e67..4c363d3d39a 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -1216,7 +1216,7 @@ _copyAggref(const Aggref *from)
 static GroupingFunc *
 _copyGroupingFunc(const GroupingFunc *from)
 {
-	GroupingFunc	   *newnode = makeNode(GroupingFunc);
+	GroupingFunc *newnode = makeNode(GroupingFunc);
 
 	COPY_NODE_FIELD(args);
 	COPY_NODE_FIELD(refs);
@@ -1915,7 +1915,7 @@ _copyFromExpr(const FromExpr *from)
 static OnConflictExpr *
 _copyOnConflictExpr(const OnConflictExpr *from)
 {
-	OnConflictExpr   *newnode = makeNode(OnConflictExpr);
+	OnConflictExpr *newnode = makeNode(OnConflictExpr);
 
 	COPY_SCALAR_FIELD(action);
 	COPY_NODE_FIELD(arbiterElems);
@@ -2173,7 +2173,7 @@ _copySortGroupClause(const SortGroupClause *from)
 static GroupingSet *
 _copyGroupingSet(const GroupingSet *from)
 {
-	GroupingSet		   *newnode = makeNode(GroupingSet);
+	GroupingSet *newnode = makeNode(GroupingSet);
 
 	COPY_SCALAR_FIELD(kind);
 	COPY_NODE_FIELD(content);
diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c
index a9b58eb31fc..4be89f63ae0 100644
--- a/src/backend/nodes/makefuncs.c
+++ b/src/backend/nodes/makefuncs.c
@@ -562,7 +562,7 @@ makeFuncCall(List *name, List *args, int location)
 GroupingSet *
 makeGroupingSet(GroupingSetKind kind, List *content, int location)
 {
-	GroupingSet	   *n = makeNode(GroupingSet);
+	GroupingSet *n = makeNode(GroupingSet);
 
 	n->kind = kind;
 	n->content = content;
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 41763931339..a2bcca5b75a 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -1936,7 +1936,7 @@ expression_tree_walker(Node *node,
 			break;
 		case T_OnConflictExpr:
 			{
-				OnConflictExpr   *onconflict = (OnConflictExpr *) node;
+				OnConflictExpr *onconflict = (OnConflictExpr *) node;
 
 				if (walker((Node *) onconflict->arbiterElems, context))
 					return true;
@@ -2269,8 +2269,8 @@ expression_tree_mutator(Node *node,
 			break;
 		case T_GroupingFunc:
 			{
-				GroupingFunc   *grouping = (GroupingFunc *) node;
-				GroupingFunc   *newnode;
+				GroupingFunc *grouping = (GroupingFunc *) node;
+				GroupingFunc *newnode;
 
 				FLATCOPY(newnode, grouping, GroupingFunc);
 				MUTATE(newnode->args, grouping->args, List *);
@@ -2691,8 +2691,8 @@ expression_tree_mutator(Node *node,
 			break;
 		case T_OnConflictExpr:
 			{
-				OnConflictExpr   *oc = (OnConflictExpr *) node;
-				OnConflictExpr   *newnode;
+				OnConflictExpr *oc = (OnConflictExpr *) node;
+				OnConflictExpr *newnode;
 
 				FLATCOPY(newnode, oc, OnConflictExpr);
 				MUTATE(newnode->arbiterElems, oc->arbiterElems, List *);
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 1fd8763c966..4e6d90d8d8f 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -72,9 +72,9 @@ static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
 static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 					   RangeTblEntry *rte);
 static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel,
-				   RangeTblEntry *rte);
+						 RangeTblEntry *rte);
 static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
-										 RangeTblEntry *rte);
+							 RangeTblEntry *rte);
 static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
 				 RangeTblEntry *rte);
 static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
@@ -451,8 +451,8 @@ set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
 static void
 set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
 {
-	Relids	required_outer;
-	Path   *path;
+	Relids		required_outer;
+	Path	   *path;
 
 	/*
 	 * We don't support pushing join clauses into the quals of a seqscan, but
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index c2b2b7622a6..ac865be6379 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -242,8 +242,8 @@ cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
 	Cost		cpu_per_tuple;
 	BlockNumber pages;
 	double		tuples;
-	RangeTblEntry		   *rte = planner_rt_fetch(baserel->relid, root);
-	TableSampleClause	   *tablesample = rte->tablesample;
+	RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
+	TableSampleClause *tablesample = rte->tablesample;
 
 	/* Should only be applied to base relations */
 	Assert(baserel->relid > 0);
@@ -268,7 +268,7 @@ cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel)
 
 
 	spc_page_cost = tablesample->tsmseqscan ? spc_seq_page_cost :
-					spc_random_page_cost;
+		spc_random_page_cost;
 
 	/*
 	 * disk costs
diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c
index a6c17534f0a..470db878175 100644
--- a/src/backend/optimizer/plan/analyzejoins.c
+++ b/src/backend/optimizer/plan/analyzejoins.c
@@ -672,20 +672,20 @@ query_is_distinct_for(Query *query, List *colnos, List *opids)
 	else if (query->groupingSets)
 	{
 		/*
-		 * If we have grouping sets with expressions, we probably
-		 * don't have uniqueness and analysis would be hard. Punt.
+		 * If we have grouping sets with expressions, we probably don't have
+		 * uniqueness and analysis would be hard. Punt.
 		 */
 		if (query->groupClause)
 			return false;
 
 		/*
-		 * If we have no groupClause (therefore no grouping expressions),
-		 * we might have one or many empty grouping sets. If there's just
-		 * one, then we're returning only one row and are certainly unique.
-		 * But otherwise, we know we're certainly not unique.
+		 * If we have no groupClause (therefore no grouping expressions), we
+		 * might have one or many empty grouping sets. If there's just one,
+		 * then we're returning only one row and are certainly unique. But
+		 * otherwise, we know we're certainly not unique.
 		 */
 		if (list_length(query->groupingSets) == 1 &&
-			((GroupingSet *)linitial(query->groupingSets))->kind == GROUPING_SET_EMPTY)
+			((GroupingSet *) linitial(query->groupingSets))->kind == GROUPING_SET_EMPTY)
 			return true;
 		else
 			return false;
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index b47ef466dc1..a3482def643 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -59,7 +59,7 @@ static Plan *create_unique_plan(PlannerInfo *root, UniquePath *best_path);
 static SeqScan *create_seqscan_plan(PlannerInfo *root, Path *best_path,
 					List *tlist, List *scan_clauses);
 static SampleScan *create_samplescan_plan(PlannerInfo *root, Path *best_path,
-					List *tlist, List *scan_clauses);
+					   List *tlist, List *scan_clauses);
 static Scan *create_indexscan_plan(PlannerInfo *root, IndexPath *best_path,
 					  List *tlist, List *scan_clauses, bool indexonly);
 static BitmapHeapScan *create_bitmap_scan_plan(PlannerInfo *root,
@@ -1153,7 +1153,7 @@ create_seqscan_plan(PlannerInfo *root, Path *best_path,
  */
 static SampleScan *
 create_samplescan_plan(PlannerInfo *root, Path *best_path,
-					List *tlist, List *scan_clauses)
+					   List *tlist, List *scan_clauses)
 {
 	SampleScan *scan_plan;
 	Index		scan_relid = best_path->parent->relid;
@@ -1340,7 +1340,7 @@ create_indexscan_plan(PlannerInfo *root,
 		Assert(list_length(best_path->path.pathkeys) == list_length(indexorderbys));
 		forboth(pathkeyCell, best_path->path.pathkeys, exprCell, indexorderbys)
 		{
-			PathKey	   *pathkey = (PathKey *) lfirst(pathkeyCell);
+			PathKey    *pathkey = (PathKey *) lfirst(pathkeyCell);
 			Node	   *expr = (Node *) lfirst(exprCell);
 			Oid			exprtype = exprType(expr);
 			Oid			sortop;
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 60340e39eda..920c2b77fff 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -64,7 +64,7 @@ planner_hook_type planner_hook = NULL;
 #define EXPRKIND_LIMIT			6
 #define EXPRKIND_APPINFO		7
 #define EXPRKIND_PHV			8
-#define EXPRKIND_TABLESAMPLE    9
+#define EXPRKIND_TABLESAMPLE	9
 
 /* Passthrough data for standard_qp_callback */
 typedef struct
@@ -123,15 +123,15 @@ static void get_column_info_for_window(PlannerInfo *root, WindowClause *wc,
 						   AttrNumber **ordColIdx,
 						   Oid **ordOperators);
 static Plan *build_grouping_chain(PlannerInfo *root,
-						  Query	   *parse,
-						  List	   *tlist,
-						  bool		need_sort_for_grouping,
-						  List	   *rollup_groupclauses,
-						  List	   *rollup_lists,
-						  AttrNumber *groupColIdx,
-						  AggClauseCosts *agg_costs,
-						  long		numGroups,
-						  Plan	   *result_plan);
+					 Query *parse,
+					 List *tlist,
+					 bool need_sort_for_grouping,
+					 List *rollup_groupclauses,
+					 List *rollup_lists,
+					 AttrNumber *groupColIdx,
+					 AggClauseCosts *agg_costs,
+					 long numGroups,
+					 Plan *result_plan);
 
 /*****************************************************************************
  *
@@ -865,13 +865,14 @@ inheritance_planner(PlannerInfo *root)
 	 *
 	 * Note that any RTEs with security barrier quals will be turned into
 	 * subqueries during planning, and so we must create copies of them too,
-	 * except where they are target relations, which will each only be used
-	 * in a single plan.
+	 * except where they are target relations, which will each only be used in
+	 * a single plan.
 	 */
 	resultRTindexes = bms_add_member(resultRTindexes, parentRTindex);
 	foreach(lc, root->append_rel_list)
 	{
 		AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(lc);
+
 		if (appinfo->parent_relid == parentRTindex)
 			resultRTindexes = bms_add_member(resultRTindexes,
 											 appinfo->child_relid);
@@ -1299,6 +1300,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 			foreach(lc, parse->groupClause)
 			{
 				SortGroupClause *gc = lfirst(lc);
+
 				if (gc->tleSortGroupRef > maxref)
 					maxref = gc->tleSortGroupRef;
 			}
@@ -1315,12 +1317,12 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 
 			foreach(lc_set, sets)
 			{
-				List   *current_sets = reorder_grouping_sets(lfirst(lc_set),
-													(list_length(sets) == 1
-													 ? parse->sortClause
-													 : NIL));
-				List   *groupclause = preprocess_groupclause(root, linitial(current_sets));
-				int		ref = 0;
+				List	   *current_sets = reorder_grouping_sets(lfirst(lc_set),
+													  (list_length(sets) == 1
+													   ? parse->sortClause
+													   : NIL));
+				List	   *groupclause = preprocess_groupclause(root, linitial(current_sets));
+				int			ref = 0;
 
 				/*
 				 * Now that we've pinned down an order for the groupClause for
@@ -1333,6 +1335,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 				foreach(lc, groupclause)
 				{
 					SortGroupClause *gc = lfirst(lc);
+
 					tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
 				}
 
@@ -1496,7 +1499,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 
 					foreach(lc3, lfirst(lc2))
 					{
-						List   *gset = lfirst(lc3);
+						List	   *gset = lfirst(lc3);
 
 						dNumGroups += estimate_num_groups(root,
 														  groupExprs,
@@ -1736,7 +1739,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 
 			/* Detect if we'll need an explicit sort for grouping */
 			if (parse->groupClause && !use_hashed_grouping &&
-				!pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
+			  !pathkeys_contained_in(root->group_pathkeys, current_pathkeys))
 			{
 				need_sort_for_grouping = true;
 
@@ -1810,6 +1813,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 				foreach(lc, parse->groupClause)
 				{
 					SortGroupClause *gc = lfirst(lc);
+
 					grouping_map[gc->tleSortGroupRef] = groupColIdx[i++];
 				}
 
@@ -1832,7 +1836,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 												&agg_costs,
 												numGroupCols,
 												groupColIdx,
-												extract_grouping_ops(parse->groupClause),
+									extract_grouping_ops(parse->groupClause),
 												NIL,
 												numGroups,
 												result_plan);
@@ -1842,9 +1846,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 			else if (parse->hasAggs || (parse->groupingSets && parse->groupClause))
 			{
 				/*
-				 * Output is in sorted order by group_pathkeys if, and only if,
-				 * there is a single rollup operation on a non-empty list of
-				 * grouping expressions.
+				 * Output is in sorted order by group_pathkeys if, and only
+				 * if, there is a single rollup operation on a non-empty list
+				 * of grouping expressions.
 				 */
 				if (list_length(rollup_groupclauses) == 1
 					&& list_length(linitial(rollup_groupclauses)) > 0)
@@ -1864,8 +1868,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 												   result_plan);
 
 				/*
-				 * these are destroyed by build_grouping_chain, so make sure we
-				 * don't try and touch them again
+				 * these are destroyed by build_grouping_chain, so make sure
+				 * we don't try and touch them again
 				 */
 				rollup_groupclauses = NIL;
 				rollup_lists = NIL;
@@ -1901,23 +1905,23 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 			}
 			else if (root->hasHavingQual || parse->groupingSets)
 			{
-				int		nrows = list_length(parse->groupingSets);
+				int			nrows = list_length(parse->groupingSets);
 
 				/*
-				 * No aggregates, and no GROUP BY, but we have a HAVING qual or
-				 * grouping sets (which by elimination of cases above must
+				 * No aggregates, and no GROUP BY, but we have a HAVING qual
+				 * or grouping sets (which by elimination of cases above must
 				 * consist solely of empty grouping sets, since otherwise
 				 * groupClause will be non-empty).
 				 *
 				 * This is a degenerate case in which we are supposed to emit
-				 * either 0 or 1 row for each grouping set depending on whether
-				 * HAVING succeeds.  Furthermore, there cannot be any variables
-				 * in either HAVING or the targetlist, so we actually do not
-				 * need the FROM table at all!  We can just throw away the
-				 * plan-so-far and generate a Result node.  This is a
-				 * sufficiently unusual corner case that it's not worth
-				 * contorting the structure of this routine to avoid having to
-				 * generate the plan in the first place.
+				 * either 0 or 1 row for each grouping set depending on
+				 * whether HAVING succeeds.  Furthermore, there cannot be any
+				 * variables in either HAVING or the targetlist, so we
+				 * actually do not need the FROM table at all!	We can just
+				 * throw away the plan-so-far and generate a Result node.
+				 * This is a sufficiently unusual corner case that it's not
+				 * worth contorting the structure of this routine to avoid
+				 * having to generate the plan in the first place.
 				 */
 				result_plan = (Plan *) make_result(root,
 												   tlist,
@@ -1931,7 +1935,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
 				 */
 				if (nrows > 1)
 				{
-					List   *plans = list_make1(result_plan);
+					List	   *plans = list_make1(result_plan);
 
 					while (--nrows > 0)
 						plans = lappend(plans, copyObject(result_plan));
@@ -2279,6 +2283,7 @@ remap_groupColIdx(PlannerInfo *root, List *groupClause)
 	foreach(lc, groupClause)
 	{
 		SortGroupClause *clause = lfirst(lc);
+
 		new_grpColIdx[i++] = grouping_map[clause->tleSortGroupRef];
 	}
 
@@ -2304,15 +2309,15 @@ remap_groupColIdx(PlannerInfo *root, List *groupClause)
  */
 static Plan *
 build_grouping_chain(PlannerInfo *root,
-					 Query	   *parse,
-					 List	   *tlist,
-					 bool		need_sort_for_grouping,
-					 List	   *rollup_groupclauses,
-					 List	   *rollup_lists,
+					 Query *parse,
+					 List *tlist,
+					 bool need_sort_for_grouping,
+					 List *rollup_groupclauses,
+					 List *rollup_lists,
 					 AttrNumber *groupColIdx,
 					 AggClauseCosts *agg_costs,
-					 long		numGroups,
-					 Plan	   *result_plan)
+					 long numGroups,
+					 Plan *result_plan)
 {
 	AttrNumber *top_grpColIdx = groupColIdx;
 	List	   *chain = NIL;
@@ -2366,8 +2371,8 @@ build_grouping_chain(PlannerInfo *root,
 
 		/*
 		 * sort_plan includes the cost of result_plan over again, which is not
-		 * what we want (since it's not actually running that plan). So correct
-		 * the cost figures.
+		 * what we want (since it's not actually running that plan). So
+		 * correct the cost figures.
 		 */
 
 		sort_plan->startup_cost -= result_plan->total_cost;
@@ -2412,7 +2417,7 @@ build_grouping_chain(PlannerInfo *root,
 		result_plan = (Plan *) make_agg(root,
 										tlist,
 										(List *) parse->havingQual,
-										(numGroupCols > 0) ? AGG_SORTED : AGG_PLAIN,
+								 (numGroupCols > 0) ? AGG_SORTED : AGG_PLAIN,
 										agg_costs,
 										numGroupCols,
 										top_grpColIdx,
@@ -2429,7 +2434,7 @@ build_grouping_chain(PlannerInfo *root,
 		 */
 		foreach(lc, chain)
 		{
-			Plan   *subplan = lfirst(lc);
+			Plan	   *subplan = lfirst(lc);
 
 			result_plan->total_cost += subplan->total_cost;
 
@@ -2716,6 +2721,7 @@ select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
 		switch (strength)
 		{
 			case LCS_NONE:
+
 				/*
 				 * We don't need a tuple lock, only the ability to re-fetch
 				 * the row.  Regular tables support ROW_MARK_REFERENCE, but if
@@ -3026,7 +3032,7 @@ preprocess_groupclause(PlannerInfo *root, List *force)
 	{
 		foreach(sl, force)
 		{
-			Index ref = lfirst_int(sl);
+			Index		ref = lfirst_int(sl);
 			SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
 
 			new_groupclause = lappend(new_groupclause, cl);
@@ -3120,7 +3126,7 @@ extract_rollup_sets(List *groupingSets)
 {
 	int			num_sets_raw = list_length(groupingSets);
 	int			num_empty = 0;
-	int			num_sets = 0;		/* distinct sets */
+	int			num_sets = 0;	/* distinct sets */
 	int			num_chains = 0;
 	List	   *result = NIL;
 	List	  **results;
@@ -3152,23 +3158,23 @@ extract_rollup_sets(List *groupingSets)
 		return list_make1(groupingSets);
 
 	/*
-	 * We don't strictly need to remove duplicate sets here, but if we
-	 * don't, they tend to become scattered through the result, which is
-	 * a bit confusing (and irritating if we ever decide to optimize them
-	 * out). So we remove them here and add them back after.
+	 * We don't strictly need to remove duplicate sets here, but if we don't,
+	 * they tend to become scattered through the result, which is a bit
+	 * confusing (and irritating if we ever decide to optimize them out). So
+	 * we remove them here and add them back after.
 	 *
 	 * For each non-duplicate set, we fill in the following:
 	 *
-	 * orig_sets[i] = list of the original set lists
-	 * set_masks[i] = bitmapset for testing inclusion
-	 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
+	 * orig_sets[i] = list of the original set lists set_masks[i] = bitmapset
+	 * for testing inclusion adjacency[i] = array [n, v1, v2, ... vn] of
+	 * adjacency indices
 	 *
 	 * chains[i] will be the result group this set is assigned to.
 	 *
-	 * We index all of these from 1 rather than 0 because it is convenient
-	 * to leave 0 free for the NIL node in the graph algorithm.
+	 * We index all of these from 1 rather than 0 because it is convenient to
+	 * leave 0 free for the NIL node in the graph algorithm.
 	 */
-	orig_sets = palloc0((num_sets_raw + 1) * sizeof(List*));
+	orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
 	set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
 	adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
 	adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
@@ -3192,7 +3198,8 @@ extract_rollup_sets(List *groupingSets)
 		/* we can only be a dup if we're the same length as a previous set */
 		if (j_size == list_length(candidate))
 		{
-			int		k;
+			int			k;
+
 			for (k = j; k < i; ++k)
 			{
 				if (bms_equal(set_masks[k], candidate_set))
@@ -3215,8 +3222,8 @@ extract_rollup_sets(List *groupingSets)
 		}
 		else
 		{
-			int		k;
-			int		n_adj = 0;
+			int			k;
+			int			n_adj = 0;
 
 			orig_sets[i] = list_make1(candidate);
 			set_masks[i] = candidate_set;
@@ -3259,8 +3266,8 @@ extract_rollup_sets(List *groupingSets)
 
 	for (i = 1; i <= num_sets; ++i)
 	{
-		int u = state->pair_vu[i];
-		int v = state->pair_uv[i];
+		int			u = state->pair_vu[i];
+		int			v = state->pair_uv[i];
 
 		if (u > 0 && u < i)
 			chains[i] = chains[u];
@@ -3271,11 +3278,11 @@ extract_rollup_sets(List *groupingSets)
 	}
 
 	/* build result lists. */
-	results = palloc0((num_chains + 1) * sizeof(List*));
+	results = palloc0((num_chains + 1) * sizeof(List *));
 
 	for (i = 1; i <= num_sets; ++i)
 	{
-		int c = chains[i];
+		int			c = chains[i];
 
 		Assert(c > 0);
 
@@ -3334,15 +3341,16 @@ reorder_grouping_sets(List *groupingsets, List *sortclause)
 
 	foreach(lc, groupingsets)
 	{
-		List   *candidate = lfirst(lc);
-		List   *new_elems = list_difference_int(candidate, previous);
+		List	   *candidate = lfirst(lc);
+		List	   *new_elems = list_difference_int(candidate, previous);
 
 		if (list_length(new_elems) > 0)
 		{
 			while (list_length(sortclause) > list_length(previous))
 			{
 				SortGroupClause *sc = list_nth(sortclause, list_length(previous));
-				int ref = sc->tleSortGroupRef;
+				int			ref = sc->tleSortGroupRef;
+
 				if (list_member_int(new_elems, ref))
 				{
 					previous = lappend_int(previous, ref);
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 90e13e49889..a7f65dd529f 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -452,7 +452,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
 			break;
 		case T_SampleScan:
 			{
-				SampleScan	   *splan = (SampleScan *) plan;
+				SampleScan *splan = (SampleScan *) plan;
 
 				splan->scanrelid += rtoffset;
 				splan->plan.targetlist =
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 86585c58ee7..d40083d396e 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -1475,8 +1475,8 @@ contain_leaked_vars_walker(Node *node, void *context)
 				ListCell   *rarg;
 
 				/*
-				 * Check the comparison function and arguments passed to it for
-				 * each pair of row elements.
+				 * Check the comparison function and arguments passed to it
+				 * for each pair of row elements.
 				 */
 				forthree(opid, rcexpr->opnos,
 						 larg, rcexpr->largs,
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 3fe27126086..7f7aa24bb83 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -712,7 +712,7 @@ create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 Path *
 create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
 {
-	Path		   *pathnode = makeNode(Path);
+	Path	   *pathnode = makeNode(Path);
 
 	pathnode->pathtype = T_SampleScan;
 	pathnode->parent = rel;
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index a857ba35264..b04dc2ed49e 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -51,8 +51,8 @@ int			constraint_exclusion = CONSTRAINT_EXCLUSION_PARTITION;
 get_relation_info_hook_type get_relation_info_hook = NULL;
 
 
-static bool infer_collation_opclass_match(InferenceElem *elem, Relation	idxRel,
-						Bitmapset *inferAttrs, List *idxExprs);
+static bool infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
+							  Bitmapset *inferAttrs, List *idxExprs);
 static int32 get_rel_data_width(Relation rel, int32 *attr_widths);
 static List *get_relation_constraints(PlannerInfo *root,
 						 Oid relationObjectId, RelOptInfo *rel,
@@ -427,6 +427,7 @@ List *
 infer_arbiter_indexes(PlannerInfo *root)
 {
 	OnConflictExpr *onconflict = root->parse->onConflict;
+
 	/* Iteration state */
 	Relation	relation;
 	Oid			relationObjectId;
@@ -468,9 +469,9 @@ infer_arbiter_indexes(PlannerInfo *root)
 	 */
 	foreach(l, onconflict->arbiterElems)
 	{
-		InferenceElem  *elem;
-		Var			   *var;
-		int				attno;
+		InferenceElem *elem;
+		Var		   *var;
+		int			attno;
 
 		elem = (InferenceElem *) lfirst(l);
 
@@ -548,8 +549,8 @@ infer_arbiter_indexes(PlannerInfo *root)
 			goto next;
 
 		/*
-		 * Note that we do not perform a check against indcheckxmin (like
-		 * e.g. get_relation_info()) here to eliminate candidates, because
+		 * Note that we do not perform a check against indcheckxmin (like e.g.
+		 * get_relation_info()) here to eliminate candidates, because
 		 * uniqueness checking only cares about the most recently committed
 		 * tuple versions.
 		 */
@@ -605,7 +606,7 @@ infer_arbiter_indexes(PlannerInfo *root)
 		idxExprs = RelationGetIndexExpressions(idxRel);
 		foreach(el, onconflict->arbiterElems)
 		{
-			InferenceElem   *elem = (InferenceElem *) lfirst(el);
+			InferenceElem *elem = (InferenceElem *) lfirst(el);
 
 			/*
 			 * Ensure that collation/opclass aspects of inference expression
@@ -710,7 +711,7 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
 {
 	AttrNumber	natt;
 	Oid			inferopfamily = InvalidOid;		/* OID of att opfamily */
-	Oid			inferopcinputtype = InvalidOid;		/* OID of att opfamily */
+	Oid			inferopcinputtype = InvalidOid; /* OID of att opfamily */
 
 	/*
 	 * If inference specification element lacks collation/opclass, then no
@@ -730,9 +731,9 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
 
 	for (natt = 1; natt <= idxRel->rd_att->natts; natt++)
 	{
-		Oid		opfamily = idxRel->rd_opfamily[natt - 1];
-		Oid		opcinputtype = idxRel->rd_opcintype[natt - 1];
-		Oid		collation = idxRel->rd_indcollation[natt - 1];
+		Oid			opfamily = idxRel->rd_opfamily[natt - 1];
+		Oid			opcinputtype = idxRel->rd_opcintype[natt - 1];
+		Oid			collation = idxRel->rd_indcollation[natt - 1];
 
 		if (elem->inferopclass != InvalidOid &&
 			(inferopfamily != opfamily || inferopcinputtype != opcinputtype))
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index 0f25539d124..773e7b2be1c 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -578,12 +578,13 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
 				/* we do NOT descend into the contained expression */
 				return false;
 			case PVC_RECURSE_AGGREGATES:
+
 				/*
-				 * we do NOT descend into the contained expression,
-				 * even if the caller asked for it, because we never
-				 * actually evaluate it - the result is driven entirely
-				 * off the associated GROUP BY clause, so we never need
-				 * to extract the actual Vars here.
+				 * we do NOT descend into the contained expression, even if
+				 * the caller asked for it, because we never actually evaluate
+				 * it - the result is driven entirely off the associated GROUP
+				 * BY clause, so we never need to extract the actual Vars
+				 * here.
 				 */
 				return false;
 		}
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 82c9abfa915..fc463faa6be 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -53,7 +53,7 @@ static Query *transformInsertStmt(ParseState *pstate, InsertStmt *stmt);
 static List *transformInsertRow(ParseState *pstate, List *exprlist,
 				   List *stmtcols, List *icolumns, List *attrnos);
 static OnConflictExpr *transformOnConflictClause(ParseState *pstate,
-												 OnConflictClause *onConflictClause);
+						  OnConflictClause *onConflictClause);
 static int	count_rowexpr_columns(ParseState *pstate, Node *expr);
 static Query *transformSelectStmt(ParseState *pstate, SelectStmt *stmt);
 static Query *transformValuesClause(ParseState *pstate, SelectStmt *stmt);
@@ -65,7 +65,7 @@ static void determineRecursiveColTypes(ParseState *pstate,
 static Query *transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt);
 static List *transformReturningList(ParseState *pstate, List *returningList);
 static List *transformUpdateTargetList(ParseState *pstate,
-								  List *targetList);
+						  List *targetList);
 static Query *transformDeclareCursorStmt(ParseState *pstate,
 						   DeclareCursorStmt *stmt);
 static Query *transformExplainStmt(ParseState *pstate,
@@ -441,7 +441,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
 	}
 
 	isOnConflictUpdate = (stmt->onConflictClause &&
-						  stmt->onConflictClause->action == ONCONFLICT_UPDATE);
+						stmt->onConflictClause->action == ONCONFLICT_UPDATE);
 
 	/*
 	 * We have three cases to deal with: DEFAULT VALUES (selectStmt == NULL),
@@ -882,7 +882,7 @@ transformOnConflictClause(ParseState *pstate,
 	RangeTblEntry *exclRte = NULL;
 	int			exclRelIndex = 0;
 	List	   *exclRelTlist = NIL;
-	OnConflictExpr   *result;
+	OnConflictExpr *result;
 
 	/* Process the arbiter clause, ON CONFLICT ON (...) */
 	transformOnConflictArbiter(pstate, onConflictClause, &arbiterElems,
@@ -2059,10 +2059,10 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt)
 static List *
 transformUpdateTargetList(ParseState *pstate, List *origTlist)
 {
-	List		   *tlist = NIL;
-	RangeTblEntry  *target_rte;
-	ListCell	   *orig_tl;
-	ListCell	   *tl;
+	List	   *tlist = NIL;
+	RangeTblEntry *target_rte;
+	ListCell   *orig_tl;
+	ListCell   *tl;
 
 	tlist = transformTargetList(pstate, origTlist,
 								EXPR_KIND_UPDATE_SOURCE);
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 1e3f2e0ffa2..478d8ca70bd 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -64,11 +64,11 @@ static void check_ungrouped_columns(Node *node, ParseState *pstate, Query *qry,
 static bool check_ungrouped_columns_walker(Node *node,
 							   check_ungrouped_columns_context *context);
 static void finalize_grouping_exprs(Node *node, ParseState *pstate, Query *qry,
-									List *groupClauses, PlannerInfo *root,
-									bool have_non_var_grouping);
+						List *groupClauses, PlannerInfo *root,
+						bool have_non_var_grouping);
 static bool finalize_grouping_exprs_walker(Node *node,
 							   check_ungrouped_columns_context *context);
-static void check_agglevels_and_constraints(ParseState *pstate,Node *expr);
+static void check_agglevels_and_constraints(ParseState *pstate, Node *expr);
 static List *expand_groupingset_node(GroupingSet *gs);
 
 /*
@@ -246,9 +246,9 @@ transformGroupingFunc(ParseState *pstate, GroupingFunc *p)
 
 	foreach(lc, args)
 	{
-		Node *current_result;
+		Node	   *current_result;
 
-		current_result = transformExpr(pstate, (Node*) lfirst(lc), pstate->p_expr_kind);
+		current_result = transformExpr(pstate, (Node *) lfirst(lc), pstate->p_expr_kind);
 
 		/* acceptability of expressions is checked later */
 
@@ -284,7 +284,7 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr)
 
 	if (isAgg)
 	{
-		Aggref *agg = (Aggref *) expr;
+		Aggref	   *agg = (Aggref *) expr;
 
 		directargs = agg->aggdirectargs;
 		args = agg->args;
@@ -335,7 +335,11 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr)
 			Assert(false);		/* can't happen */
 			break;
 		case EXPR_KIND_OTHER:
-			/* Accept aggregate/grouping here; caller must throw error if wanted */
+
+			/*
+			 * Accept aggregate/grouping here; caller must throw error if
+			 * wanted
+			 */
 			break;
 		case EXPR_KIND_JOIN_ON:
 		case EXPR_KIND_JOIN_USING:
@@ -348,7 +352,11 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr)
 		case EXPR_KIND_FROM_SUBSELECT:
 			/* Should only be possible in a LATERAL subquery */
 			Assert(pstate->p_lateral_active);
-			/* Aggregate/grouping scope rules make it worth being explicit here */
+
+			/*
+			 * Aggregate/grouping scope rules make it worth being explicit
+			 * here
+			 */
 			if (isAgg)
 				err = _("aggregate functions are not allowed in FROM clause of their own query level");
 			else
@@ -932,7 +940,7 @@ transformWindowFuncCall(ParseState *pstate, WindowFunc *wfunc,
 void
 parseCheckAggregates(ParseState *pstate, Query *qry)
 {
-	List       *gset_common = NIL;
+	List	   *gset_common = NIL;
 	List	   *groupClauses = NIL;
 	List	   *groupClauseCommonVars = NIL;
 	bool		have_non_var_grouping;
@@ -956,7 +964,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
 		 * The limit of 4096 is arbitrary and exists simply to avoid resource
 		 * issues from pathological constructs.
 		 */
-		List *gsets = expand_grouping_sets(qry->groupingSets, 4096);
+		List	   *gsets = expand_grouping_sets(qry->groupingSets, 4096);
 
 		if (!gsets)
 			ereport(ERROR,
@@ -964,8 +972,8 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
 					 errmsg("too many grouping sets present (max 4096)"),
 					 parser_errposition(pstate,
 										qry->groupClause
-										? exprLocation((Node *) qry->groupClause)
-										: exprLocation((Node *) qry->groupingSets))));
+									? exprLocation((Node *) qry->groupClause)
+							   : exprLocation((Node *) qry->groupingSets))));
 
 		/*
 		 * The intersection will often be empty, so help things along by
@@ -985,9 +993,9 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
 
 		/*
 		 * If there was only one grouping set in the expansion, AND if the
-		 * groupClause is non-empty (meaning that the grouping set is not empty
-		 * either), then we can ditch the grouping set and pretend we just had
-		 * a normal GROUP BY.
+		 * groupClause is non-empty (meaning that the grouping set is not
+		 * empty either), then we can ditch the grouping set and pretend we
+		 * just had a normal GROUP BY.
 		 */
 		if (list_length(gsets) == 1 && qry->groupClause)
 			qry->groupingSets = NIL;
@@ -1012,13 +1020,13 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
 	 * Build a list of the acceptable GROUP BY expressions for use by
 	 * check_ungrouped_columns().
 	 *
-	 * We get the TLE, not just the expr, because GROUPING wants to know
-	 * the sortgroupref.
+	 * We get the TLE, not just the expr, because GROUPING wants to know the
+	 * sortgroupref.
 	 */
 	foreach(l, qry->groupClause)
 	{
 		SortGroupClause *grpcl = (SortGroupClause *) lfirst(l);
-		TargetEntry	   *expr;
+		TargetEntry *expr;
 
 		expr = get_sortgroupclause_tle(grpcl, qry->targetList);
 		if (expr == NULL)
@@ -1052,13 +1060,14 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
 	 * scans.  (Note we have to flatten aliases before this.)
 	 *
 	 * Track Vars that are included in all grouping sets separately in
-	 * groupClauseCommonVars, since these are the only ones we can use to check
-	 * for functional dependencies.
+	 * groupClauseCommonVars, since these are the only ones we can use to
+	 * check for functional dependencies.
 	 */
 	have_non_var_grouping = false;
 	foreach(l, groupClauses)
 	{
 		TargetEntry *tle = lfirst(l);
+
 		if (!IsA(tle->expr, Var))
 		{
 			have_non_var_grouping = true;
@@ -1335,7 +1344,7 @@ check_ungrouped_columns_walker(Node *node,
 /*
  * finalize_grouping_exprs -
  *	  Scan the given expression tree for GROUPING() and related calls,
- *    and validate and process their arguments.
+ *	  and validate and process their arguments.
  *
  * This is split out from check_ungrouped_columns above because it needs
  * to modify the nodes (which it does in-place, not via a mutator) while
@@ -1411,19 +1420,19 @@ finalize_grouping_exprs_walker(Node *node,
 		GroupingFunc *grp = (GroupingFunc *) node;
 
 		/*
-		 * We only need to check GroupingFunc nodes at the exact level to which
-		 * they belong, since they cannot mix levels in arguments.
+		 * We only need to check GroupingFunc nodes at the exact level to
+		 * which they belong, since they cannot mix levels in arguments.
 		 */
 
 		if ((int) grp->agglevelsup == context->sublevels_up)
 		{
-			ListCell  *lc;
-			List	  *ref_list = NIL;
+			ListCell   *lc;
+			List	   *ref_list = NIL;
 
 			foreach(lc, grp->args)
 			{
-				Node   *expr = lfirst(lc);
-				Index	ref = 0;
+				Node	   *expr = lfirst(lc);
+				Index		ref = 0;
 
 				if (context->root)
 					expr = flatten_join_alias_vars(context->root, expr);
@@ -1436,7 +1445,7 @@ finalize_grouping_exprs_walker(Node *node,
 
 				if (IsA(expr, Var))
 				{
-					Var *var = (Var *) expr;
+					Var		   *var = (Var *) expr;
 
 					if (var->varlevelsup == context->sublevels_up)
 					{
@@ -1517,10 +1526,10 @@ finalize_grouping_exprs_walker(Node *node,
  *
  * For SET nodes, recursively expand contained CUBE and ROLLUP.
  */
-static List*
+static List *
 expand_groupingset_node(GroupingSet *gs)
 {
-	List * result = NIL;
+	List	   *result = NIL;
 
 	switch (gs->kind)
 	{
@@ -1540,8 +1549,8 @@ expand_groupingset_node(GroupingSet *gs)
 
 				while (curgroup_size > 0)
 				{
-					List   *current_result = NIL;
-					int		i = curgroup_size;
+					List	   *current_result = NIL;
+					int			i = curgroup_size;
 
 					foreach(lc, rollup_val)
 					{
@@ -1568,10 +1577,10 @@ expand_groupingset_node(GroupingSet *gs)
 
 		case GROUPING_SET_CUBE:
 			{
-				List   *cube_list = gs->content;
-				int		number_bits = list_length(cube_list);
-				uint32	num_sets;
-				uint32	i;
+				List	   *cube_list = gs->content;
+				int			number_bits = list_length(cube_list);
+				uint32		num_sets;
+				uint32		i;
 
 				/* parser should cap this much lower */
 				Assert(number_bits < 31);
@@ -1580,9 +1589,9 @@ expand_groupingset_node(GroupingSet *gs)
 
 				for (i = 0; i < num_sets; i++)
 				{
-					List *current_result = NIL;
-					ListCell *lc;
-					uint32 mask = 1U;
+					List	   *current_result = NIL;
+					ListCell   *lc;
+					uint32		mask = 1U;
 
 					foreach(lc, cube_list)
 					{
@@ -1611,7 +1620,7 @@ expand_groupingset_node(GroupingSet *gs)
 
 				foreach(lc, gs->content)
 				{
-					List *current_result = expand_groupingset_node(lfirst(lc));
+					List	   *current_result = expand_groupingset_node(lfirst(lc));
 
 					result = list_concat(result, current_result);
 				}
@@ -1625,8 +1634,9 @@ expand_groupingset_node(GroupingSet *gs)
 static int
 cmp_list_len_asc(const void *a, const void *b)
 {
-	int la = list_length(*(List*const*)a);
-	int lb = list_length(*(List*const*)b);
+	int			la = list_length(*(List *const *) a);
+	int			lb = list_length(*(List *const *) b);
+
 	return (la > lb) ? 1 : (la == lb) ? 0 : -1;
 }
 
@@ -1641,7 +1651,7 @@ List *
 expand_grouping_sets(List *groupingSets, int limit)
 {
 	List	   *expanded_groups = NIL;
-	List       *result = NIL;
+	List	   *result = NIL;
 	double		numsets = 1;
 	ListCell   *lc;
 
@@ -1650,7 +1660,7 @@ expand_grouping_sets(List *groupingSets, int limit)
 
 	foreach(lc, groupingSets)
 	{
-		List *current_result = NIL;
+		List	   *current_result = NIL;
 		GroupingSet *gs = lfirst(lc);
 
 		current_result = expand_groupingset_node(gs);
@@ -1666,9 +1676,9 @@ expand_grouping_sets(List *groupingSets, int limit)
 	}
 
 	/*
-	 * Do cartesian product between sublists of expanded_groups.
-	 * While at it, remove any duplicate elements from individual
-	 * grouping sets (we must NOT change the number of sets though)
+	 * Do cartesian product between sublists of expanded_groups. While at it,
+	 * remove any duplicate elements from individual grouping sets (we must
+	 * NOT change the number of sets though)
 	 */
 
 	foreach(lc, (List *) linitial(expanded_groups))
@@ -1698,16 +1708,16 @@ expand_grouping_sets(List *groupingSets, int limit)
 
 	if (list_length(result) > 1)
 	{
-		int		result_len = list_length(result);
-		List  **buf = palloc(sizeof(List*) * result_len);
-		List  **ptr = buf;
+		int			result_len = list_length(result);
+		List	  **buf = palloc(sizeof(List *) * result_len);
+		List	  **ptr = buf;
 
 		foreach(lc, result)
 		{
 			*ptr++ = lfirst(lc);
 		}
 
-		qsort(buf, result_len, sizeof(List*), cmp_list_len_asc);
+		qsort(buf, result_len, sizeof(List *), cmp_list_len_asc);
 
 		result = NIL;
 		ptr = buf;
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index f8eebfe8c3e..e90e1d68e3a 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -82,7 +82,7 @@ static TargetEntry *findTargetlistEntrySQL99(ParseState *pstate, Node *node,
 						 List **tlist, ParseExprKind exprKind);
 static int get_matching_location(int sortgroupref,
 					  List *sortgrouprefs, List *exprs);
-static List *resolve_unique_index_expr(ParseState *pstate, InferClause * infer,
+static List *resolve_unique_index_expr(ParseState *pstate, InferClause *infer,
 						  Relation heapRel);
 static List *addTargetToGroupList(ParseState *pstate, TargetEntry *tle,
 					 List *grouplist, List *targetlist, int location,
@@ -426,14 +426,15 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j, List *namespace)
 static RangeTblEntry *
 transformTableSampleEntry(ParseState *pstate, RangeTableSample *rv)
 {
-	RangeTblEntry   *rte = NULL;
+	RangeTblEntry *rte = NULL;
 	CommonTableExpr *cte = NULL;
 	TableSampleClause *tablesample = NULL;
 
 	/* if relation has an unqualified name, it might be a CTE reference */
 	if (!rv->relation->schemaname)
 	{
-		Index	levelsup;
+		Index		levelsup;
+
 		cte = scanNameSpaceForCTE(pstate, rv->relation->relname, &levelsup);
 	}
 
@@ -443,7 +444,7 @@ transformTableSampleEntry(ParseState *pstate, RangeTableSample *rv)
 
 	if (!rte ||
 		(rte->relkind != RELKIND_RELATION &&
-		rte->relkind != RELKIND_MATVIEW))
+		 rte->relkind != RELKIND_MATVIEW))
 		ereport(ERROR,
 				(errcode(ERRCODE_SYNTAX_ERROR),
 				 errmsg("TABLESAMPLE clause can only be used on tables and materialized views"),
@@ -1167,7 +1168,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
 	else if (IsA(n, RangeTableSample))
 	{
 		/* Tablesample reference */
-		RangeTableSample   *rv = (RangeTableSample *) n;
+		RangeTableSample *rv = (RangeTableSample *) n;
 		RangeTblRef *rtr;
 		RangeTblEntry *rte = NULL;
 		int			rtindex;
@@ -1738,9 +1739,9 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
  * CUBE or ROLLUP can be nested inside GROUPING SETS (but not the reverse),
  * and we leave that alone if we find it. But if we see GROUPING SETS inside
  * GROUPING SETS, we can flatten and normalize as follows:
- *   GROUPING SETS (a, (b,c), GROUPING SETS ((c,d),(e)), (f,g))
+ *	 GROUPING SETS (a, (b,c), GROUPING SETS ((c,d),(e)), (f,g))
  * becomes
- *   GROUPING SETS ((a), (b,c), (c,d), (e), (f,g))
+ *	 GROUPING SETS ((a), (b,c), (c,d), (e), (f,g))
  *
  * This is per the spec's syntax transformations, but these are the only such
  * transformations we do in parse analysis, so that queries retain the
@@ -1750,12 +1751,12 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
  *
  * When we're done, the resulting list should contain only these possible
  * elements:
- *   - an expression
- *   - a CUBE or ROLLUP with a list of expressions nested 2 deep
- *   - a GROUPING SET containing any of:
- *      - expression lists
- *      - empty grouping sets
- *      - CUBE or ROLLUP nodes with lists nested 2 deep
+ *	 - an expression
+ *	 - a CUBE or ROLLUP with a list of expressions nested 2 deep
+ *	 - a GROUPING SET containing any of:
+ *		- expression lists
+ *		- empty grouping sets
+ *		- CUBE or ROLLUP nodes with lists nested 2 deep
  * The return is a new list, but doesn't deep-copy the old nodes except for
  * GroupingSet nodes.
  *
@@ -1775,7 +1776,8 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
 	{
 		case T_RowExpr:
 			{
-				RowExpr *r = (RowExpr *) expr;
+				RowExpr    *r = (RowExpr *) expr;
+
 				if (r->row_format == COERCE_IMPLICIT_CAST)
 					return flatten_grouping_sets((Node *) r->args,
 												 false, NULL);
@@ -1792,7 +1794,8 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
 
 				/*
 				 * at the top level, we skip over all empty grouping sets; the
-				 * caller can supply the canonical GROUP BY () if nothing is left.
+				 * caller can supply the canonical GROUP BY () if nothing is
+				 * left.
 				 */
 
 				if (toplevel && gset->kind == GROUPING_SET_EMPTY)
@@ -1800,15 +1803,15 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
 
 				foreach(l2, gset->content)
 				{
-					Node   *n2 = flatten_grouping_sets(lfirst(l2), false, NULL);
+					Node	   *n2 = flatten_grouping_sets(lfirst(l2), false, NULL);
 
 					result_set = lappend(result_set, n2);
 				}
 
 				/*
-				 * At top level, keep the grouping set node; but if we're in a nested
-				 * grouping set, then we need to concat the flattened result into the
-				 * outer list if it's simply nested.
+				 * At top level, keep the grouping set node; but if we're in a
+				 * nested grouping set, then we need to concat the flattened
+				 * result into the outer list if it's simply nested.
 				 */
 
 				if (toplevel || (gset->kind != GROUPING_SET_SETS))
@@ -1823,12 +1826,13 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets)
 				List	   *result = NIL;
 				ListCell   *l;
 
-				foreach(l, (List *)expr)
+				foreach(l, (List *) expr)
 				{
-					Node   *n = flatten_grouping_sets(lfirst(l), toplevel, hasGroupingSets);
+					Node	   *n = flatten_grouping_sets(lfirst(l), toplevel, hasGroupingSets);
+
 					if (n != (Node *) NIL)
 					{
-						if (IsA(n,List))
+						if (IsA(n, List))
 							result = list_concat(result, (List *) n);
 						else
 							result = lappend(result, n);
@@ -1888,15 +1892,15 @@ transformGroupClauseExpr(List **flatresult, Bitmapset *seen_local,
 		 * (Duplicates in grouping sets can affect the number of returned
 		 * rows, so can't be dropped indiscriminately.)
 		 *
-		 * Since we don't care about anything except the sortgroupref,
-		 * we can use a bitmapset rather than scanning lists.
+		 * Since we don't care about anything except the sortgroupref, we can
+		 * use a bitmapset rather than scanning lists.
 		 */
-		if (bms_is_member(tle->ressortgroupref,seen_local))
+		if (bms_is_member(tle->ressortgroupref, seen_local))
 			return 0;
 
 		/*
-		 * If we're already in the flat clause list, we don't need
-		 * to consider adding ourselves again.
+		 * If we're already in the flat clause list, we don't need to consider
+		 * adding ourselves again.
 		 */
 		found = targetIsInSortList(tle, InvalidOid, *flatresult);
 		if (found)
@@ -1928,6 +1932,7 @@ transformGroupClauseExpr(List **flatresult, Bitmapset *seen_local,
 			if (sc->tleSortGroupRef == tle->ressortgroupref)
 			{
 				SortGroupClause *grpc = copyObject(sc);
+
 				if (!toplevel)
 					grpc->nulls_first = false;
 				*flatresult = lappend(*flatresult, grpc);
@@ -1983,17 +1988,18 @@ transformGroupClauseList(List **flatresult,
 
 	foreach(gl, list)
 	{
-		Node        *gexpr = (Node *) lfirst(gl);
-
-		Index ref = transformGroupClauseExpr(flatresult,
-											 seen_local,
-											 pstate,
-											 gexpr,
-											 targetlist,
-											 sortClause,
-											 exprKind,
-											 useSQL99,
-											 toplevel);
+		Node	   *gexpr = (Node *) lfirst(gl);
+
+		Index		ref = transformGroupClauseExpr(flatresult,
+												   seen_local,
+												   pstate,
+												   gexpr,
+												   targetlist,
+												   sortClause,
+												   exprKind,
+												   useSQL99,
+												   toplevel);
+
 		if (ref > 0)
 		{
 			seen_local = bms_add_member(seen_local, ref);
@@ -2036,14 +2042,14 @@ transformGroupingSet(List **flatresult,
 
 	foreach(gl, gset->content)
 	{
-		Node   *n = lfirst(gl);
+		Node	   *n = lfirst(gl);
 
 		if (IsA(n, List))
 		{
-			List *l = transformGroupClauseList(flatresult,
-											   pstate, (List *) n,
-											   targetlist, sortClause,
-											   exprKind, useSQL99, false);
+			List	   *l = transformGroupClauseList(flatresult,
+													 pstate, (List *) n,
+													 targetlist, sortClause,
+												  exprKind, useSQL99, false);
 
 			content = lappend(content, makeGroupingSet(GROUPING_SET_SIMPLE,
 													   l,
@@ -2055,20 +2061,20 @@ transformGroupingSet(List **flatresult,
 
 			content = lappend(content, transformGroupingSet(flatresult,
 															pstate, gset2,
-															targetlist, sortClause,
-															exprKind, useSQL99, false));
+													  targetlist, sortClause,
+												 exprKind, useSQL99, false));
 		}
 		else
 		{
-			Index ref = transformGroupClauseExpr(flatresult,
-												 NULL,
-												 pstate,
-												 n,
-												 targetlist,
-												 sortClause,
-												 exprKind,
-												 useSQL99,
-												 false);
+			Index		ref = transformGroupClauseExpr(flatresult,
+													   NULL,
+													   pstate,
+													   n,
+													   targetlist,
+													   sortClause,
+													   exprKind,
+													   useSQL99,
+													   false);
 
 			content = lappend(content, makeGroupingSet(GROUPING_SET_SIMPLE,
 													   list_make1_int(ref),
@@ -2121,7 +2127,7 @@ transformGroupingSet(List **flatresult,
  *
  * pstate		ParseState
  * grouplist	clause to transform
- * groupingSets	reference to list to contain the grouping set tree
+ * groupingSets reference to list to contain the grouping set tree
  * targetlist	reference to TargetEntry list
  * sortClause	ORDER BY clause (SortGroupClause nodes)
  * exprKind		expression kind
@@ -2136,34 +2142,34 @@ transformGroupClause(ParseState *pstate, List *grouplist, List **groupingSets,
 	List	   *flat_grouplist;
 	List	   *gsets = NIL;
 	ListCell   *gl;
-	bool        hasGroupingSets = false;
+	bool		hasGroupingSets = false;
 	Bitmapset  *seen_local = NULL;
 
 	/*
-	 * Recursively flatten implicit RowExprs. (Technically this is only
-	 * needed for GROUP BY, per the syntax rules for grouping sets, but
-	 * we do it anyway.)
+	 * Recursively flatten implicit RowExprs. (Technically this is only needed
+	 * for GROUP BY, per the syntax rules for grouping sets, but we do it
+	 * anyway.)
 	 */
 	flat_grouplist = (List *) flatten_grouping_sets((Node *) grouplist,
 													true,
 													&hasGroupingSets);
 
 	/*
-	 * If the list is now empty, but hasGroupingSets is true, it's because
-	 * we elided redundant empty grouping sets. Restore a single empty
-	 * grouping set to leave a canonical form: GROUP BY ()
+	 * If the list is now empty, but hasGroupingSets is true, it's because we
+	 * elided redundant empty grouping sets. Restore a single empty grouping
+	 * set to leave a canonical form: GROUP BY ()
 	 */
 
 	if (flat_grouplist == NIL && hasGroupingSets)
 	{
 		flat_grouplist = list_make1(makeGroupingSet(GROUPING_SET_EMPTY,
 													NIL,
-													exprLocation((Node *) grouplist)));
+										  exprLocation((Node *) grouplist)));
 	}
 
 	foreach(gl, flat_grouplist)
 	{
-		Node        *gexpr = (Node *) lfirst(gl);
+		Node	   *gexpr = (Node *) lfirst(gl);
 
 		if (IsA(gexpr, GroupingSet))
 		{
@@ -2184,17 +2190,17 @@ transformGroupClause(ParseState *pstate, List *grouplist, List **groupingSets,
 					gsets = lappend(gsets,
 									transformGroupingSet(&result,
 														 pstate, gset,
-														 targetlist, sortClause,
-														 exprKind, useSQL99, true));
+													  targetlist, sortClause,
+												  exprKind, useSQL99, true));
 					break;
 			}
 		}
 		else
 		{
-			Index ref = transformGroupClauseExpr(&result, seen_local,
-												 pstate, gexpr,
-												 targetlist, sortClause,
-												 exprKind, useSQL99, true);
+			Index		ref = transformGroupClauseExpr(&result, seen_local,
+													   pstate, gexpr,
+													   targetlist, sortClause,
+												   exprKind, useSQL99, true);
 
 			if (ref > 0)
 			{
@@ -2661,9 +2667,9 @@ resolve_unique_index_expr(ParseState *pstate, InferClause *infer,
 
 	foreach(l, infer->indexElems)
 	{
-		IndexElem	   *ielem = (IndexElem *) lfirst(l);
-		InferenceElem  *pInfer = makeNode(InferenceElem);
-		Node		   *parse;
+		IndexElem  *ielem = (IndexElem *) lfirst(l);
+		InferenceElem *pInfer = makeNode(InferenceElem);
+		Node	   *parse;
 
 		/*
 		 * Raw grammar re-uses CREATE INDEX infrastructure for unique index
@@ -2684,7 +2690,7 @@ resolve_unique_index_expr(ParseState *pstate, InferClause *infer,
 		if (ielem->nulls_ordering != SORTBY_NULLS_DEFAULT)
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
-					 errmsg("NULLS FIRST/LAST is not allowed in ON CONFLICT clause"),
+			 errmsg("NULLS FIRST/LAST is not allowed in ON CONFLICT clause"),
 					 parser_errposition(pstate,
 										exprLocation((Node *) infer))));
 
@@ -2767,7 +2773,7 @@ transformOnConflictArbiter(ParseState *pstate,
 				 errmsg("ON CONFLICT DO UPDATE requires inference specification or constraint name"),
 				 errhint("For example, ON CONFLICT (<column>)."),
 				 parser_errposition(pstate,
-									exprLocation((Node *) onConflictClause))));
+								  exprLocation((Node *) onConflictClause))));
 
 	/*
 	 * To simplify certain aspects of its design, speculative insertion into
@@ -2776,9 +2782,9 @@ transformOnConflictArbiter(ParseState *pstate,
 	if (IsCatalogRelation(pstate->p_target_relation))
 		ereport(ERROR,
 				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("ON CONFLICT not supported with system catalog tables"),
+			  errmsg("ON CONFLICT not supported with system catalog tables"),
 				 parser_errposition(pstate,
-									exprLocation((Node *) onConflictClause))));
+								  exprLocation((Node *) onConflictClause))));
 
 	/* Same applies to table used by logical decoding as catalog table */
 	if (RelationIsUsedAsCatalogTable(pstate->p_target_relation))
@@ -2787,7 +2793,7 @@ transformOnConflictArbiter(ParseState *pstate,
 				 errmsg("ON CONFLICT not supported on table \"%s\" used as a catalog table",
 						RelationGetRelationName(pstate->p_target_relation)),
 				 parser_errposition(pstate,
-									exprLocation((Node *) onConflictClause))));
+								  exprLocation((Node *) onConflictClause))));
 
 	/* ON CONFLICT DO NOTHING does not require an inference clause */
 	if (infer)
@@ -2795,9 +2801,8 @@ transformOnConflictArbiter(ParseState *pstate,
 		List	   *save_namespace;
 
 		/*
-		 * While we process the arbiter expressions, accept only
-		 * non-qualified references to the target table. Hide any other
-		 * relations.
+		 * While we process the arbiter expressions, accept only non-qualified
+		 * references to the target table. Hide any other relations.
 		 */
 		save_namespace = pstate->p_namespace;
 		pstate->p_namespace = NIL;
@@ -2806,7 +2811,7 @@ transformOnConflictArbiter(ParseState *pstate,
 
 		if (infer->indexElems)
 			*arbiterExpr = resolve_unique_index_expr(pstate, infer,
-													 pstate->p_target_relation);
+												  pstate->p_target_relation);
 
 		/*
 		 * Handling inference WHERE clause (for partial unique index
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index fa50f92d8dd..fa9761bac31 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -778,14 +778,15 @@ TableSampleClause *
 ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
 				 List *sampleargs, int location)
 {
-	HeapTuple		tuple;
+	HeapTuple	tuple;
 	Form_pg_tablesample_method tsm;
 	Form_pg_proc procform;
 	TableSampleClause *tablesample;
-	List		   *fargs;
-	ListCell	   *larg;
-	int				nargs, initnargs;
-	Oid				init_arg_types[FUNC_MAX_ARGS];
+	List	   *fargs;
+	ListCell   *larg;
+	int			nargs,
+				initnargs;
+	Oid			init_arg_types[FUNC_MAX_ARGS];
 
 	/* Load the tablesample method */
 	tuple = SearchSysCache1(TABLESAMPLEMETHODNAME, PointerGetDatum(samplemethod));
@@ -817,7 +818,7 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
 	tuple = SearchSysCache1(PROCOID,
 							ObjectIdGetDatum(tablesample->tsminit));
 
-	if (!HeapTupleIsValid(tuple))	/* should not happen */
+	if (!HeapTupleIsValid(tuple))		/* should not happen */
 		elog(ERROR, "cache lookup failed for function %u",
 			 tablesample->tsminit);
 
@@ -826,15 +827,15 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
 	Assert(initnargs >= 3);
 
 	/*
-	 * First parameter is used to pass the SampleScanState, second is
-	 * seed (REPEATABLE), skip the processing for them here, just assert
-	 * that the types are correct.
+	 * First parameter is used to pass the SampleScanState, second is seed
+	 * (REPEATABLE), skip the processing for them here, just assert that the
+	 * types are correct.
 	 */
 	Assert(procform->proargtypes.values[0] == INTERNALOID);
 	Assert(procform->proargtypes.values[1] == INT4OID);
 	initnargs -= 2;
 	memcpy(init_arg_types, procform->proargtypes.values + 2,
-				   initnargs * sizeof(Oid));
+		   initnargs * sizeof(Oid));
 
 	/* Now we are done with the catalog */
 	ReleaseSysCache(tuple);
@@ -842,7 +843,7 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
 	/* Process repeatable (seed) */
 	if (repeatable != NULL)
 	{
-		Node   *arg = repeatable;
+		Node	   *arg = repeatable;
 
 		if (arg && IsA(arg, A_Const))
 		{
@@ -851,7 +852,7 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
 			if (con->val.type == T_Null)
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-						 errmsg("REPEATABLE clause must be NOT NULL numeric value"),
+				  errmsg("REPEATABLE clause must be NOT NULL numeric value"),
 						 parser_errposition(pstate, con->location)));
 
 		}
@@ -867,21 +868,21 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
 	if (list_length(sampleargs) != initnargs)
 		ereport(ERROR,
 				(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
-		 errmsg_plural("tablesample method \"%s\" expects %d argument got %d",
-					   "tablesample method \"%s\" expects %d arguments got %d",
-					   initnargs,
-					   samplemethod,
-					   initnargs, list_length(sampleargs)),
-		 parser_errposition(pstate, location)));
+		errmsg_plural("tablesample method \"%s\" expects %d argument got %d",
+					  "tablesample method \"%s\" expects %d arguments got %d",
+					  initnargs,
+					  samplemethod,
+					  initnargs, list_length(sampleargs)),
+				 parser_errposition(pstate, location)));
 
 	/* Transform the arguments, typecasting them as needed. */
 	fargs = NIL;
 	nargs = 0;
 	foreach(larg, sampleargs)
 	{
-		Node   *inarg = (Node *) lfirst(larg);
-		Node   *arg = transformExpr(pstate, inarg, EXPR_KIND_FROM_FUNCTION);
-		Oid		argtype = exprType(arg);
+		Node	   *inarg = (Node *) lfirst(larg);
+		Node	   *arg = transformExpr(pstate, inarg, EXPR_KIND_FROM_FUNCTION);
+		Oid			argtype = exprType(arg);
 
 		if (argtype != init_arg_types[nargs])
 		{
@@ -889,12 +890,12 @@ ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable,
 								 COERCION_IMPLICIT))
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-					 errmsg("wrong parameter %d for tablesample method \"%s\"",
-							nargs + 1, samplemethod),
-					 errdetail("Expected type %s got %s.",
-							   format_type_be(init_arg_types[nargs]),
-							   format_type_be(argtype)),
-					 parser_errposition(pstate, exprLocation(inarg))));
+				   errmsg("wrong parameter %d for tablesample method \"%s\"",
+						  nargs + 1, samplemethod),
+						 errdetail("Expected type %s got %s.",
+								   format_type_be(init_arg_types[nargs]),
+								   format_type_be(argtype)),
+						 parser_errposition(pstate, exprLocation(inarg))));
 
 			arg = coerce_type(pstate, arg, argtype, init_arg_types[nargs], -1,
 							  COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1);
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 562c2f54f92..0b2dacfd593 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -530,8 +530,8 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
 						  FuzzyAttrMatchState *fuzzystate, RangeTblEntry *rte,
 						  const char *actual, const char *match, int attnum)
 {
-	int		columndistance;
-	int		matchlen;
+	int			columndistance;
+	int			matchlen;
 
 	/* Bail before computing the Levenshtein distance if there's no hope. */
 	if (fuzzy_rte_penalty > fuzzystate->distance)
@@ -550,7 +550,7 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
 		varstr_levenshtein_less_equal(actual, strlen(actual), match, matchlen,
 									  1, 1, 1,
 									  fuzzystate->distance + 1
-										- fuzzy_rte_penalty);
+									  - fuzzy_rte_penalty);
 
 	/*
 	 * If more than half the characters are different, don't treat it as a
@@ -560,8 +560,8 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
 		return;
 
 	/*
-	 * From this point on, we can ignore the distinction between the
-	 * RTE-name distance and the column-name distance.
+	 * From this point on, we can ignore the distinction between the RTE-name
+	 * distance and the column-name distance.
 	 */
 	columndistance += fuzzy_rte_penalty;
 
@@ -581,11 +581,11 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
 	else if (columndistance == fuzzystate->distance)
 	{
 		/*
-		 * This match distance may equal a prior match within this same
-		 * range table.  When that happens, the prior match may also be
-		 * given, but only if there is no more than two equally distant
-		 * matches from the RTE (in turn, our caller will only accept
-		 * two equally distant matches overall).
+		 * This match distance may equal a prior match within this same range
+		 * table.  When that happens, the prior match may also be given, but
+		 * only if there is no more than two equally distant matches from the
+		 * RTE (in turn, our caller will only accept two equally distant
+		 * matches overall).
 		 */
 		if (AttributeNumberIsValid(fuzzystate->second))
 		{
@@ -606,9 +606,9 @@ updateFuzzyAttrMatchState(int fuzzy_rte_penalty,
 		else if (fuzzystate->distance <= MAX_FUZZY_DISTANCE)
 		{
 			/*
-			 * Record as provisional first match (this can occasionally
-			 * occur because previous lowest distance was "too low a
-			 * bar", rather than being associated with a real match)
+			 * Record as provisional first match (this can occasionally occur
+			 * because previous lowest distance was "too low a bar", rather
+			 * than being associated with a real match)
 			 */
 			fuzzystate->rfirst = rte;
 			fuzzystate->first = attnum;
@@ -820,8 +820,8 @@ searchRangeTableForCol(ParseState *pstate, const char *alias, char *colname,
 
 		foreach(l, pstate->p_rtable)
 		{
-			RangeTblEntry	   *rte = (RangeTblEntry *) lfirst(l);
-			int					fuzzy_rte_penalty = 0;
+			RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
+			int			fuzzy_rte_penalty = 0;
 
 			/*
 			 * Typically, it is not useful to look for matches within join
@@ -851,7 +851,7 @@ searchRangeTableForCol(ParseState *pstate, const char *alias, char *colname,
 			 */
 			if (scanRTEForColumn(orig_pstate, rte, colname, location,
 								 fuzzy_rte_penalty, fuzzystate)
-					&& fuzzy_rte_penalty == 0)
+				&& fuzzy_rte_penalty == 0)
 			{
 				fuzzystate->rfirst = rte;
 				fuzzystate->first = InvalidAttrNumber;
@@ -3040,8 +3040,8 @@ void
 errorMissingColumn(ParseState *pstate,
 				   char *relname, char *colname, int location)
 {
-	FuzzyAttrMatchState	   *state;
-	char				   *closestfirst = NULL;
+	FuzzyAttrMatchState *state;
+	char	   *closestfirst = NULL;
 
 	/*
 	 * Search the entire rtable looking for possible matches.  If we find one,
@@ -3056,10 +3056,10 @@ errorMissingColumn(ParseState *pstate,
 	 * Extract closest col string for best match, if any.
 	 *
 	 * Infer an exact match referenced despite not being visible from the fact
-	 * that an attribute number was not present in state passed back -- this is
-	 * what is reported when !closestfirst.  There might also be an exact match
-	 * that was qualified with an incorrect alias, in which case closestfirst
-	 * will be set (so hint is the same as generic fuzzy case).
+	 * that an attribute number was not present in state passed back -- this
+	 * is what is reported when !closestfirst.  There might also be an exact
+	 * match that was qualified with an incorrect alias, in which case
+	 * closestfirst will be set (so hint is the same as generic fuzzy case).
 	 */
 	if (state->rfirst && AttributeNumberIsValid(state->first))
 		closestfirst = strVal(list_nth(state->rfirst->eref->colnames,
@@ -3074,19 +3074,19 @@ errorMissingColumn(ParseState *pstate,
 		ereport(ERROR,
 				(errcode(ERRCODE_UNDEFINED_COLUMN),
 				 relname ?
-				 errmsg("column %s.%s does not exist", relname, colname):
+				 errmsg("column %s.%s does not exist", relname, colname) :
 				 errmsg("column \"%s\" does not exist", colname),
 				 state->rfirst ? closestfirst ?
-				 errhint("Perhaps you meant to reference the column \"%s\".\"%s\".",
-						 state->rfirst->eref->aliasname, closestfirst):
+		  errhint("Perhaps you meant to reference the column \"%s\".\"%s\".",
+				  state->rfirst->eref->aliasname, closestfirst) :
 				 errhint("There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query.",
-						 colname, state->rfirst->eref->aliasname): 0,
+						 colname, state->rfirst->eref->aliasname) : 0,
 				 parser_errposition(pstate, location)));
 	}
 	else
 	{
 		/* Handle case where there are two equally useful column hints */
-		char				   *closestsecond;
+		char	   *closestsecond;
 
 		closestsecond = strVal(list_nth(state->rsecond->eref->colnames,
 										state->second - 1));
@@ -3094,7 +3094,7 @@ errorMissingColumn(ParseState *pstate,
 		ereport(ERROR,
 				(errcode(ERRCODE_UNDEFINED_COLUMN),
 				 relname ?
-				 errmsg("column %s.%s does not exist", relname, colname):
+				 errmsg("column %s.%s does not exist", relname, colname) :
 				 errmsg("column \"%s\" does not exist", colname),
 				 errhint("Perhaps you meant to reference the column \"%s\".\"%s\" or the column \"%s\".\"%s\".",
 						 state->rfirst->eref->aliasname, closestfirst,
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index 1ba6ca76f42..661663994ee 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -797,7 +797,7 @@ fail:
 	ereport(ERROR,
 			(errcode(ERRCODE_SYNTAX_ERROR),
 			 errmsg("invalid type name \"%s\"", str)));
-	return NULL; /* keep compiler quiet */
+	return NULL;				/* keep compiler quiet */
 }
 
 /*
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 0a55db4a823..16d40c72406 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -1804,8 +1804,8 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
 					rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE)
 					ereport(ERROR,
 							(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-						   errmsg("inherited relation \"%s\" is not a table or foreign table",
-								  inh->relname)));
+							 errmsg("inherited relation \"%s\" is not a table or foreign table",
+									inh->relname)));
 				for (count = 0; count < rel->rd_att->natts; count++)
 				{
 					Form_pg_attribute inhattr = rel->rd_att->attrs[count];
@@ -2496,7 +2496,7 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
 
 			case AT_AlterColumnType:
 				{
-					ColumnDef *def = (ColumnDef *) cmd->def;
+					ColumnDef  *def = (ColumnDef *) cmd->def;
 
 					/*
 					 * For ALTER COLUMN TYPE, transform the USING clause if
diff --git a/src/backend/port/atomics.c b/src/backend/port/atomics.c
index 263c68f4662..3350fb7df30 100644
--- a/src/backend/port/atomics.c
+++ b/src/backend/port/atomics.c
@@ -15,7 +15,7 @@
 
 /*
  * We want the functions below to be inline; but if the compiler doesn't
- * support that, fall back on providing them as regular functions.	See
+ * support that, fall back on providing them as regular functions.  See
  * STATIC_IF_INLINE in c.h.
  */
 #define ATOMICS_INCLUDE_DEFINITIONS
@@ -50,6 +50,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
 					 "size mismatch of atomic_flag vs slock_t");
 
 #ifndef HAVE_SPINLOCKS
+
 	/*
 	 * NB: If we're using semaphore based TAS emulation, be careful to use a
 	 * separate set of semaphores. Otherwise we'd get in trouble if an atomic
@@ -73,7 +74,7 @@ pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
 	S_UNLOCK((slock_t *) &ptr->sema);
 }
 
-#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+#endif   /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
 
 #ifdef PG_HAVE_ATOMIC_U32_SIMULATION
 void
@@ -98,7 +99,8 @@ bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 									uint32 *expected, uint32 newval)
 {
-	bool ret;
+	bool		ret;
+
 	/*
 	 * Do atomic op under a spinlock. It might look like we could just skip
 	 * the cmpxchg if the lock isn't available, but that'd just emulate a
@@ -109,7 +111,7 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 	 */
 	SpinLockAcquire((slock_t *) &ptr->sema);
 
-	/* perform compare/exchange logic*/
+	/* perform compare/exchange logic */
 	ret = ptr->value == *expected;
 	*expected = ptr->value;
 	if (ret)
@@ -124,7 +126,8 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 uint32
 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 {
-	uint32 oldval;
+	uint32		oldval;
+
 	SpinLockAcquire((slock_t *) &ptr->sema);
 	oldval = ptr->value;
 	ptr->value += add_;
@@ -132,4 +135,4 @@ pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 	return oldval;
 }
 
-#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
+#endif   /* PG_HAVE_ATOMIC_U32_SIMULATION */
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index d95ab9273c5..8be5bbe1aba 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -391,7 +391,7 @@ CreateAnonymousSegment(Size *size)
 				 (mmap_errno == ENOMEM) ?
 				 errhint("This error usually means that PostgreSQL's request "
 					"for a shared memory segment exceeded available memory, "
-					  "swap space, or huge pages. To reduce the request size "
+					 "swap space, or huge pages. To reduce the request size "
 						 "(currently %zu bytes), reduce PostgreSQL's shared "
 					   "memory usage, perhaps by reducing shared_buffers or "
 						 "max_connections.",
diff --git a/src/backend/port/win32_latch.c b/src/backend/port/win32_latch.c
index c7d4bdddc21..ee9526245fd 100644
--- a/src/backend/port/win32_latch.c
+++ b/src/backend/port/win32_latch.c
@@ -151,7 +151,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
 	if (wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
 	{
 		/* Need an event object to represent events on the socket */
-		int			flags = FD_CLOSE; /* always check for errors/EOF */
+		int			flags = FD_CLOSE;	/* always check for errors/EOF */
 
 		if (wakeEvents & WL_SOCKET_READABLE)
 			flags |= FD_READ;
diff --git a/src/backend/port/win32_sema.c b/src/backend/port/win32_sema.c
index 011e2fd4a6a..4fd1e2aa133 100644
--- a/src/backend/port/win32_sema.c
+++ b/src/backend/port/win32_sema.c
@@ -153,6 +153,7 @@ PGSemaphoreLock(PGSemaphore sema)
 				done = true;
 				break;
 			case WAIT_IO_COMPLETION:
+
 				/*
 				 * The system interrupted the wait to execute an I/O
 				 * completion routine or asynchronous procedure call in this
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index f4b30ba80e2..5b699594449 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -297,8 +297,8 @@ static void do_autovacuum(void);
 static void FreeWorkerInfo(int code, Datum arg);
 
 static autovac_table *table_recheck_autovac(Oid relid, HTAB *table_toast_map,
-											TupleDesc pg_class_desc,
-									int effective_multixact_freeze_max_age);
+					  TupleDesc pg_class_desc,
+					  int effective_multixact_freeze_max_age);
 static void relation_needs_vacanalyze(Oid relid, AutoVacOpts *relopts,
 						  Form_pg_class classForm,
 						  PgStat_StatTabEntry *tabentry,
@@ -1915,8 +1915,8 @@ do_autovacuum(void)
 
 	/*
 	 * Compute the multixact age for which freezing is urgent.  This is
-	 * normally autovacuum_multixact_freeze_max_age, but may be less if we
-	 * are short of multixact member space.
+	 * normally autovacuum_multixact_freeze_max_age, but may be less if we are
+	 * short of multixact member space.
 	 */
 	effective_multixact_freeze_max_age = MultiXactMemberFreezeThreshold();
 
@@ -2782,7 +2782,7 @@ relation_needs_vacanalyze(Oid relid,
 static void
 autovacuum_do_vac_analyze(autovac_table *tab, BufferAccessStrategy bstrategy)
 {
-	RangeVar		rangevar;
+	RangeVar	rangevar;
 
 	/* Set up command parameters --- use local variables instead of palloc */
 	MemSet(&rangevar, 0, sizeof(rangevar));
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index 377733377be..f57224c10fe 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -254,15 +254,15 @@ BackgroundWorkerStateChange(void)
 		}
 
 		/*
-		 * If the worker is marked for termination, we don't need to add it
-		 * to the registered workers list; we can just free the slot.
-		 * However, if bgw_notify_pid is set, the process that registered the
-		 * worker may need to know that we've processed the terminate request,
-		 * so be sure to signal it.
+		 * If the worker is marked for termination, we don't need to add it to
+		 * the registered workers list; we can just free the slot. However, if
+		 * bgw_notify_pid is set, the process that registered the worker may
+		 * need to know that we've processed the terminate request, so be sure
+		 * to signal it.
 		 */
 		if (slot->terminate)
 		{
-			int	notify_pid;
+			int			notify_pid;
 
 			/*
 			 * We need a memory barrier here to make sure that the load of
@@ -426,7 +426,7 @@ BackgroundWorkerStopNotifications(pid_t pid)
 void
 ResetBackgroundWorkerCrashTimes(void)
 {
-	slist_mutable_iter	iter;
+	slist_mutable_iter iter;
 
 	slist_foreach_modify(iter, &BackgroundWorkerList)
 	{
@@ -435,8 +435,8 @@ ResetBackgroundWorkerCrashTimes(void)
 		rw = slist_container(RegisteredBgWorker, rw_lnode, iter.cur);
 
 		/*
-		 * For workers that should not be restarted, we don't want to lose
-		 * the information that they have crashed; otherwise, they would be
+		 * For workers that should not be restarted, we don't want to lose the
+		 * information that they have crashed; otherwise, they would be
 		 * restarted, which is wrong.
 		 */
 		if (rw->rw_worker.bgw_restart_time != BGW_NEVER_RESTART)
@@ -679,7 +679,8 @@ StartBackgroundWorker(void)
 		/*
 		 * Early initialization.  Some of this could be useful even for
 		 * background workers that aren't using shared memory, but they can
-		 * call the individual startup routines for those subsystems if needed.
+		 * call the individual startup routines for those subsystems if
+		 * needed.
 		 */
 		BaseInit();
 
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 1e6073abca4..e9fbc381cc9 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -2580,7 +2580,7 @@ CreateSharedBackendStatus(void)
 		buffer = (char *) BackendSslStatusBuffer;
 		for (i = 0; i < MaxBackends; i++)
 		{
-			BackendStatusArray[i].st_sslstatus = (PgBackendSSLStatus *)buffer;
+			BackendStatusArray[i].st_sslstatus = (PgBackendSSLStatus *) buffer;
 			buffer += sizeof(PgBackendSSLStatus);
 		}
 	}
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 87f543031ac..ee0b01820b1 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2603,7 +2603,7 @@ reaper(SIGNAL_ARGS)
 			if (EXIT_STATUS_3(exitstatus))
 			{
 				ereport(LOG,
-					(errmsg("shutdown at recovery target")));
+						(errmsg("shutdown at recovery target")));
 				Shutdown = SmartShutdown;
 				TerminateChildren(SIGTERM);
 				pmState = PM_WAIT_BACKENDS;
@@ -2930,9 +2930,9 @@ CleanupBackgroundWorker(int pid,
 		}
 
 		/*
-		 * We must release the postmaster child slot whether this worker
-		 * is connected to shared memory or not, but we only treat it as
-		 * a crash if it is in fact connected.
+		 * We must release the postmaster child slot whether this worker is
+		 * connected to shared memory or not, but we only treat it as a crash
+		 * if it is in fact connected.
 		 */
 		if (!ReleasePostmasterChildSlot(rw->rw_child_slot) &&
 			(rw->rw_worker.bgw_flags & BGWORKER_SHMEM_ACCESS) != 0)
diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c
index 4c1460cb1c4..fa29624667e 100644
--- a/src/backend/replication/basebackup.c
+++ b/src/backend/replication/basebackup.c
@@ -51,7 +51,7 @@ typedef struct
 
 
 static int64 sendDir(char *path, int basepathlen, bool sizeonly,
-					 List *tablespaces, bool sendtblspclinks);
+		List *tablespaces, bool sendtblspclinks);
 static bool sendFile(char *readfilename, char *tarfilename,
 		 struct stat * statbuf, bool missing_ok);
 static void sendFileWithContent(const char *filename, const char *content);
@@ -130,11 +130,12 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
 								  &labelfile, tblspcdir, &tablespaces,
 								  &tblspc_map_file,
 								  opt->progress, opt->sendtblspcmapfile);
+
 	/*
 	 * Once do_pg_start_backup has been called, ensure that any failure causes
-	 * us to abort the backup so we don't "leak" a backup counter. For this reason,
-	 * *all* functionality between do_pg_start_backup() and do_pg_stop_backup()
-	 * should be inside the error cleanup block!
+	 * us to abort the backup so we don't "leak" a backup counter. For this
+	 * reason, *all* functionality between do_pg_start_backup() and
+	 * do_pg_stop_backup() should be inside the error cleanup block!
 	 */
 
 	PG_ENSURE_ERROR_CLEANUP(base_backup_cleanup, (Datum) 0);
@@ -145,8 +146,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
 		SendXlogRecPtrResult(startptr, starttli);
 
 		/*
-		 * Calculate the relative path of temporary statistics directory in order
-		 * to skip the files which are located in that directory later.
+		 * Calculate the relative path of temporary statistics directory in
+		 * order to skip the files which are located in that directory later.
 		 */
 		if (is_absolute_path(pgstat_stat_directory) &&
 			strncmp(pgstat_stat_directory, DataDir, datadirpathlen) == 0)
@@ -900,8 +901,8 @@ sendDir(char *path, int basepathlen, bool sizeonly, List *tablespaces,
 		/*
 		 * If there's a backup_label or tablespace_map file, it belongs to a
 		 * backup started by the user with pg_start_backup(). It is *not*
-		 * correct for this backup, our backup_label/tablespace_map is injected
-		 * into the tar separately.
+		 * correct for this backup, our backup_label/tablespace_map is
+		 * injected into the tar separately.
 		 */
 		if (strcmp(de->d_name, BACKUP_LABEL_FILE) == 0)
 			continue;
@@ -1226,8 +1227,8 @@ _tarWriteHeader(const char *filename, const char *linktarget,
 	enum tarError rc;
 
 	rc = tarCreateHeader(h, filename, linktarget, statbuf->st_size,
-					statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
-					statbuf->st_mtime);
+						 statbuf->st_mode, statbuf->st_uid, statbuf->st_gid,
+						 statbuf->st_mtime);
 
 	switch (rc)
 	{
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index 19dc9efedd4..b7bbcf6ee77 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -89,15 +89,15 @@ _PG_init(void)
 static void
 libpqrcv_connect(char *conninfo)
 {
-	const char	*keys[5];
-	const char	*vals[5];
+	const char *keys[5];
+	const char *vals[5];
 
 	/*
-	 * We use the expand_dbname parameter to process the connection string
-	 * (or URI), and pass some extra options. The deliberately undocumented
-	 * parameter "replication=true" makes it a replication connection.
-	 * The database name is ignored by the server in replication mode, but
-	 * specify "replication" for .pgpass lookup.
+	 * We use the expand_dbname parameter to process the connection string (or
+	 * URI), and pass some extra options. The deliberately undocumented
+	 * parameter "replication=true" makes it a replication connection. The
+	 * database name is ignored by the server in replication mode, but specify
+	 * "replication" for .pgpass lookup.
 	 */
 	keys[0] = "dbname";
 	vals[0] = conninfo;
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index ea388182692..c629da317eb 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -67,9 +67,9 @@ static void DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf
 static void DecodeSpecConfirm(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
 
 static void DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
-						 xl_xact_parsed_commit *parsed, TransactionId xid);
+			 xl_xact_parsed_commit *parsed, TransactionId xid);
 static void DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
-						xl_xact_parsed_abort *parsed, TransactionId xid);
+			xl_xact_parsed_abort *parsed, TransactionId xid);
 
 /* common function to decode tuples */
 static void DecodeXLogTuple(char *data, Size len, ReorderBufferTupleBuf *tup);
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index ed78e36192a..824bc915b1d 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -234,7 +234,7 @@ CreateInitDecodingContext(char *plugin,
 	if (slot->data.database == InvalidOid)
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-				 errmsg("cannot use physical replication slot for logical decoding")));
+		errmsg("cannot use physical replication slot for logical decoding")));
 
 	if (slot->data.database != MyDatabaseId)
 		ereport(ERROR,
@@ -726,7 +726,7 @@ filter_by_origin_cb_wrapper(LogicalDecodingContext *ctx, RepOriginId origin_id)
 {
 	LogicalErrorCallbackState state;
 	ErrorContextCallback errcallback;
-	bool ret;
+	bool		ret;
 
 	/* Push callback + info on the error context stack */
 	state.ctx = ctx;
diff --git a/src/backend/replication/logical/logicalfuncs.c b/src/backend/replication/logical/logicalfuncs.c
index 021de837bed..a354a3f819f 100644
--- a/src/backend/replication/logical/logicalfuncs.c
+++ b/src/backend/replication/logical/logicalfuncs.c
@@ -400,7 +400,7 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin
 		 * what we need.
 		 */
 		if (!binary &&
-			ctx->options.output_type != OUTPUT_PLUGIN_TEXTUAL_OUTPUT)
+			ctx->options.output_type !=OUTPUT_PLUGIN_TEXTUAL_OUTPUT)
 			ereport(ERROR,
 					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 					 errmsg("logical decoding output plugin \"%s\" produces binary output, but \"%s\" expects textual data",
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index b4b98a512e1..f4ba86e8369 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -13,7 +13,7 @@
  * This file provides the following:
  * * An infrastructure to name nodes in a replication setup
  * * A facility to efficiently store and persist replication progress in an
- *   efficient and durable manner.
+ *	 efficient and durable manner.
  *
  * Replication origin consist out of a descriptive, user defined, external
  * name and a short, thus space efficient, internal 2 byte one. This split
@@ -45,22 +45,22 @@
  * There are several levels of locking at work:
  *
  * * To create and drop replication origins an exclusive lock on
- *   pg_replication_slot is required for the duration. That allows us to
- *   safely and conflict free assign new origins using a dirty snapshot.
+ *	 pg_replication_slot is required for the duration. That allows us to
+ *	 safely and conflict free assign new origins using a dirty snapshot.
  *
  * * When creating an in-memory replication progress slot the ReplicationOirgin
- *   LWLock has to be held exclusively; when iterating over the replication
- *   progress a shared lock has to be held, the same when advancing the
- *   replication progress of an individual backend that has not setup as the
- *   session's replication origin.
+ *	 LWLock has to be held exclusively; when iterating over the replication
+ *	 progress a shared lock has to be held, the same when advancing the
+ *	 replication progress of an individual backend that has not setup as the
+ *	 session's replication origin.
  *
  * * When manipulating or looking at the remote_lsn and local_lsn fields of a
- *   replication progress slot that slot's lwlock has to be held. That's
- *   primarily because we do not assume 8 byte writes (the LSN) is atomic on
- *   all our platforms, but it also simplifies memory ordering concerns
- *   between the remote and local lsn. We use a lwlock instead of a spinlock
- *   so it's less harmful to hold the lock over a WAL write
- *   (c.f. AdvanceReplicationProgress).
+ *	 replication progress slot that slot's lwlock has to be held. That's
+ *	 primarily because we do not assume 8 byte writes (the LSN) is atomic on
+ *	 all our platforms, but it also simplifies memory ordering concerns
+ *	 between the remote and local lsn. We use a lwlock instead of a spinlock
+ *	 so it's less harmful to hold the lock over a WAL write
+ *	 (c.f. AdvanceReplicationProgress).
  *
  * ---------------------------------------------------------------------------
  */
@@ -105,7 +105,7 @@ typedef struct ReplicationState
 	/*
 	 * Local identifier for the remote node.
 	 */
-	RepOriginId	roident;
+	RepOriginId roident;
 
 	/*
 	 * Location of the latest commit from the remote side.
@@ -135,22 +135,22 @@ typedef struct ReplicationState
  */
 typedef struct ReplicationStateOnDisk
 {
-	RepOriginId	roident;
+	RepOriginId roident;
 	XLogRecPtr	remote_lsn;
 } ReplicationStateOnDisk;
 
 
 typedef struct ReplicationStateCtl
 {
-	int					tranche_id;
-	LWLockTranche		tranche;
-	ReplicationState	states[FLEXIBLE_ARRAY_MEMBER];
+	int			tranche_id;
+	LWLockTranche tranche;
+	ReplicationState states[FLEXIBLE_ARRAY_MEMBER];
 } ReplicationStateCtl;
 
 /* external variables */
-RepOriginId	replorigin_sesssion_origin = InvalidRepOriginId; /* assumed identity */
+RepOriginId replorigin_sesssion_origin = InvalidRepOriginId;	/* assumed identity */
 XLogRecPtr	replorigin_sesssion_origin_lsn = InvalidXLogRecPtr;
-TimestampTz	replorigin_sesssion_origin_timestamp = 0;
+TimestampTz replorigin_sesssion_origin_timestamp = 0;
 
 /*
  * Base address into a shared memory array of replication states of size
@@ -188,7 +188,7 @@ replorigin_check_prerequisites(bool check_slots, bool recoveryOK)
 	if (!recoveryOK && RecoveryInProgress())
 		ereport(ERROR,
 				(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
-				 errmsg("cannot manipulate replication origins during recovery")));
+		   errmsg("cannot manipulate replication origins during recovery")));
 
 }
 
@@ -207,9 +207,9 @@ RepOriginId
 replorigin_by_name(char *roname, bool missing_ok)
 {
 	Form_pg_replication_origin ident;
-	Oid		roident = InvalidOid;
-	HeapTuple tuple;
-	Datum	roname_d;
+	Oid			roident = InvalidOid;
+	HeapTuple	tuple;
+	Datum		roname_d;
 
 	roname_d = CStringGetTextDatum(roname);
 
@@ -235,10 +235,10 @@ replorigin_by_name(char *roname, bool missing_ok)
 RepOriginId
 replorigin_create(char *roname)
 {
-	Oid		roident;
-	HeapTuple tuple = NULL;
-	Relation rel;
-	Datum	roname_d;
+	Oid			roident;
+	HeapTuple	tuple = NULL;
+	Relation	rel;
+	Datum		roname_d;
 	SnapshotData SnapshotDirty;
 	SysScanDesc scan;
 	ScanKeyData key;
@@ -271,6 +271,7 @@ replorigin_create(char *roname)
 		bool		nulls[Natts_pg_replication_origin];
 		Datum		values[Natts_pg_replication_origin];
 		bool		collides;
+
 		CHECK_FOR_INTERRUPTS();
 
 		ScanKeyInit(&key,
@@ -279,7 +280,7 @@ replorigin_create(char *roname)
 					ObjectIdGetDatum(roident));
 
 		scan = systable_beginscan(rel, ReplicationOriginIdentIndex,
-								  true /* indexOK */,
+								  true /* indexOK */ ,
 								  &SnapshotDirty,
 								  1, &key);
 
@@ -295,7 +296,7 @@ replorigin_create(char *roname)
 			 */
 			memset(&nulls, 0, sizeof(nulls));
 
-			values[Anum_pg_replication_origin_roident -1] = ObjectIdGetDatum(roident);
+			values[Anum_pg_replication_origin_roident - 1] = ObjectIdGetDatum(roident);
 			values[Anum_pg_replication_origin_roname - 1] = roname_d;
 
 			tuple = heap_form_tuple(RelationGetDescr(rel), values, nulls);
@@ -306,7 +307,7 @@ replorigin_create(char *roname)
 		}
 	}
 
-	/* now release lock again,  */
+	/* now release lock again,	*/
 	heap_close(rel, ExclusiveLock);
 
 	if (tuple == NULL)
@@ -327,8 +328,8 @@ replorigin_create(char *roname)
 void
 replorigin_drop(RepOriginId roident)
 {
-	HeapTuple tuple = NULL;
-	Relation rel;
+	HeapTuple	tuple = NULL;
+	Relation	rel;
 	int			i;
 
 	Assert(IsTransactionState());
@@ -379,7 +380,7 @@ replorigin_drop(RepOriginId roident)
 
 	CommandCounterIncrement();
 
-	/* now release lock again,  */
+	/* now release lock again,	*/
 	heap_close(rel, ExclusiveLock);
 }
 
@@ -394,7 +395,7 @@ replorigin_drop(RepOriginId roident)
 bool
 replorigin_by_oid(RepOriginId roident, bool missing_ok, char **roname)
 {
-	HeapTuple tuple;
+	HeapTuple	tuple;
 	Form_pg_replication_origin ric;
 
 	Assert(OidIsValid((Oid) roident));
@@ -446,7 +447,7 @@ ReplicationOriginShmemSize(void)
 	size = add_size(size, offsetof(ReplicationStateCtl, states));
 
 	size = add_size(size,
-					mul_size(max_replication_slots, sizeof(ReplicationState)));
+				  mul_size(max_replication_slots, sizeof(ReplicationState)));
 	return size;
 }
 
@@ -462,11 +463,11 @@ ReplicationOriginShmemInit(void)
 		ShmemInitStruct("ReplicationOriginState",
 						ReplicationOriginShmemSize(),
 						&found);
-	replication_states =	replication_states_ctl->states;
+	replication_states = replication_states_ctl->states;
 
 	if (!found)
 	{
-		int i;
+		int			i;
 
 		replication_states_ctl->tranche_id = LWLockNewTrancheId();
 		replication_states_ctl->tranche.name = "ReplicationOrigins";
@@ -556,7 +557,7 @@ CheckPointReplicationOrigin(void)
 	{
 		ReplicationStateOnDisk disk_state;
 		ReplicationState *curstate = &replication_states[i];
-		XLogRecPtr local_lsn;
+		XLogRecPtr	local_lsn;
 
 		if (curstate->roident == InvalidRepOriginId)
 			continue;
@@ -636,16 +637,17 @@ void
 StartupReplicationOrigin(void)
 {
 	const char *path = "pg_logical/replorigin_checkpoint";
-	int fd;
-	int readBytes;
-	uint32 magic = REPLICATION_STATE_MAGIC;
-	int last_state = 0;
-	pg_crc32c file_crc;
-	pg_crc32c crc;
+	int			fd;
+	int			readBytes;
+	uint32		magic = REPLICATION_STATE_MAGIC;
+	int			last_state = 0;
+	pg_crc32c	file_crc;
+	pg_crc32c	crc;
 
 	/* don't want to overwrite already existing state */
 #ifdef USE_ASSERT_CHECKING
 	static bool already_started = false;
+
 	Assert(!already_started);
 	already_started = true;
 #endif
@@ -660,8 +662,8 @@ StartupReplicationOrigin(void)
 	fd = OpenTransientFile((char *) path, O_RDONLY | PG_BINARY, 0);
 
 	/*
-	 * might have had max_replication_slots == 0 last run, or we just brought up a
-	 * standby.
+	 * might have had max_replication_slots == 0 last run, or we just brought
+	 * up a standby.
 	 */
 	if (fd < 0 && errno == ENOENT)
 		return;
@@ -681,8 +683,8 @@ StartupReplicationOrigin(void)
 
 	if (magic != REPLICATION_STATE_MAGIC)
 		ereport(PANIC,
-				(errmsg("replication checkpoint has wrong magic %u instead of %u",
-						magic, REPLICATION_STATE_MAGIC)));
+		   (errmsg("replication checkpoint has wrong magic %u instead of %u",
+				   magic, REPLICATION_STATE_MAGIC)));
 
 	/* we can skip locking here, no other access is possible */
 
@@ -697,7 +699,7 @@ StartupReplicationOrigin(void)
 		if (readBytes == sizeof(crc))
 		{
 			/* not pretty, but simple ... */
-			file_crc = *(pg_crc32c*) &disk_state;
+			file_crc = *(pg_crc32c *) &disk_state;
 			break;
 		}
 
@@ -731,8 +733,8 @@ StartupReplicationOrigin(void)
 
 		elog(LOG, "recovered replication state of node %u to %X/%X",
 			 disk_state.roident,
-			 (uint32)(disk_state.remote_lsn >> 32),
-			 (uint32)disk_state.remote_lsn);
+			 (uint32) (disk_state.remote_lsn >> 32),
+			 (uint32) disk_state.remote_lsn);
 	}
 
 	/* now check checksum */
@@ -756,18 +758,18 @@ replorigin_redo(XLogReaderState *record)
 		case XLOG_REPLORIGIN_SET:
 			{
 				xl_replorigin_set *xlrec =
-					(xl_replorigin_set *) XLogRecGetData(record);
+				(xl_replorigin_set *) XLogRecGetData(record);
 
 				replorigin_advance(xlrec->node_id,
 								   xlrec->remote_lsn, record->EndRecPtr,
-								   xlrec->force /* backward */,
-								   false /* WAL log */);
+								   xlrec->force /* backward */ ,
+								   false /* WAL log */ );
 				break;
 			}
 		case XLOG_REPLORIGIN_DROP:
 			{
 				xl_replorigin_drop *xlrec;
-				int i;
+				int			i;
 
 				xlrec = (xl_replorigin_drop *) XLogRecGetData(record);
 
@@ -812,7 +814,7 @@ replorigin_advance(RepOriginId node,
 				   XLogRecPtr remote_commit, XLogRecPtr local_commit,
 				   bool go_backward, bool wal_log)
 {
-	int i;
+	int			i;
 	ReplicationState *replication_state = NULL;
 	ReplicationState *free_state = NULL;
 
@@ -899,6 +901,7 @@ replorigin_advance(RepOriginId node,
 	if (wal_log)
 	{
 		xl_replorigin_set xlrec;
+
 		xlrec.remote_lsn = remote_commit;
 		xlrec.node_id = node;
 		xlrec.force = go_backward;
@@ -911,8 +914,8 @@ replorigin_advance(RepOriginId node,
 
 	/*
 	 * Due to - harmless - race conditions during a checkpoint we could see
-	 * values here that are older than the ones we already have in
-	 * memory. Don't overwrite those.
+	 * values here that are older than the ones we already have in memory.
+	 * Don't overwrite those.
 	 */
 	if (go_backward || replication_state->remote_lsn < remote_commit)
 		replication_state->remote_lsn = remote_commit;
@@ -973,7 +976,6 @@ replorigin_get_progress(RepOriginId node, bool flush)
 static void
 ReplicationOriginExitCleanup(int code, Datum arg)
 {
-
 	LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
 
 	if (session_replication_state != NULL &&
@@ -1000,8 +1002,8 @@ void
 replorigin_session_setup(RepOriginId node)
 {
 	static bool registered_cleanup;
-	int		i;
-	int		free_slot = -1;
+	int			i;
+	int			free_slot = -1;
 
 	if (!registered_cleanup)
 	{
@@ -1014,7 +1016,7 @@ replorigin_session_setup(RepOriginId node)
 	if (session_replication_state != NULL)
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-				 errmsg("cannot setup replication origin when one is already setup")));
+		errmsg("cannot setup replication origin when one is already setup")));
 
 	/* Lock exclusively, as we may have to create a new table entry. */
 	LWLockAcquire(ReplicationOriginLock, LW_EXCLUSIVE);
@@ -1043,8 +1045,8 @@ replorigin_session_setup(RepOriginId node)
 		{
 			ereport(ERROR,
 					(errcode(ERRCODE_OBJECT_IN_USE),
-					 errmsg("replication identiefer %d is already active for pid %d",
-							curstate->roident, curstate->acquired_by)));
+			 errmsg("replication identiefer %d is already active for pid %d",
+					curstate->roident, curstate->acquired_by)));
 		}
 
 		/* ok, found slot */
@@ -1126,8 +1128,8 @@ replorigin_session_advance(XLogRecPtr remote_commit, XLogRecPtr local_commit)
 XLogRecPtr
 replorigin_session_get_progress(bool flush)
 {
-	XLogRecPtr remote_lsn;
-	XLogRecPtr local_lsn;
+	XLogRecPtr	remote_lsn;
+	XLogRecPtr	local_lsn;
 
 	Assert(session_replication_state != NULL);
 
@@ -1158,7 +1160,7 @@ replorigin_session_get_progress(bool flush)
 Datum
 pg_replication_origin_create(PG_FUNCTION_ARGS)
 {
-	char *name;
+	char	   *name;
 	RepOriginId roident;
 
 	replorigin_check_prerequisites(false, false);
@@ -1177,7 +1179,7 @@ pg_replication_origin_create(PG_FUNCTION_ARGS)
 Datum
 pg_replication_origin_drop(PG_FUNCTION_ARGS)
 {
-	char *name;
+	char	   *name;
 	RepOriginId roident;
 
 	replorigin_check_prerequisites(false, false);
@@ -1200,7 +1202,7 @@ pg_replication_origin_drop(PG_FUNCTION_ARGS)
 Datum
 pg_replication_origin_oid(PG_FUNCTION_ARGS)
 {
-	char *name;
+	char	   *name;
 	RepOriginId roident;
 
 	replorigin_check_prerequisites(false, false);
@@ -1221,7 +1223,7 @@ pg_replication_origin_oid(PG_FUNCTION_ARGS)
 Datum
 pg_replication_origin_session_setup(PG_FUNCTION_ARGS)
 {
-	char *name;
+	char	   *name;
 	RepOriginId origin;
 
 	replorigin_check_prerequisites(true, false);
@@ -1329,8 +1331,8 @@ Datum
 pg_replication_origin_advance(PG_FUNCTION_ARGS)
 {
 	text	   *name = PG_GETARG_TEXT_P(0);
-	XLogRecPtr remote_commit = PG_GETARG_LSN(1);
-	RepOriginId  node;
+	XLogRecPtr	remote_commit = PG_GETARG_LSN(1);
+	RepOriginId node;
 
 	replorigin_check_prerequisites(true, false);
 
@@ -1345,7 +1347,7 @@ pg_replication_origin_advance(PG_FUNCTION_ARGS)
 	 * set up the initial replication state, but not for replay.
 	 */
 	replorigin_advance(node, remote_commit, InvalidXLogRecPtr,
-					   true /* go backward */, true /* wal log */);
+					   true /* go backward */ , true /* wal log */ );
 
 	UnlockRelationOid(ReplicationOriginRelationId, RowExclusiveLock);
 
@@ -1365,7 +1367,7 @@ pg_replication_origin_progress(PG_FUNCTION_ARGS)
 {
 	char	   *name;
 	bool		flush;
-	RepOriginId	roident;
+	RepOriginId roident;
 	XLogRecPtr	remote_lsn = InvalidXLogRecPtr;
 
 	replorigin_check_prerequisites(true, true);
@@ -1456,7 +1458,7 @@ pg_show_replication_origin_status(PG_FUNCTION_ARGS)
 		 * silently accept that it might be gone.
 		 */
 		if (replorigin_by_oid(state->roident, true,
-							 &roname))
+							  &roname))
 		{
 			values[1] = CStringGetTextDatum(roname);
 			nulls[1] = false;
@@ -1464,7 +1466,7 @@ pg_show_replication_origin_status(PG_FUNCTION_ARGS)
 
 		LWLockAcquire(&state->lock, LW_SHARED);
 
-		values[ 2] = LSNGetDatum(state->remote_lsn);
+		values[2] = LSNGetDatum(state->remote_lsn);
 		nulls[2] = false;
 
 		values[3] = LSNGetDatum(state->local_lsn);
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 2d86323f6f4..fa98580302a 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -1337,6 +1337,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
 			switch (change->action)
 			{
 				case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM:
+
 					/*
 					 * Confirmation for speculative insertion arrived. Simply
 					 * use as a normal record. It'll be cleaned up at the end
@@ -1380,10 +1381,10 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
 						goto change_done;
 
 					/*
-					 * For now ignore sequence changes entirely. Most of
-					 * the time they don't log changes using records we
-					 * understand, so it doesn't make sense to handle the
-					 * few cases we do.
+					 * For now ignore sequence changes entirely. Most of the
+					 * time they don't log changes using records we
+					 * understand, so it doesn't make sense to handle the few
+					 * cases we do.
 					 */
 					if (relation->rd_rel->relkind == RELKIND_SEQUENCE)
 						goto change_done;
@@ -1395,9 +1396,9 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
 						rb->apply_change(rb, txn, relation, change);
 
 						/*
-						 * Only clear reassembled toast chunks if we're
-						 * sure they're not required anymore. The creator
-						 * of the tuple tells us.
+						 * Only clear reassembled toast chunks if we're sure
+						 * they're not required anymore. The creator of the
+						 * tuple tells us.
 						 */
 						if (change->data.tp.clear_toast_afterwards)
 							ReorderBufferToastReset(rb, txn);
@@ -1418,7 +1419,8 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
 													  change);
 					}
 
-				change_done:
+			change_done:
+
 					/*
 					 * Either speculative insertion was confirmed, or it was
 					 * unsuccessful and the record isn't needed anymore.
@@ -1437,6 +1439,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
 					break;
 
 				case REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT:
+
 					/*
 					 * Speculative insertions are dealt with by delaying the
 					 * processing of the insert until the confirmation record
@@ -1704,9 +1707,9 @@ ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
 	txn->final_lsn = lsn;
 
 	/*
-	 * Process cache invalidation messages if there are any. Even if we're
-	 * not interested in the transaction's contents, it could have manipulated
-	 * the catalog and we need to update the caches according to that.
+	 * Process cache invalidation messages if there are any. Even if we're not
+	 * interested in the transaction's contents, it could have manipulated the
+	 * catalog and we need to update the caches according to that.
 	 */
 	if (txn->base_snapshot != NULL && txn->ninvalidations > 0)
 	{
@@ -2068,7 +2071,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
 
 	switch (change->action)
 	{
-		/* fall through these, they're all similar enough */
+			/* fall through these, they're all similar enough */
 		case REORDER_BUFFER_CHANGE_INSERT:
 		case REORDER_BUFFER_CHANGE_UPDATE:
 		case REORDER_BUFFER_CHANGE_DELETE:
@@ -2322,7 +2325,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
 	/* restore individual stuff */
 	switch (change->action)
 	{
-		/* fall through these, they're all similar enough */
+			/* fall through these, they're all similar enough */
 		case REORDER_BUFFER_CHANGE_INSERT:
 		case REORDER_BUFFER_CHANGE_UPDATE:
 		case REORDER_BUFFER_CHANGE_DELETE:
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 35e1c06a31b..efab4ca0df1 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -153,9 +153,8 @@ struct SnapBuild
 	TransactionId xmax;
 
 	/*
-	 * Don't replay commits from an LSN < this LSN. This can be set
-	 * externally but it will also be advanced (never retreat) from within
-	 * snapbuild.c.
+	 * Don't replay commits from an LSN < this LSN. This can be set externally
+	 * but it will also be advanced (never retreat) from within snapbuild.c.
 	 */
 	XLogRecPtr	start_decoding_at;
 
@@ -244,7 +243,7 @@ struct SnapBuild
  * removes knowledge about the previously used resowner, so we save it here.
  */
 static ResourceOwner SavedResourceOwnerDuringExport = NULL;
-static bool	ExportInProgress = false;
+static bool ExportInProgress = false;
 
 /* transaction state manipulation functions */
 static void SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid);
@@ -599,7 +598,7 @@ SnapBuildExportSnapshot(SnapBuild *builder)
 
 	ereport(LOG,
 			(errmsg_plural("exported logical decoding snapshot: \"%s\" with %u transaction ID",
-						   "exported logical decoding snapshot: \"%s\" with %u transaction IDs",
+		"exported logical decoding snapshot: \"%s\" with %u transaction IDs",
 						   snap->xcnt,
 						   snapname, snap->xcnt)));
 	return snapname;
@@ -904,8 +903,8 @@ SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid)
 			ereport(LOG,
 				  (errmsg("logical decoding found consistent point at %X/%X",
 						  (uint32) (lsn >> 32), (uint32) lsn),
-				errdetail("Transaction ID %u finished; no more running transactions.",
-						  xid)));
+				   errdetail("Transaction ID %u finished; no more running transactions.",
+							 xid)));
 			builder->state = SNAPBUILD_CONSISTENT;
 		}
 	}
@@ -1232,8 +1231,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
 	{
 		ereport(DEBUG1,
 				(errmsg_internal("skipping snapshot at %X/%X while building logical decoding snapshot, xmin horizon too low",
-						(uint32) (lsn >> 32), (uint32) lsn),
-				 errdetail_internal("initial xmin horizon of %u vs the snapshot's %u",
+								 (uint32) (lsn >> 32), (uint32) lsn),
+		errdetail_internal("initial xmin horizon of %u vs the snapshot's %u",
 				 builder->initial_xmin_horizon, running->oldestRunningXid)));
 		return true;
 	}
@@ -1252,8 +1251,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
 			builder->start_decoding_at = lsn + 1;
 
 		/* As no transactions were running xmin/xmax can be trivially set. */
-		builder->xmin = running->nextXid; /* < are finished */
-		builder->xmax = running->nextXid; /* >= are running */
+		builder->xmin = running->nextXid;		/* < are finished */
+		builder->xmax = running->nextXid;		/* >= are running */
 
 		/* so we can safely use the faster comparisons */
 		Assert(TransactionIdIsNormal(builder->xmin));
@@ -1302,8 +1301,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
 		 * currently running transactions have finished. We'll update both
 		 * while waiting for the pending transactions to finish.
 		 */
-		builder->xmin = running->nextXid; /* < are finished */
-		builder->xmax = running->nextXid;  /* >= are running */
+		builder->xmin = running->nextXid;		/* < are finished */
+		builder->xmax = running->nextXid;		/* >= are running */
 
 		/* so we can safely use the faster comparisons */
 		Assert(TransactionIdIsNormal(builder->xmin));
@@ -1688,7 +1687,7 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn)
 
 	INIT_CRC32C(checksum);
 	COMP_CRC32C(checksum,
-			   ((char *) &ondisk) + SnapBuildOnDiskNotChecksummedSize,
+				((char *) &ondisk) + SnapBuildOnDiskNotChecksummedSize,
 			SnapBuildOnDiskConstantSize - SnapBuildOnDiskNotChecksummedSize);
 
 	/* read SnapBuild */
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index e02571b8bbc..060343f1689 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -84,7 +84,7 @@ typedef struct ReplicationSlotOnDisk
 	sizeof(ReplicationSlotOnDisk) - ReplicationSlotOnDiskConstantSize
 
 #define SLOT_MAGIC		0x1051CA1		/* format identifier */
-#define SLOT_VERSION	2				/* version for new files */
+#define SLOT_VERSION	2		/* version for new files */
 
 /* Control array for replication slot management */
 ReplicationSlotCtlData *ReplicationSlotCtl = NULL;
@@ -349,8 +349,8 @@ ReplicationSlotAcquire(const char *name)
 	if (active_pid != 0)
 		ereport(ERROR,
 				(errcode(ERRCODE_OBJECT_IN_USE),
-				 errmsg("replication slot \"%s\" is already active for pid %d",
-						name, active_pid)));
+			   errmsg("replication slot \"%s\" is already active for pid %d",
+					  name, active_pid)));
 
 	/* We made this slot active, so it's ours now. */
 	MyReplicationSlot = slot;
diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c
index 3d9aadbd839..9a2793f7ec3 100644
--- a/src/backend/replication/slotfuncs.c
+++ b/src/backend/replication/slotfuncs.c
@@ -99,9 +99,9 @@ pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
 	CheckLogicalDecodingRequirements();
 
 	/*
-	 * Acquire a logical decoding slot, this will check for conflicting
-	 * names. Initially create it as ephemeral - that allows us to nicely
-	 * handle errors during initialization because it'll get dropped if this
+	 * Acquire a logical decoding slot, this will check for conflicting names.
+	 * Initially create it as ephemeral - that allows us to nicely handle
+	 * errors during initialization because it'll get dropped if this
 	 * transaction fails. We'll make it persistent at the end.
 	 */
 	ReplicationSlotCreate(NameStr(*name), true, RS_EPHEMERAL);
diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c
index b26f5fcf637..f77a790fd87 100644
--- a/src/backend/replication/walreceiverfuncs.c
+++ b/src/backend/replication/walreceiverfuncs.c
@@ -329,7 +329,7 @@ GetReplicationApplyDelay(void)
 	long		secs;
 	int			usecs;
 
-	TimestampTz	chunckReplayStartTime;
+	TimestampTz chunckReplayStartTime;
 
 	SpinLockAcquire(&walrcv->mutex);
 	receivePtr = walrcv->receivedUpto;
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 4a20569e65e..eb1b89b9c32 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -781,6 +781,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
 	else
 	{
 		CheckLogicalDecodingRequirements();
+
 		/*
 		 * Initially create the slot as ephemeral - that allows us to nicely
 		 * handle errors during initialization because it'll get dropped if
@@ -1266,9 +1267,9 @@ exec_replication_command(const char *cmd_string)
 	MemoryContext old_context;
 
 	/*
-	 * Log replication command if log_replication_commands is enabled.
-	 * Even when it's disabled, log the command with DEBUG1 level for
-	 * backward compatibility.
+	 * Log replication command if log_replication_commands is enabled. Even
+	 * when it's disabled, log the command with DEBUG1 level for backward
+	 * compatibility.
 	 */
 	ereport(log_replication_commands ? LOG : DEBUG1,
 			(errmsg("received replication command: %s", cmd_string)));
@@ -2663,8 +2664,8 @@ WalSndWakeup(void)
 
 	for (i = 0; i < max_wal_senders; i++)
 	{
-		Latch *latch;
-		WalSnd *walsnd = &WalSndCtl->walsnds[i];
+		Latch	   *latch;
+		WalSnd	   *walsnd = &WalSndCtl->walsnds[i];
 
 		/*
 		 * Get latch pointer with spinlock held, for the unlikely case that
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index e15e23c2e1f..bbd6b77c5ea 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -682,7 +682,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
  * order of the original tlist's non-junk entries.  This is needed for
  * processing VALUES RTEs.
  */
-static List*
+static List *
 rewriteTargetListIU(List *targetList,
 					CmdType commandType,
 					Relation target_relation,
@@ -1750,8 +1750,8 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
 	/*
 	 * Apply any row level security policies.  We do this last because it
 	 * requires special recursion detection if the new quals have sublink
-	 * subqueries, and if we did it in the loop above query_tree_walker
-	 * would then recurse into those quals a second time.
+	 * subqueries, and if we did it in the loop above query_tree_walker would
+	 * then recurse into those quals a second time.
 	 */
 	rt_index = 0;
 	foreach(lc, parsetree->rtable)
@@ -1795,11 +1795,11 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
 
 				activeRIRs = lcons_oid(RelationGetRelid(rel), activeRIRs);
 
-				expression_tree_walker( (Node*) securityQuals,
-										fireRIRonSubLink, (void*)activeRIRs );
+				expression_tree_walker((Node *) securityQuals,
+									   fireRIRonSubLink, (void *) activeRIRs);
 
-				expression_tree_walker( (Node*) withCheckOptions,
-										fireRIRonSubLink, (void*)activeRIRs );
+				expression_tree_walker((Node *) withCheckOptions,
+									   fireRIRonSubLink, (void *) activeRIRs);
 
 				activeRIRs = list_delete_first(activeRIRs);
 			}
@@ -1814,7 +1814,7 @@ fireRIRrules(Query *parsetree, List *activeRIRs, bool forUpdatePushedDown)
 											 rte->securityQuals);
 
 			parsetree->withCheckOptions = list_concat(withCheckOptions,
-													  parsetree->withCheckOptions);
+												parsetree->withCheckOptions);
 		}
 
 		/*
@@ -2662,7 +2662,7 @@ rewriteTargetView(Query *parsetree, Relation view)
 
 				if (!tle->resjunk)
 					modified_cols = bms_add_member(modified_cols,
-								tle->resno - FirstLowInvalidHeapAttributeNumber);
+							tle->resno - FirstLowInvalidHeapAttributeNumber);
 			}
 		}
 
@@ -2797,8 +2797,8 @@ rewriteTargetView(Query *parsetree, Relation view)
 	 * happens in ordinary SELECT usage of a view: all referenced columns must
 	 * have read permission, even if optimization finds that some of them can
 	 * be discarded during query transformation.  The flattening we're doing
-	 * here is an optional optimization, too.  (If you are unpersuaded and want
-	 * to change this, note that applying adjust_view_column_set to
+	 * here is an optional optimization, too.  (If you are unpersuaded and
+	 * want to change this, note that applying adjust_view_column_set to
 	 * view_rte->selectedCols is clearly *not* the right answer, since that
 	 * neglects base-rel columns used in the view's WHERE quals.)
 	 *
@@ -3150,9 +3150,9 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
 
 				/* Process the main targetlist ... */
 				parsetree->targetList = rewriteTargetListIU(parsetree->targetList,
-															parsetree->commandType,
+													  parsetree->commandType,
 															rt_entry_relation,
-															parsetree->resultRelation,
+												   parsetree->resultRelation,
 															&attrnos);
 				/* ... and the VALUES expression lists */
 				rewriteValuesRTE(values_rte, rt_entry_relation, attrnos);
@@ -3334,9 +3334,9 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
 		if (parsetree->onConflict &&
 			(product_queries != NIL || hasUpdate) &&
 			!updatableview)
-				ereport(ERROR,
-						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-						 errmsg("INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules")));
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules")));
 
 		heap_close(rt_entry_relation, NoLock);
 	}
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index e3dfdefe55c..1da90ff8943 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -729,7 +729,7 @@ IncrementVarSublevelsUp_walker(Node *node,
 	}
 	if (IsA(node, GroupingFunc))
 	{
-		GroupingFunc   *grp = (GroupingFunc *) node;
+		GroupingFunc *grp = (GroupingFunc *) node;
 
 		if (grp->agglevelsup >= context->min_sublevels_up)
 			grp->agglevelsup += context->delta_sublevels_up;
diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c
index 2c095ce88ac..5a2f696934a 100644
--- a/src/backend/rewrite/rowsecurity.c
+++ b/src/backend/rewrite/rowsecurity.c
@@ -1,6 +1,6 @@
 /*
  * rewrite/rowsecurity.c
- *    Routines to support policies for row level security (aka RLS).
+ *	  Routines to support policies for row level security (aka RLS).
  *
  * Policies in PostgreSQL provide a mechanism to limit what records are
  * returned to a user and what records a user is permitted to add to a table.
@@ -57,12 +57,12 @@
 #include "tcop/utility.h"
 
 static List *pull_row_security_policies(CmdType cmd, Relation relation,
-										Oid user_id);
-static void process_policies(Query* root, List *policies, int rt_index,
-							 Expr **final_qual,
-							 Expr **final_with_check_qual,
-							 bool *hassublinks,
-							 BoolExprType boolop);
+						   Oid user_id);
+static void process_policies(Query *root, List *policies, int rt_index,
+				 Expr **final_qual,
+				 Expr **final_with_check_qual,
+				 bool *hassublinks,
+				 BoolExprType boolop);
 static bool check_role_for_policy(ArrayType *policy_roles, Oid user_id);
 
 /*
@@ -77,8 +77,8 @@ static bool check_role_for_policy(ArrayType *policy_roles, Oid user_id);
  * See below where the hook is called in prepend_row_security_policies for
  * insight into how to use this hook.
  */
-row_security_policy_hook_type	row_security_policy_hook_permissive = NULL;
-row_security_policy_hook_type	row_security_policy_hook_restrictive = NULL;
+row_security_policy_hook_type row_security_policy_hook_permissive = NULL;
+row_security_policy_hook_type row_security_policy_hook_restrictive = NULL;
 
 /*
  * Get any row security quals and check quals that should be applied to the
@@ -89,27 +89,27 @@ row_security_policy_hook_type	row_security_policy_hook_restrictive = NULL;
  * set to true if any of the quals returned contain sublinks.
  */
 void
-get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
+get_row_security_policies(Query *root, CmdType commandType, RangeTblEntry *rte,
 						  int rt_index, List **securityQuals,
 						  List **withCheckOptions, bool *hasRowSecurity,
 						  bool *hasSubLinks)
 {
-	Expr			   *rowsec_expr = NULL;
-	Expr			   *rowsec_with_check_expr = NULL;
-	Expr			   *hook_expr_restrictive = NULL;
-	Expr			   *hook_with_check_expr_restrictive = NULL;
-	Expr			   *hook_expr_permissive = NULL;
-	Expr			   *hook_with_check_expr_permissive = NULL;
-
-	List			   *rowsec_policies;
-	List			   *hook_policies_restrictive = NIL;
-	List			   *hook_policies_permissive = NIL;
-
-	Relation 			rel;
-	Oid					user_id;
-	int					sec_context;
-	int					rls_status;
-	bool				defaultDeny = false;
+	Expr	   *rowsec_expr = NULL;
+	Expr	   *rowsec_with_check_expr = NULL;
+	Expr	   *hook_expr_restrictive = NULL;
+	Expr	   *hook_with_check_expr_restrictive = NULL;
+	Expr	   *hook_expr_permissive = NULL;
+	Expr	   *hook_with_check_expr_permissive = NULL;
+
+	List	   *rowsec_policies;
+	List	   *hook_policies_restrictive = NIL;
+	List	   *hook_policies_permissive = NIL;
+
+	Relation	rel;
+	Oid			user_id;
+	int			sec_context;
+	int			rls_status;
+	bool		defaultDeny = false;
 
 	/* Defaults for the return values */
 	*securityQuals = NIL;
@@ -124,9 +124,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 	user_id = rte->checkAsUser ? rte->checkAsUser : GetUserId();
 
 	/*
-	 * If this is not a normal relation, or we have been told
-	 * to explicitly skip RLS (perhaps because this is an FK check)
-	 * then just return immediately.
+	 * If this is not a normal relation, or we have been told to explicitly
+	 * skip RLS (perhaps because this is an FK check) then just return
+	 * immediately.
 	 */
 	if (rte->relid < FirstNormalObjectId
 		|| rte->relkind != RELKIND_RELATION
@@ -148,9 +148,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 	if (rls_status == RLS_NONE_ENV)
 	{
 		/*
-		 * Indicate that this query may involve RLS and must therefore
-		 * be replanned if the environment changes (GUCs, role), but we
-		 * are not adding anything here.
+		 * Indicate that this query may involve RLS and must therefore be
+		 * replanned if the environment changes (GUCs, role), but we are not
+		 * adding anything here.
 		 */
 		*hasRowSecurity = true;
 
@@ -166,15 +166,14 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 	/*
 	 * Check if this is only the default-deny policy.
 	 *
-	 * Normally, if the table has row security enabled but there are
-	 * no policies, we use a default-deny policy and not allow anything.
-	 * However, when an extension uses the hook to add their own
-	 * policies, we don't want to include the default deny policy or
-	 * there won't be any way for a user to use an extension exclusively
-	 * for the policies to be used.
+	 * Normally, if the table has row security enabled but there are no
+	 * policies, we use a default-deny policy and not allow anything. However,
+	 * when an extension uses the hook to add their own policies, we don't
+	 * want to include the default deny policy or there won't be any way for a
+	 * user to use an extension exclusively for the policies to be used.
 	 */
 	if (((RowSecurityPolicy *) linitial(rowsec_policies))->policy_id
-			== InvalidOid)
+		== InvalidOid)
 		defaultDeny = true;
 
 	/* Now that we have our policies, build the expressions from them. */
@@ -187,8 +186,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 	 * extensions can add either permissive or restrictive policies.
 	 *
 	 * Note that, as with the internal policies, if multiple policies are
-	 * returned then they will be combined into a single expression with
-	 * all of them OR'd (for permissive) or AND'd (for restrictive) together.
+	 * returned then they will be combined into a single expression with all
+	 * of them OR'd (for permissive) or AND'd (for restrictive) together.
 	 *
 	 * If only a USING policy is returned by the extension then it will be
 	 * used for WITH CHECK as well, similar to how internal policies are
@@ -202,7 +201,7 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 	 */
 	if (row_security_policy_hook_restrictive)
 	{
-		hook_policies_restrictive = (*row_security_policy_hook_restrictive)(commandType, rel);
+		hook_policies_restrictive = (*row_security_policy_hook_restrictive) (commandType, rel);
 
 		/* Build the expression from any policies returned. */
 		if (hook_policies_restrictive != NIL)
@@ -215,7 +214,7 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 
 	if (row_security_policy_hook_permissive)
 	{
-		hook_policies_permissive = (*row_security_policy_hook_permissive)(commandType, rel);
+		hook_policies_permissive = (*row_security_policy_hook_permissive) (commandType, rel);
 
 		/* Build the expression from any policies returned. */
 		if (hook_policies_permissive != NIL)
@@ -226,9 +225,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 	}
 
 	/*
-	 * If the only built-in policy is the default-deny one, and hook
-	 * policies exist, then use the hook policies only and do not apply
-	 * the default-deny policy.  Otherwise, we will apply both sets below.
+	 * If the only built-in policy is the default-deny one, and hook policies
+	 * exist, then use the hook policies only and do not apply the
+	 * default-deny policy.  Otherwise, we will apply both sets below.
 	 */
 	if (defaultDeny &&
 		(hook_policies_restrictive != NIL || hook_policies_permissive != NIL))
@@ -238,10 +237,10 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 	}
 
 	/*
-	 * For INSERT or UPDATE, we need to add the WITH CHECK quals to
-	 * Query's withCheckOptions to verify that any new records pass the
-	 * WITH CHECK policy (this will be a copy of the USING policy, if no
-	 * explicit WITH CHECK policy exists).
+	 * For INSERT or UPDATE, we need to add the WITH CHECK quals to Query's
+	 * withCheckOptions to verify that any new records pass the WITH CHECK
+	 * policy (this will be a copy of the USING policy, if no explicit WITH
+	 * CHECK policy exists).
 	 */
 	if (commandType == CMD_INSERT || commandType == CMD_UPDATE)
 	{
@@ -257,11 +256,11 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 		 */
 		if (hook_with_check_expr_restrictive)
 		{
-			WithCheckOption	   *wco;
+			WithCheckOption *wco;
 
 			wco = (WithCheckOption *) makeNode(WithCheckOption);
 			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
-														  WCO_RLS_UPDATE_CHECK;
+				WCO_RLS_UPDATE_CHECK;
 			wco->relname = pstrdup(RelationGetRelationName(rel));
 			wco->qual = (Node *) hook_with_check_expr_restrictive;
 			wco->cascaded = false;
@@ -269,16 +268,16 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 		}
 
 		/*
-		 * Handle built-in policies, if there are no permissive
-		 * policies from the hook.
+		 * Handle built-in policies, if there are no permissive policies from
+		 * the hook.
 		 */
 		if (rowsec_with_check_expr && !hook_with_check_expr_permissive)
 		{
-			WithCheckOption	   *wco;
+			WithCheckOption *wco;
 
 			wco = (WithCheckOption *) makeNode(WithCheckOption);
 			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
-														  WCO_RLS_UPDATE_CHECK;
+				WCO_RLS_UPDATE_CHECK;
 			wco->relname = pstrdup(RelationGetRelationName(rel));
 			wco->qual = (Node *) rowsec_with_check_expr;
 			wco->cascaded = false;
@@ -287,11 +286,11 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 		/* Handle the hook policies, if there are no built-in ones. */
 		else if (!rowsec_with_check_expr && hook_with_check_expr_permissive)
 		{
-			WithCheckOption	   *wco;
+			WithCheckOption *wco;
 
 			wco = (WithCheckOption *) makeNode(WithCheckOption);
 			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
-														  WCO_RLS_UPDATE_CHECK;
+				WCO_RLS_UPDATE_CHECK;
 			wco->relname = pstrdup(RelationGetRelationName(rel));
 			wco->qual = (Node *) hook_with_check_expr_permissive;
 			wco->cascaded = false;
@@ -300,9 +299,9 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 		/* Handle the case where there are both. */
 		else if (rowsec_with_check_expr && hook_with_check_expr_permissive)
 		{
-			WithCheckOption	   *wco;
-			List			   *combined_quals = NIL;
-			Expr			   *combined_qual_eval;
+			WithCheckOption *wco;
+			List	   *combined_quals = NIL;
+			Expr	   *combined_qual_eval;
 
 			combined_quals = lcons(copyObject(rowsec_with_check_expr),
 								   combined_quals);
@@ -314,7 +313,7 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 
 			wco = (WithCheckOption *) makeNode(WithCheckOption);
 			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
-														  WCO_RLS_UPDATE_CHECK;
+				WCO_RLS_UPDATE_CHECK;
 			wco->relname = pstrdup(RelationGetRelationName(rel));
 			wco->qual = (Node *) combined_qual_eval;
 			wco->cascaded = false;
@@ -361,8 +360,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 
 			foreach(item, conflictSecurityQuals)
 			{
-				Expr			   *conflict_rowsec_expr = (Expr *) lfirst(item);
-				WithCheckOption	   *wco;
+				Expr	   *conflict_rowsec_expr = (Expr *) lfirst(item);
+				WithCheckOption *wco;
 
 				wco = (WithCheckOption *) makeNode(WithCheckOption);
 
@@ -393,8 +392,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 		/* if we have both, we have to combine them with an OR */
 		else if (rowsec_expr && hook_expr_permissive)
 		{
-			List   *combined_quals = NIL;
-			Expr   *combined_qual_eval;
+			List	   *combined_quals = NIL;
+			Expr	   *combined_qual_eval;
 
 			combined_quals = lcons(copyObject(rowsec_expr), combined_quals);
 			combined_quals = lcons(copyObject(hook_expr_permissive),
@@ -409,8 +408,8 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 	heap_close(rel, NoLock);
 
 	/*
-	 * Mark this query as having row security, so plancache can invalidate
-	 * it when necessary (eg: role changes)
+	 * Mark this query as having row security, so plancache can invalidate it
+	 * when necessary (eg: role changes)
 	 */
 	*hasRowSecurity = true;
 
@@ -427,26 +426,27 @@ get_row_security_policies(Query* root, CmdType commandType, RangeTblEntry* rte,
 static List *
 pull_row_security_policies(CmdType cmd, Relation relation, Oid user_id)
 {
-	List			   *policies = NIL;
-	ListCell		   *item;
+	List	   *policies = NIL;
+	ListCell   *item;
 
 	/*
 	 * Row security is enabled for the relation and the row security GUC is
-	 * either 'on' or 'force' here, so find the policies to apply to the table.
-	 * There must always be at least one policy defined (may be the simple
-	 * 'default-deny' policy, if none are explicitly defined on the table).
+	 * either 'on' or 'force' here, so find the policies to apply to the
+	 * table. There must always be at least one policy defined (may be the
+	 * simple 'default-deny' policy, if none are explicitly defined on the
+	 * table).
 	 */
 	foreach(item, relation->rd_rsdesc->policies)
 	{
-		RowSecurityPolicy  *policy = (RowSecurityPolicy *) lfirst(item);
+		RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item);
 
 		/* Always add ALL policies, if they exist. */
 		if (policy->polcmd == '*' &&
-				check_role_for_policy(policy->roles, user_id))
+			check_role_for_policy(policy->roles, user_id))
 			policies = lcons(policy, policies);
 
 		/* Add relevant command-specific policies to the list. */
-		switch(cmd)
+		switch (cmd)
 		{
 			case CMD_SELECT:
 				if (policy->polcmd == ACL_SELECT_CHR
@@ -482,8 +482,8 @@ pull_row_security_policies(CmdType cmd, Relation relation, Oid user_id)
 	 */
 	if (policies == NIL)
 	{
-		RowSecurityPolicy  *policy = NULL;
-		Datum               role;
+		RowSecurityPolicy *policy = NULL;
+		Datum		role;
 
 		role = ObjectIdGetDatum(ACL_ID_PUBLIC);
 
@@ -519,18 +519,18 @@ pull_row_security_policies(CmdType cmd, Relation relation, Oid user_id)
  * qual_eval, with_check_eval, and hassublinks are output variables
  */
 static void
-process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
+process_policies(Query *root, List *policies, int rt_index, Expr **qual_eval,
 				 Expr **with_check_eval, bool *hassublinks,
 				 BoolExprType boolop)
 {
-	ListCell		   *item;
-	List			   *quals = NIL;
-	List			   *with_check_quals = NIL;
+	ListCell   *item;
+	List	   *quals = NIL;
+	List	   *with_check_quals = NIL;
 
 	/*
-	 * Extract the USING and WITH CHECK quals from each of the policies
-	 * and add them to our lists.  We only want WITH CHECK quals if this
-	 * RTE is the query's result relation.
+	 * Extract the USING and WITH CHECK quals from each of the policies and
+	 * add them to our lists.  We only want WITH CHECK quals if this RTE is
+	 * the query's result relation.
 	 */
 	foreach(item, policies)
 	{
@@ -545,8 +545,8 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
 									 with_check_quals);
 
 		/*
-		 * For each policy, if there is only a USING clause then copy/use it for
-		 * the WITH CHECK policy also, if this RTE is the query's result
+		 * For each policy, if there is only a USING clause then copy/use it
+		 * for the WITH CHECK policy also, if this RTE is the query's result
 		 * relation.
 		 */
 		if (policy->qual != NULL && policy->with_check_qual == NULL &&
@@ -568,16 +568,16 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
 								BoolGetDatum(false), false, true), quals);
 
 	/*
-	 * Row security quals always have the target table as varno 1, as no
-	 * joins are permitted in row security expressions. We must walk the
-	 * expression, updating any references to varno 1 to the varno
-	 * the table has in the outer query.
+	 * Row security quals always have the target table as varno 1, as no joins
+	 * are permitted in row security expressions. We must walk the expression,
+	 * updating any references to varno 1 to the varno the table has in the
+	 * outer query.
 	 *
 	 * We rewrite the expression in-place.
 	 *
 	 * We must have some quals at this point; the default-deny policy, if
-	 * nothing else.  Note that we might not have any WITH CHECK quals-
-	 * that's fine, as this might not be the resultRelation.
+	 * nothing else.  Note that we might not have any WITH CHECK quals- that's
+	 * fine, as this might not be the resultRelation.
 	 */
 	Assert(quals != NIL);
 
@@ -593,11 +593,11 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
 	if (list_length(quals) > 1)
 		*qual_eval = makeBoolExpr(boolop, quals, -1);
 	else
-		*qual_eval = (Expr*) linitial(quals);
+		*qual_eval = (Expr *) linitial(quals);
 
 	/*
-	 * Similairly, if more than one WITH CHECK qual is returned, then
-	 * they need to be combined together.
+	 * Similairly, if more than one WITH CHECK qual is returned, then they
+	 * need to be combined together.
 	 *
 	 * with_check_quals is allowed to be NIL here since this might not be the
 	 * resultRelation (see above).
@@ -605,7 +605,7 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
 	if (list_length(with_check_quals) > 1)
 		*with_check_eval = makeBoolExpr(boolop, with_check_quals, -1);
 	else if (with_check_quals != NIL)
-		*with_check_eval = (Expr*) linitial(with_check_quals);
+		*with_check_eval = (Expr *) linitial(with_check_quals);
 	else
 		*with_check_eval = NULL;
 
@@ -614,7 +614,7 @@ process_policies(Query* root, List *policies, int rt_index, Expr **qual_eval,
 
 /*
  * check_role_for_policy -
- *   determines if the policy should be applied for the current role
+ *	 determines if the policy should be applied for the current role
  */
 static bool
 check_role_for_policy(ArrayType *policy_roles, Oid user_id)
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index ef1f9a69008..3ae2848da05 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -69,9 +69,9 @@ InitBufferPool(void)
 
 	/* Align descriptors to a cacheline boundary. */
 	BufferDescriptors = (BufferDescPadded *) CACHELINEALIGN(
-		ShmemInitStruct("Buffer Descriptors",
-						NBuffers * sizeof(BufferDescPadded) + PG_CACHE_LINE_SIZE,
-						&foundDescs));
+										ShmemInitStruct("Buffer Descriptors",
+					NBuffers * sizeof(BufferDescPadded) + PG_CACHE_LINE_SIZE,
+														&foundDescs));
 
 	BufferBlocks = (char *)
 		ShmemInitStruct("Buffer Blocks",
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 861ec3ed494..cc973b53a91 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -68,8 +68,8 @@
 
 typedef struct PrivateRefCountEntry
 {
-	Buffer buffer;
-	int32 refcount;
+	Buffer		buffer;
+	int32		refcount;
 } PrivateRefCountEntry;
 
 /* 64 bytes, about the size of a cache line on common systems */
@@ -132,8 +132,8 @@ static uint32 PrivateRefCountClock = 0;
 static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
 
 static void ReservePrivateRefCountEntry(void);
-static PrivateRefCountEntry* NewPrivateRefCountEntry(Buffer buffer);
-static PrivateRefCountEntry* GetPrivateRefCountEntry(Buffer buffer, bool do_move);
+static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
+static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
 static inline int32 GetPrivateRefCount(Buffer buffer);
 static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
 
@@ -154,7 +154,7 @@ ReservePrivateRefCountEntry(void)
 	 * majority of cases.
 	 */
 	{
-		int i;
+		int			i;
 
 		for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
 		{
@@ -180,10 +180,10 @@ ReservePrivateRefCountEntry(void)
 		 * hashtable. Use that slot.
 		 */
 		PrivateRefCountEntry *hashent;
-		bool found;
+		bool		found;
 
 		/* select victim slot */
-		ReservedRefCountEntry  =
+		ReservedRefCountEntry =
 			&PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
 
 		/* Better be used, otherwise we shouldn't get here. */
@@ -208,7 +208,7 @@ ReservePrivateRefCountEntry(void)
 /*
  * Fill a previously reserved refcount entry.
  */
-static PrivateRefCountEntry*
+static PrivateRefCountEntry *
 NewPrivateRefCountEntry(Buffer buffer)
 {
 	PrivateRefCountEntry *res;
@@ -234,7 +234,7 @@ NewPrivateRefCountEntry(Buffer buffer)
  * do_move is true, and the entry resides in the hashtable the entry is
  * optimized for frequent access by moving it to the array.
  */
-static PrivateRefCountEntry*
+static PrivateRefCountEntry *
 GetPrivateRefCountEntry(Buffer buffer, bool do_move)
 {
 	PrivateRefCountEntry *res;
@@ -280,7 +280,7 @@ GetPrivateRefCountEntry(Buffer buffer, bool do_move)
 	else
 	{
 		/* move buffer from hashtable into the free array slot */
-		bool found;
+		bool		found;
 		PrivateRefCountEntry *free;
 
 		/* Ensure there's a free array slot */
@@ -346,6 +346,7 @@ ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
 		ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
 	{
 		ref->buffer = InvalidBuffer;
+
 		/*
 		 * Mark the just used entry as reserved - in many scenarios that
 		 * allows us to avoid ever having to search the array/hash for free
@@ -355,8 +356,9 @@ ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
 	}
 	else
 	{
-		bool found;
-		Buffer buffer = ref->buffer;
+		bool		found;
+		Buffer		buffer = ref->buffer;
+
 		hash_search(PrivateRefCountHash,
 					(void *) &buffer,
 					HASH_REMOVE,
@@ -669,8 +671,8 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 											  found);
 
 			/*
-			 * In RBM_ZERO_AND_LOCK mode the caller expects the page to
-			 * be locked on return.
+			 * In RBM_ZERO_AND_LOCK mode the caller expects the page to be
+			 * locked on return.
 			 */
 			if (!isLocalBuf)
 			{
@@ -809,9 +811,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	 * page before the caller has had a chance to initialize it.
 	 *
 	 * Since no-one else can be looking at the page contents yet, there is no
-	 * difference between an exclusive lock and a cleanup-strength lock.
-	 * (Note that we cannot use LockBuffer() of LockBufferForCleanup() here,
-	 * because they assert that the buffer is already valid.)
+	 * difference between an exclusive lock and a cleanup-strength lock. (Note
+	 * that we cannot use LockBuffer() of LockBufferForCleanup() here, because
+	 * they assert that the buffer is already valid.)
 	 */
 	if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
 		!isLocalBuf)
@@ -939,8 +941,8 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
 	for (;;)
 	{
 		/*
-		 * Ensure, while the spinlock's not yet held, that there's a free refcount
-		 * entry.
+		 * Ensure, while the spinlock's not yet held, that there's a free
+		 * refcount entry.
 		 */
 		ReservePrivateRefCountEntry();
 
@@ -2169,6 +2171,7 @@ CheckForBufferLeaks(void)
 	if (PrivateRefCountOverflowed)
 	{
 		HASH_SEQ_STATUS hstat;
+
 		hash_seq_init(&hstat, PrivateRefCountHash);
 		while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
 		{
@@ -2974,6 +2977,7 @@ IncrBufferRefCount(Buffer buffer)
 	else
 	{
 		PrivateRefCountEntry *ref;
+
 		ref = GetPrivateRefCountEntry(buffer, true);
 		Assert(ref != NULL);
 		ref->refcount++;
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index c36e80af23a..bc2c7730003 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -50,7 +50,7 @@ typedef struct
 	 * Statistics.  These counters should be wide enough that they can't
 	 * overflow during a single bgwriter cycle.
 	 */
-	uint32		 completePasses; /* Complete cycles of the clock sweep */
+	uint32		completePasses; /* Complete cycles of the clock sweep */
 	pg_atomic_uint32 numBufferAllocs;	/* Buffers allocated since last reset */
 
 	/*
@@ -111,7 +111,7 @@ static void AddBufferToRing(BufferAccessStrategy strategy,
 static inline uint32
 ClockSweepTick(void)
 {
-	uint32 victim;
+	uint32		victim;
 
 	/*
 	 * Atomically move hand ahead one buffer - if there's several processes
@@ -123,7 +123,7 @@ ClockSweepTick(void)
 
 	if (victim >= NBuffers)
 	{
-		uint32 originalVictim = victim;
+		uint32		originalVictim = victim;
 
 		/* always wrap what we look up in BufferDescriptors */
 		victim = victim % NBuffers;
@@ -136,9 +136,9 @@ ClockSweepTick(void)
 		 */
 		if (victim == 0)
 		{
-			uint32 expected;
-			uint32 wrapped;
-			bool success = false;
+			uint32		expected;
+			uint32		wrapped;
+			bool		success = false;
 
 			expected = originalVictim + 1;
 
@@ -381,6 +381,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
 	if (complete_passes)
 	{
 		*complete_passes = StrategyControl->completePasses;
+
 		/*
 		 * Additionally add the number of wraparounds that happened before
 		 * completePasses could be incremented. C.f. ClockSweepTick().
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index bed8478dd1a..68d43c66b6f 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -2517,14 +2517,14 @@ walkdir(char *path, void (*action) (char *fname, bool isdir))
 			int			len;
 			struct stat lst;
 
-			len = readlink(subpath, linkpath, sizeof(linkpath)-1);
+			len = readlink(subpath, linkpath, sizeof(linkpath) - 1);
 			if (len < 0)
 				ereport(ERROR,
 						(errcode_for_file_access(),
 						 errmsg("could not read symbolic link \"%s\": %m",
 								subpath)));
 
-			if (len >= sizeof(linkpath)-1)
+			if (len >= sizeof(linkpath) - 1)
 				ereport(ERROR,
 						(errmsg("symbolic link \"%s\" target is too long",
 								subpath)));
diff --git a/src/backend/storage/file/reinit.c b/src/backend/storage/file/reinit.c
index afd92554fbd..429a99bc788 100644
--- a/src/backend/storage/file/reinit.c
+++ b/src/backend/storage/file/reinit.c
@@ -341,11 +341,11 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
 		FreeDir(dbspace_dir);
 
 		/*
-		 * copy_file() above has already called pg_flush_data() on the
-		 * files it created. Now we need to fsync those files, because
-		 * a checkpoint won't do it for us while we're in recovery. We
-		 * do this in a separate pass to allow the kernel to perform
-		 * all the flushes (especially the metadata ones) at once.
+		 * copy_file() above has already called pg_flush_data() on the files
+		 * it created. Now we need to fsync those files, because a checkpoint
+		 * won't do it for us while we're in recovery. We do this in a
+		 * separate pass to allow the kernel to perform all the flushes
+		 * (especially the metadata ones) at once.
 		 */
 		dbspace_dir = AllocateDir(dbspacedirname);
 		if (dbspace_dir == NULL)
diff --git a/src/backend/storage/ipc/dsm_impl.c b/src/backend/storage/ipc/dsm_impl.c
index 0859fbfc930..0b10dac7290 100644
--- a/src/backend/storage/ipc/dsm_impl.c
+++ b/src/backend/storage/ipc/dsm_impl.c
@@ -332,8 +332,8 @@ dsm_impl_posix(dsm_op op, dsm_handle handle, Size request_size,
 
 		ereport(elevel,
 				(errcode_for_dynamic_shared_memory(),
-		 errmsg("could not resize shared memory segment \"%s\" to %zu bytes: %m",
-				name, request_size)));
+				 errmsg("could not resize shared memory segment \"%s\" to %zu bytes: %m",
+						name, request_size)));
 		return false;
 	}
 
@@ -875,8 +875,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size,
 
 		ereport(elevel,
 				(errcode_for_dynamic_shared_memory(),
-		 errmsg("could not resize shared memory segment \"%s\" to %zu bytes: %m",
-				name, request_size)));
+				 errmsg("could not resize shared memory segment \"%s\" to %zu bytes: %m",
+						name, request_size)));
 		return false;
 	}
 	else if (*mapped_size < request_size)
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 0b3ad7294a7..4f3c5c9dec9 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1707,12 +1707,12 @@ ProcArrayInstallRestoredXmin(TransactionId xmin, PGPROC *proc)
 	pgxact = &allPgXact[proc->pgprocno];
 
 	/*
-	 * Be certain that the referenced PGPROC has an advertised xmin which
-	 * is no later than the one we're installing, so that the system-wide
-	 * xmin can't go backwards.  Also, make sure it's running in the same
-	 * database, so that the per-database xmin cannot go backwards.
+	 * Be certain that the referenced PGPROC has an advertised xmin which is
+	 * no later than the one we're installing, so that the system-wide xmin
+	 * can't go backwards.  Also, make sure it's running in the same database,
+	 * so that the per-database xmin cannot go backwards.
 	 */
-	xid = pgxact->xmin;		/* fetch just once */
+	xid = pgxact->xmin;			/* fetch just once */
 	if (proc->databaseId == MyDatabaseId &&
 		TransactionIdIsNormal(xid) &&
 		TransactionIdPrecedesOrEquals(xid, xmin))
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
index daca634a551..126cb0751b3 100644
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -317,7 +317,7 @@ shm_mq_set_handle(shm_mq_handle *mqh, BackgroundWorkerHandle *handle)
 shm_mq_result
 shm_mq_send(shm_mq_handle *mqh, Size nbytes, const void *data, bool nowait)
 {
-	shm_mq_iovec	iov;
+	shm_mq_iovec iov;
 
 	iov.data = data;
 	iov.len = nbytes;
@@ -385,7 +385,7 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
 	offset = mqh->mqh_partial_bytes;
 	do
 	{
-		Size	chunksize;
+		Size		chunksize;
 
 		/* Figure out which bytes need to be sent next. */
 		if (offset >= iov[which_iov].len)
@@ -399,18 +399,18 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
 
 		/*
 		 * We want to avoid copying the data if at all possible, but every
-		 * chunk of bytes we write into the queue has to be MAXALIGN'd,
-		 * except the last.  Thus, if a chunk other than the last one ends
-		 * on a non-MAXALIGN'd boundary, we have to combine the tail end of
-		 * its data with data from one or more following chunks until we
-		 * either reach the last chunk or accumulate a number of bytes which
-		 * is MAXALIGN'd.
+		 * chunk of bytes we write into the queue has to be MAXALIGN'd, except
+		 * the last.  Thus, if a chunk other than the last one ends on a
+		 * non-MAXALIGN'd boundary, we have to combine the tail end of its
+		 * data with data from one or more following chunks until we either
+		 * reach the last chunk or accumulate a number of bytes which is
+		 * MAXALIGN'd.
 		 */
 		if (which_iov + 1 < iovcnt &&
 			offset + MAXIMUM_ALIGNOF > iov[which_iov].len)
 		{
-			char	tmpbuf[MAXIMUM_ALIGNOF];
-			int		j = 0;
+			char		tmpbuf[MAXIMUM_ALIGNOF];
+			int			j = 0;
 
 			for (;;)
 			{
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index 7c95f4c6a95..dc9207164ee 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -188,8 +188,8 @@ ProcessCatchupInterrupt(void)
 		 *
 		 * It is awfully tempting to just call AcceptInvalidationMessages()
 		 * without the rest of the xact start/stop overhead, and I think that
-		 * would actually work in the normal case; but I am not sure that things
-		 * would clean up nicely if we got an error partway through.
+		 * would actually work in the normal case; but I am not sure that
+		 * things would clean up nicely if we got an error partway through.
 		 */
 		if (IsTransactionOrTransactionBlock())
 		{
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 1acd2f090b5..46cab4911e7 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -62,14 +62,14 @@
  * work. That's problematic because we're now stuck waiting inside the OS.
 
  * To mitigate those races we use a two phased attempt at locking:
- *   Phase 1: Try to do it atomically, if we succeed, nice
- *   Phase 2: Add ourselves to the waitqueue of the lock
- *   Phase 3: Try to grab the lock again, if we succeed, remove ourselves from
- *            the queue
- *   Phase 4: Sleep till wake-up, goto Phase 1
+ *	 Phase 1: Try to do it atomically, if we succeed, nice
+ *	 Phase 2: Add ourselves to the waitqueue of the lock
+ *	 Phase 3: Try to grab the lock again, if we succeed, remove ourselves from
+ *			  the queue
+ *	 Phase 4: Sleep till wake-up, goto Phase 1
  *
  * This protects us against the problem from above as nobody can release too
- *    quick, before we're queued, since after Phase 2 we're already queued.
+ *	  quick, before we're queued, since after Phase 2 we're already queued.
  * -------------------------------------------------------------------------
  */
 #include "postgres.h"
@@ -140,7 +140,7 @@ static LWLockTranche MainLWLockTranche;
 /* struct representing the LWLocks we're holding */
 typedef struct LWLockHandle
 {
-	LWLock *lock;
+	LWLock	   *lock;
 	LWLockMode	mode;
 } LWLockHandle;
 
@@ -183,7 +183,8 @@ PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode)
 	/* hide statement & context here, otherwise the log is just too verbose */
 	if (Trace_lwlocks)
 	{
-		uint32 state = pg_atomic_read_u32(&lock->state);
+		uint32		state = pg_atomic_read_u32(&lock->state);
+
 		ereport(LOG,
 				(errhidestmt(true),
 				 errhidecontext(true),
@@ -580,17 +581,17 @@ LWLockInitialize(LWLock *lock, int tranche_id)
  * Returns true if the lock isn't free and we need to wait.
  */
 static bool
-LWLockAttemptLock(LWLock* lock, LWLockMode mode)
+LWLockAttemptLock(LWLock *lock, LWLockMode mode)
 {
 	AssertArg(mode == LW_EXCLUSIVE || mode == LW_SHARED);
 
 	/* loop until we've determined whether we could acquire the lock or not */
 	while (true)
 	{
-		uint32 old_state;
-		uint32 expected_state;
-		uint32 desired_state;
-		bool lock_free;
+		uint32		old_state;
+		uint32		expected_state;
+		uint32		desired_state;
+		bool		lock_free;
 
 		old_state = pg_atomic_read_u32(&lock->state);
 		expected_state = old_state;
@@ -632,7 +633,7 @@ LWLockAttemptLock(LWLock* lock, LWLockMode mode)
 				return false;
 			}
 			else
-				return true; /* someobdy else has the lock */
+				return true;	/* someobdy else has the lock */
 		}
 	}
 	pg_unreachable();
@@ -667,7 +668,7 @@ LWLockWakeup(LWLock *lock)
 
 	dlist_foreach_modify(iter, &lock->waiters)
 	{
-		PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+		PGPROC	   *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
 
 		if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
 			continue;
@@ -683,6 +684,7 @@ LWLockWakeup(LWLock *lock)
 			 * automatically.
 			 */
 			new_release_ok = false;
+
 			/*
 			 * Don't wakeup (further) exclusive locks.
 			 */
@@ -693,7 +695,7 @@ LWLockWakeup(LWLock *lock)
 		 * Once we've woken up an exclusive lock, there's no point in waking
 		 * up anybody else.
 		 */
-		if(waiter->lwWaitMode == LW_EXCLUSIVE)
+		if (waiter->lwWaitMode == LW_EXCLUSIVE)
 			break;
 	}
 
@@ -716,10 +718,11 @@ LWLockWakeup(LWLock *lock)
 	/* Awaken any waiters I removed from the queue. */
 	dlist_foreach_modify(iter, &wakeup)
 	{
-		PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+		PGPROC	   *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
 
 		LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
 		dlist_delete(&waiter->lwWaitLink);
+
 		/*
 		 * Guarantee that lwWaiting being unset only becomes visible once the
 		 * unlink from the link has completed. Otherwise the target backend
@@ -799,7 +802,7 @@ LWLockQueueSelf(LWLock *lock, LWLockMode mode)
 static void
 LWLockDequeueSelf(LWLock *lock)
 {
-	bool	found = false;
+	bool		found = false;
 	dlist_mutable_iter iter;
 
 #ifdef LWLOCK_STATS
@@ -822,7 +825,8 @@ LWLockDequeueSelf(LWLock *lock)
 	 */
 	dlist_foreach_modify(iter, &lock->waiters)
 	{
-		PGPROC *proc = dlist_container(PGPROC, lwWaitLink, iter.cur);
+		PGPROC	   *proc = dlist_container(PGPROC, lwWaitLink, iter.cur);
+
 		if (proc == MyProc)
 		{
 			found = true;
@@ -844,7 +848,7 @@ LWLockDequeueSelf(LWLock *lock)
 		MyProc->lwWaiting = false;
 	else
 	{
-		int		extraWaits = 0;
+		int			extraWaits = 0;
 
 		/*
 		 * Somebody else dequeued us and has or will wake us up. Deal with the
@@ -881,6 +885,7 @@ LWLockDequeueSelf(LWLock *lock)
 	{
 		/* not waiting anymore */
 		uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
+
 		Assert(nwaiters < MAX_BACKENDS);
 	}
 #endif
@@ -1047,6 +1052,7 @@ LWLockAcquireCommon(LWLock *lock, LWLockMode mode, uint64 *valptr, uint64 val)
 		{
 			/* not waiting anymore */
 			uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
+
 			Assert(nwaiters < MAX_BACKENDS);
 		}
 #endif
@@ -1182,8 +1188,9 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
 		if (mustwait)
 		{
 			/*
-			 * Wait until awakened.  Like in LWLockAcquire, be prepared for bogus
-			 * wakeups, because we share the semaphore with ProcWaitForSignal.
+			 * Wait until awakened.  Like in LWLockAcquire, be prepared for
+			 * bogus wakeups, because we share the semaphore with
+			 * ProcWaitForSignal.
 			 */
 			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
 
@@ -1204,6 +1211,7 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
 			{
 				/* not waiting anymore */
 				uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
+
 				Assert(nwaiters < MAX_BACKENDS);
 			}
 #endif
@@ -1216,11 +1224,11 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
 			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
 
 			/*
-			  * Got lock in the second attempt, undo queueing. We need to
-			  * treat this as having successfully acquired the lock, otherwise
-			  * we'd not necessarily wake up people we've prevented from
-			  * acquiring the lock.
-			  */
+			 * Got lock in the second attempt, undo queueing. We need to treat
+			 * this as having successfully acquired the lock, otherwise we'd
+			 * not necessarily wake up people we've prevented from acquiring
+			 * the lock.
+			 */
 			LWLockDequeueSelf(lock);
 		}
 	}
@@ -1345,9 +1353,9 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
 
 		/*
 		 * Add myself to wait queue. Note that this is racy, somebody else
-		 * could wakeup before we're finished queuing.
-		 * NB: We're using nearly the same twice-in-a-row lock acquisition
-		 * protocol as LWLockAcquire(). Check its comments for details.
+		 * could wakeup before we're finished queuing. NB: We're using nearly
+		 * the same twice-in-a-row lock acquisition protocol as
+		 * LWLockAcquire(). Check its comments for details.
 		 */
 		LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
 
@@ -1405,6 +1413,7 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
 		{
 			/* not waiting anymore */
 			uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
+
 			Assert(nwaiters < MAX_BACKENDS);
 		}
 #endif
@@ -1477,7 +1486,7 @@ LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
 	 */
 	dlist_foreach_modify(iter, &lock->waiters)
 	{
-		PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+		PGPROC	   *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
 
 		if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
 			break;
@@ -1494,7 +1503,8 @@ LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
 	 */
 	dlist_foreach_modify(iter, &wakeup)
 	{
-		PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+		PGPROC	   *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur);
+
 		dlist_delete(&waiter->lwWaitLink);
 		/* check comment in LWLockWakeup() about this barrier */
 		pg_write_barrier();
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 33b2f69bf87..455ad266340 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -1596,6 +1596,7 @@ CheckDeadLockAlert(void)
 	int			save_errno = errno;
 
 	got_deadlock_timeout = true;
+
 	/*
 	 * Have to set the latch again, even if handle_sig_alarm already did. Back
 	 * then got_deadlock_timeout wasn't yet set... It's unlikely that this
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 41ecc999ae6..df77bb2f5c0 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -962,8 +962,8 @@ PageIndexDeleteNoCompact(Page page, OffsetNumber *itemnos, int nitems)
 				offset != MAXALIGN(offset))
 				ereport(ERROR,
 						(errcode(ERRCODE_DATA_CORRUPTED),
-						 errmsg("corrupted item pointer: offset = %u, length = %u",
-								offset, (unsigned int) itemlen)));
+				   errmsg("corrupted item pointer: offset = %u, length = %u",
+						  offset, (unsigned int) itemlen)));
 
 			if (nextitm < nitems && offnum == itemnos[nextitm])
 			{
@@ -1039,8 +1039,8 @@ PageIndexDeleteNoCompact(Page page, OffsetNumber *itemnos, int nitems)
 		if (totallen > (Size) (pd_special - pd_lower))
 			ereport(ERROR,
 					(errcode(ERRCODE_DATA_CORRUPTED),
-					 errmsg("corrupted item lengths: total %u, available space %u",
-							(unsigned int) totallen, pd_special - pd_lower)));
+			   errmsg("corrupted item lengths: total %u, available space %u",
+					  (unsigned int) totallen, pd_special - pd_lower)));
 
 		/*
 		 * Defragment the data areas of each tuple, being careful to preserve
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index b754d3bd198..42a43bb07b0 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -213,8 +213,8 @@ mdinit(void)
 		/*
 		 * XXX: The checkpointer needs to add entries to the pending ops table
 		 * when absorbing fsync requests.  That is done within a critical
-		 * section, which isn't usually allowed, but we make an exception.
-		 * It means that there's a theoretical possibility that you run out of
+		 * section, which isn't usually allowed, but we make an exception. It
+		 * means that there's a theoretical possibility that you run out of
 		 * memory while absorbing fsync requests, which leads to a PANIC.
 		 * Fortunately the hash table is small so that's unlikely to happen in
 		 * practice.
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index ea2a43209da..ce4bdafad9b 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -574,10 +574,10 @@ ProcessClientWriteInterrupt(bool blocked)
 
 	/*
 	 * We only want to process the interrupt here if socket writes are
-	 * blocking to increase the chance to get an error message to the
-	 * client. If we're not blocked there'll soon be a
-	 * CHECK_FOR_INTERRUPTS(). But if we're blocked we'll never get out of
-	 * that situation if the client has died.
+	 * blocking to increase the chance to get an error message to the client.
+	 * If we're not blocked there'll soon be a CHECK_FOR_INTERRUPTS(). But if
+	 * we're blocked we'll never get out of that situation if the client has
+	 * died.
 	 */
 	if (ProcDiePending && blocked)
 	{
@@ -2653,9 +2653,9 @@ die(SIGNAL_ARGS)
 
 	/*
 	 * If we're in single user mode, we want to quit immediately - we can't
-	 * rely on latches as they wouldn't work when stdin/stdout is a
-	 * file. Rather ugly, but it's unlikely to be worthwhile to invest much
-	 * more effort just for the benefit of single user mode.
+	 * rely on latches as they wouldn't work when stdin/stdout is a file.
+	 * Rather ugly, but it's unlikely to be worthwhile to invest much more
+	 * effort just for the benefit of single user mode.
 	 */
 	if (DoingCommandRead && whereToSendOutput != DestRemote)
 		ProcessInterrupts();
@@ -2906,13 +2906,13 @@ ProcessInterrupts(void)
 	 */
 	if (RecoveryConflictPending && DoingCommandRead)
 	{
-		QueryCancelPending = false;			/* this trumps QueryCancel */
+		QueryCancelPending = false;		/* this trumps QueryCancel */
 		RecoveryConflictPending = false;
 		LockErrorCleanup();
 		pgstat_report_recovery_conflict(RecoveryConflictReason);
 		ereport(FATAL,
 				(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-				 errmsg("terminating connection due to conflict with recovery"),
+			  errmsg("terminating connection due to conflict with recovery"),
 				 errdetail_recovery_conflict(),
 				 errhint("In a moment you should be able to reconnect to the"
 						 " database and repeat your command.")));
@@ -3894,7 +3894,7 @@ PostgresMain(int argc, char *argv[],
 		if (pq_is_reading_msg())
 			ereport(FATAL,
 					(errcode(ERRCODE_PROTOCOL_VIOLATION),
-					 errmsg("terminating connection because protocol sync was lost")));
+			errmsg("terminating connection because protocol sync was lost")));
 
 		/* Now we can allow interrupts again */
 		RESUME_INTERRUPTS();
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index a95eff16cc5..7db9f96fdf7 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -135,8 +135,8 @@ check_xact_readonly(Node *parsetree)
 	/*
 	 * Note: Commands that need to do more complicated checking are handled
 	 * elsewhere, in particular COPY and plannable statements do their own
-	 * checking.  However they should all call PreventCommandIfReadOnly
-	 * or PreventCommandIfParallelMode to actually throw the error.
+	 * checking.  However they should all call PreventCommandIfReadOnly or
+	 * PreventCommandIfParallelMode to actually throw the error.
 	 */
 
 	switch (nodeTag(parsetree))
@@ -933,6 +933,7 @@ ProcessUtilitySlow(Node *parsetree,
 			case T_CreateSchemaStmt:
 				CreateSchemaCommand((CreateSchemaStmt *) parsetree,
 									queryString);
+
 				/*
 				 * EventTriggerCollectSimpleCommand called by
 				 * CreateSchemaCommand
@@ -1072,12 +1073,12 @@ ProcessUtilitySlow(Node *parsetree,
 							else
 							{
 								/*
-								 * Recurse for anything else.  If we need to do
-								 * so, "close" the current complex-command set,
-								 * and start a new one at the bottom; this is
-								 * needed to ensure the ordering of queued
-								 * commands is consistent with the way they are
-								 * executed here.
+								 * Recurse for anything else.  If we need to
+								 * do so, "close" the current complex-command
+								 * set, and start a new one at the bottom;
+								 * this is needed to ensure the ordering of
+								 * queued commands is consistent with the way
+								 * they are executed here.
 								 */
 								EventTriggerAlterTableEnd();
 								ProcessUtility(stmt,
@@ -1177,43 +1178,43 @@ ProcessUtilitySlow(Node *parsetree,
 							address =
 								DefineAggregate(stmt->defnames, stmt->args,
 												stmt->oldstyle,
-												stmt->definition, queryString);
+											  stmt->definition, queryString);
 							break;
 						case OBJECT_OPERATOR:
 							Assert(stmt->args == NIL);
 							address = DefineOperator(stmt->defnames,
-													  stmt->definition);
+													 stmt->definition);
 							break;
 						case OBJECT_TYPE:
 							Assert(stmt->args == NIL);
 							address = DefineType(stmt->defnames,
-												  stmt->definition);
+												 stmt->definition);
 							break;
 						case OBJECT_TSPARSER:
 							Assert(stmt->args == NIL);
 							address = DefineTSParser(stmt->defnames,
-													  stmt->definition);
+													 stmt->definition);
 							break;
 						case OBJECT_TSDICTIONARY:
 							Assert(stmt->args == NIL);
 							address = DefineTSDictionary(stmt->defnames,
-														  stmt->definition);
+														 stmt->definition);
 							break;
 						case OBJECT_TSTEMPLATE:
 							Assert(stmt->args == NIL);
 							address = DefineTSTemplate(stmt->defnames,
-														stmt->definition);
+													   stmt->definition);
 							break;
 						case OBJECT_TSCONFIGURATION:
 							Assert(stmt->args == NIL);
 							address = DefineTSConfiguration(stmt->defnames,
-															 stmt->definition,
-															 &secondaryObject);
+															stmt->definition,
+															&secondaryObject);
 							break;
 						case OBJECT_COLLATION:
 							Assert(stmt->args == NIL);
 							address = DefineCollation(stmt->defnames,
-													   stmt->definition);
+													  stmt->definition);
 							break;
 						default:
 							elog(ERROR, "unrecognized define stmt type: %d",
@@ -1256,17 +1257,18 @@ ProcessUtilitySlow(Node *parsetree,
 					/* ... and do it */
 					EventTriggerAlterTableStart(parsetree);
 					address =
-						DefineIndex(relid,	/* OID of heap relation */
+						DefineIndex(relid,		/* OID of heap relation */
 									stmt,
-									InvalidOid,		/* no predefined OID */
-									false,	/* is_alter_table */
-									true,	/* check_rights */
-									false,	/* skip_build */
-									false); /* quiet */
+									InvalidOid, /* no predefined OID */
+									false,		/* is_alter_table */
+									true,		/* check_rights */
+									false,		/* skip_build */
+									false);		/* quiet */
+
 					/*
-					 * Add the CREATE INDEX node itself to stash right away; if
-					 * there were any commands stashed in the ALTER TABLE code,
-					 * we need them to appear after this one.
+					 * Add the CREATE INDEX node itself to stash right away;
+					 * if there were any commands stashed in the ALTER TABLE
+					 * code, we need them to appear after this one.
 					 */
 					EventTriggerCollectSimpleCommand(address, secondaryObject,
 													 parsetree);
@@ -1285,7 +1287,7 @@ ProcessUtilitySlow(Node *parsetree,
 
 			case T_AlterExtensionContentsStmt:
 				address = ExecAlterExtensionContentsStmt((AlterExtensionContentsStmt *) parsetree,
-														  &secondaryObject);
+														 &secondaryObject);
 				break;
 
 			case T_CreateFdwStmt:
@@ -1377,10 +1379,11 @@ ProcessUtilitySlow(Node *parsetree,
 
 			case T_CreateTableAsStmt:
 				address = ExecCreateTableAs((CreateTableAsStmt *) parsetree,
-								  queryString, params, completionTag);
+										 queryString, params, completionTag);
 				break;
 
 			case T_RefreshMatViewStmt:
+
 				/*
 				 * REFRSH CONCURRENTLY executes some DDL commands internally.
 				 * Inhibit DDL command collection here to avoid those commands
@@ -1391,7 +1394,7 @@ ProcessUtilitySlow(Node *parsetree,
 				PG_TRY();
 				{
 					address = ExecRefreshMatView((RefreshMatViewStmt *) parsetree,
-												 queryString, params, completionTag);
+										 queryString, params, completionTag);
 				}
 				PG_CATCH();
 				{
@@ -1404,8 +1407,8 @@ ProcessUtilitySlow(Node *parsetree,
 
 			case T_CreateTrigStmt:
 				address = CreateTrigger((CreateTrigStmt *) parsetree,
-										 queryString, InvalidOid, InvalidOid,
-										 InvalidOid, InvalidOid, false);
+										queryString, InvalidOid, InvalidOid,
+										InvalidOid, InvalidOid, false);
 				break;
 
 			case T_CreatePLangStmt:
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c
index a2f0f5cebd3..3af19048219 100644
--- a/src/backend/tsearch/spell.c
+++ b/src/backend/tsearch/spell.c
@@ -1335,8 +1335,8 @@ CheckAffix(const char *word, size_t len, AFFIX *Affix, int flagflags, char *neww
 	else
 	{
 		/*
-		 * if prefix is an all non-changed part's length then all word contains
-		 * only prefix and suffix, so out
+		 * if prefix is an all non-changed part's length then all word
+		 * contains only prefix and suffix, so out
 		 */
 		if (baselen && *baselen + strlen(Affix->find) <= Affix->replen)
 			return NULL;
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index e7aecc95c97..3ca168b4736 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -5202,7 +5202,7 @@ get_rolespec_tuple(const Node *node)
 			if (!HeapTupleIsValid(tuple))
 				ereport(ERROR,
 						(errcode(ERRCODE_UNDEFINED_OBJECT),
-						 errmsg("role \"%s\" does not exist", role->rolename)));
+					  errmsg("role \"%s\" does not exist", role->rolename)));
 			break;
 
 		case ROLESPEC_CURRENT_USER:
@@ -5221,7 +5221,7 @@ get_rolespec_tuple(const Node *node)
 			ereport(ERROR,
 					(errcode(ERRCODE_UNDEFINED_OBJECT),
 					 errmsg("role \"%s\" does not exist", "public")));
-			tuple = NULL;	/* make compiler happy */
+			tuple = NULL;		/* make compiler happy */
 
 		default:
 			elog(ERROR, "unexpected role type %d", role->roletype);
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index f7b57da48e7..c14ea23dfbc 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -687,7 +687,7 @@ array_position_start(PG_FUNCTION_ARGS)
 
 /*
  * array_position_common
- * 		Common code for array_position and array_position_start
+ *		Common code for array_position and array_position_start
  *
  * These are separate wrappers for the sake of opr_sanity regression test.
  * They are not strict so we have to test for null inputs explicitly.
@@ -755,7 +755,8 @@ array_position_common(FunctionCallInfo fcinfo)
 
 	/*
 	 * We arrange to look up type info for array_create_iterator only once per
-	 * series of calls, assuming the element type doesn't change underneath us.
+	 * series of calls, assuming the element type doesn't change underneath
+	 * us.
 	 */
 	my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
 	if (my_extra == NULL)
@@ -778,8 +779,8 @@ array_position_common(FunctionCallInfo fcinfo)
 		if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
 			ereport(ERROR,
 					(errcode(ERRCODE_UNDEFINED_FUNCTION),
-					 errmsg("could not identify an equality operator for type %s",
-							format_type_be(element_type))));
+				errmsg("could not identify an equality operator for type %s",
+					   format_type_be(element_type))));
 
 		my_extra->element_type = element_type;
 		fmgr_info(typentry->eq_opr_finfo.fn_oid, &my_extra->proc);
@@ -892,7 +893,8 @@ array_positions(PG_FUNCTION_ARGS)
 
 	/*
 	 * We arrange to look up type info for array_create_iterator only once per
-	 * series of calls, assuming the element type doesn't change underneath us.
+	 * series of calls, assuming the element type doesn't change underneath
+	 * us.
 	 */
 	my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
 	if (my_extra == NULL)
@@ -915,15 +917,16 @@ array_positions(PG_FUNCTION_ARGS)
 		if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
 			ereport(ERROR,
 					(errcode(ERRCODE_UNDEFINED_FUNCTION),
-					 errmsg("could not identify an equality operator for type %s",
-							format_type_be(element_type))));
+				errmsg("could not identify an equality operator for type %s",
+					   format_type_be(element_type))));
 
 		my_extra->element_type = element_type;
 		fmgr_info(typentry->eq_opr_finfo.fn_oid, &my_extra->proc);
 	}
 
 	/*
-	 * Accumulate each array position iff the element matches the given element.
+	 * Accumulate each array position iff the element matches the given
+	 * element.
 	 */
 	array_iterator = array_create_iterator(array, 0, my_extra);
 	while (array_iterate(array_iterator, &value, &isnull))
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 84e4db8416a..5391ea0bf0b 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -920,7 +920,7 @@ typedef struct NUMProc
 				num_count,		/* number of write digits	*/
 				num_in,			/* is inside number		*/
 				num_curr,		/* current position in number	*/
-				out_pre_spaces,	/* spaces before first digit	*/
+				out_pre_spaces, /* spaces before first digit	*/
 
 				read_dec,		/* to_number - was read dec. point	*/
 				read_post,		/* to_number - number of dec. digit */
@@ -981,7 +981,7 @@ static char *get_last_relevant_decnum(char *num);
 static void NUM_numpart_from_char(NUMProc *Np, int id, int input_len);
 static void NUM_numpart_to_char(NUMProc *Np, int id);
 static char *NUM_processor(FormatNode *node, NUMDesc *Num, char *inout,
-			  char *number, int from_char_input_len, int to_char_out_pre_spaces,
+		   char *number, int from_char_input_len, int to_char_out_pre_spaces,
 			  int sign, bool is_to_char, Oid collid);
 static DCHCacheEntry *DCH_cache_search(char *str);
 static DCHCacheEntry *DCH_cache_getnew(char *str);
@@ -2541,14 +2541,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 					break;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_toupper_z(localized_full_months[tm->tm_mon - 1], collid);
+					char	   *str = str_toupper_z(localized_full_months[tm->tm_mon - 1], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2561,14 +2561,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 					break;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_initcap_z(localized_full_months[tm->tm_mon - 1], collid);
+					char	   *str = str_initcap_z(localized_full_months[tm->tm_mon - 1], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2581,14 +2581,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 					break;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_tolower_z(localized_full_months[tm->tm_mon - 1], collid);
+					char	   *str = str_tolower_z(localized_full_months[tm->tm_mon - 1], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2601,14 +2601,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 					break;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_toupper_z(localized_abbrev_months[tm->tm_mon - 1], collid);
+					char	   *str = str_toupper_z(localized_abbrev_months[tm->tm_mon - 1], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					strcpy(s, asc_toupper_z(months[tm->tm_mon - 1]));
@@ -2620,14 +2620,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 					break;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_initcap_z(localized_abbrev_months[tm->tm_mon - 1], collid);
+					char	   *str = str_initcap_z(localized_abbrev_months[tm->tm_mon - 1], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					strcpy(s, months[tm->tm_mon - 1]);
@@ -2639,14 +2639,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 					break;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_tolower_z(localized_abbrev_months[tm->tm_mon - 1], collid);
+					char	   *str = str_tolower_z(localized_abbrev_months[tm->tm_mon - 1], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					strcpy(s, asc_tolower_z(months[tm->tm_mon - 1]));
@@ -2662,14 +2662,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 				INVALID_FOR_INTERVAL;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_toupper_z(localized_full_days[tm->tm_wday], collid);
+					char	   *str = str_toupper_z(localized_full_days[tm->tm_wday], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2680,14 +2680,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 				INVALID_FOR_INTERVAL;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_initcap_z(localized_full_days[tm->tm_wday], collid);
+					char	   *str = str_initcap_z(localized_full_days[tm->tm_wday], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2698,14 +2698,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 				INVALID_FOR_INTERVAL;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_tolower_z(localized_full_days[tm->tm_wday], collid);
+					char	   *str = str_tolower_z(localized_full_days[tm->tm_wday], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2716,14 +2716,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 				INVALID_FOR_INTERVAL;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_toupper_z(localized_abbrev_days[tm->tm_wday], collid);
+					char	   *str = str_toupper_z(localized_abbrev_days[tm->tm_wday], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					strcpy(s, asc_toupper_z(days_short[tm->tm_wday]));
@@ -2733,14 +2733,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 				INVALID_FOR_INTERVAL;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_initcap_z(localized_abbrev_days[tm->tm_wday], collid);
+					char	   *str = str_initcap_z(localized_abbrev_days[tm->tm_wday], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					strcpy(s, days_short[tm->tm_wday]);
@@ -2750,14 +2750,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
 				INVALID_FOR_INTERVAL;
 				if (S_TM(n->suffix))
 				{
-					char *str = str_tolower_z(localized_abbrev_days[tm->tm_wday], collid);
+					char	   *str = str_tolower_z(localized_abbrev_days[tm->tm_wday], collid);
 
 					if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
 						strcpy(s, str);
 					else
 						ereport(ERROR,
 								(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-								 errmsg("localized string format value too long")));
+						  errmsg("localized string format value too long")));
 				}
 				else
 					strcpy(s, asc_tolower_z(days_short[tm->tm_wday]));
@@ -4572,7 +4572,7 @@ NUM_numpart_to_char(NUMProc *Np, int id)
 
 static char *
 NUM_processor(FormatNode *node, NUMDesc *Num, char *inout,
-			  char *number, int from_char_input_len, int to_char_out_pre_spaces,
+		   char *number, int from_char_input_len, int to_char_out_pre_spaces,
 			  int sign, bool is_to_char, Oid collid)
 {
 	FormatNode *n;
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index f08e288c21d..26d38433693 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -1442,7 +1442,7 @@ datum_to_json(Datum val, bool is_null, StringInfo result,
 				if (DATE_NOT_FINITE(date))
 				{
 					/* we have to format infinity ourselves */
-					appendStringInfoString(result,DT_INFINITY);
+					appendStringInfoString(result, DT_INFINITY);
 				}
 				else
 				{
@@ -1465,7 +1465,7 @@ datum_to_json(Datum val, bool is_null, StringInfo result,
 				if (TIMESTAMP_NOT_FINITE(timestamp))
 				{
 					/* we have to format infinity ourselves */
-					appendStringInfoString(result,DT_INFINITY);
+					appendStringInfoString(result, DT_INFINITY);
 				}
 				else if (timestamp2tm(timestamp, NULL, &tm, &fsec, NULL, NULL) == 0)
 				{
@@ -1492,7 +1492,7 @@ datum_to_json(Datum val, bool is_null, StringInfo result,
 				if (TIMESTAMP_NOT_FINITE(timestamp))
 				{
 					/* we have to format infinity ourselves */
-					appendStringInfoString(result,DT_INFINITY);
+					appendStringInfoString(result, DT_INFINITY);
 				}
 				else if (timestamp2tm(timestamp, &tz, &tm, &fsec, &tzn, NULL) == 0)
 				{
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index bccc6696a4f..c0959a0ee2a 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -57,7 +57,7 @@ typedef enum					/* type categories for datum_to_jsonb */
 	JSONBTYPE_COMPOSITE,		/* composite */
 	JSONBTYPE_JSONCAST,			/* something with an explicit cast to JSON */
 	JSONBTYPE_OTHER				/* all else */
-}	JsonbTypeCategory;
+} JsonbTypeCategory;
 
 static inline Datum jsonb_from_cstring(char *json, int len);
 static size_t checkStringLen(size_t len);
@@ -69,7 +69,7 @@ static void jsonb_in_object_field_start(void *pstate, char *fname, bool isnull);
 static void jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal);
 static void jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype);
 static void jsonb_categorize_type(Oid typoid,
-					  JsonbTypeCategory * tcategory,
+					  JsonbTypeCategory *tcategory,
 					  Oid *outfuncoid);
 static void composite_to_jsonb(Datum composite, JsonbInState *result);
 static void array_dim_to_jsonb(JsonbInState *result, int dim, int ndims, int *dims,
@@ -77,14 +77,14 @@ static void array_dim_to_jsonb(JsonbInState *result, int dim, int ndims, int *di
 				   JsonbTypeCategory tcategory, Oid outfuncoid);
 static void array_to_jsonb_internal(Datum array, JsonbInState *result);
 static void jsonb_categorize_type(Oid typoid,
-					  JsonbTypeCategory * tcategory,
+					  JsonbTypeCategory *tcategory,
 					  Oid *outfuncoid);
 static void datum_to_jsonb(Datum val, bool is_null, JsonbInState *result,
 			   JsonbTypeCategory tcategory, Oid outfuncoid,
 			   bool key_scalar);
 static void add_jsonb(Datum val, bool is_null, JsonbInState *result,
 		  Oid val_type, bool key_scalar);
-static JsonbParseState * clone_parse_state(JsonbParseState * state);
+static JsonbParseState *clone_parse_state(JsonbParseState *state);
 static char *JsonbToCStringWorker(StringInfo out, JsonbContainer *in, int estimated_len, bool indent);
 static void add_indent(StringInfo out, bool indent, int level);
 
@@ -365,10 +365,12 @@ jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype)
 		case JSON_TOKEN_TRUE:
 			v.type = jbvBool;
 			v.val.boolean = true;
+
 			break;
 		case JSON_TOKEN_FALSE:
 			v.type = jbvBool;
 			v.val.boolean = false;
+
 			break;
 		case JSON_TOKEN_NULL:
 			v.type = jbvNull;
@@ -448,15 +450,17 @@ JsonbToCStringWorker(StringInfo out, JsonbContainer *in, int estimated_len, bool
 	JsonbValue	v;
 	int			level = 0;
 	bool		redo_switch = false;
+
 	/* If we are indenting, don't add a space after a comma */
 	int			ispaces = indent ? 1 : 2;
+
 	/*
-	 * Don't indent the very first item. This gets set to the indent flag
-	 * at the bottom of the loop.
+	 * Don't indent the very first item. This gets set to the indent flag at
+	 * the bottom of the loop.
 	 */
-	bool        use_indent = false;
-	bool        raw_scalar = false;
-	bool        last_was_key = false;
+	bool		use_indent = false;
+	bool		raw_scalar = false;
+	bool		last_was_key = false;
 
 	if (out == NULL)
 		out = makeStringInfo();
@@ -530,13 +534,13 @@ JsonbToCStringWorker(StringInfo out, JsonbContainer *in, int estimated_len, bool
 					appendBinaryStringInfo(out, ", ", ispaces);
 				first = false;
 
-				if (! raw_scalar)
+				if (!raw_scalar)
 					add_indent(out, use_indent, level);
 				jsonb_put_escaped_value(out, &v);
 				break;
 			case WJB_END_ARRAY:
 				level--;
-				if (! raw_scalar)
+				if (!raw_scalar)
 				{
 					add_indent(out, use_indent, level);
 					appendStringInfoCharMacro(out, ']');
@@ -580,11 +584,11 @@ add_indent(StringInfo out, bool indent, int level)
  *
  * Given the datatype OID, return its JsonbTypeCategory, as well as the type's
  * output function OID.  If the returned category is JSONBTYPE_JSONCAST,
- *  we return the OID of the relevant cast function instead.
+ *	we return the OID of the relevant cast function instead.
  */
 static void
 jsonb_categorize_type(Oid typoid,
-					  JsonbTypeCategory * tcategory,
+					  JsonbTypeCategory *tcategory,
 					  Oid *outfuncoid)
 {
 	bool		typisvarlena;
@@ -649,16 +653,16 @@ jsonb_categorize_type(Oid typoid,
 				*tcategory = JSONBTYPE_OTHER;
 
 				/*
-				 * but first let's look for a cast to json (note: not to jsonb)
-				 * if it's not built-in.
+				 * but first let's look for a cast to json (note: not to
+				 * jsonb) if it's not built-in.
 				 */
 				if (typoid >= FirstNormalObjectId)
 				{
-					Oid castfunc;
+					Oid			castfunc;
 					CoercionPathType ctype;
 
 					ctype = find_coercion_pathway(JSONOID, typoid,
-												  COERCION_EXPLICIT, &castfunc);
+											   COERCION_EXPLICIT, &castfunc);
 					if (ctype == COERCION_PATH_FUNC && OidIsValid(castfunc))
 					{
 						*tcategory = JSONBTYPE_JSONCAST;
@@ -774,30 +778,30 @@ datum_to_jsonb(Datum val, bool is_null, JsonbInState *result,
 					}
 				}
 				break;
-		case JSONBTYPE_DATE:
-			{
-				DateADT		date;
-				struct pg_tm tm;
-				char		buf[MAXDATELEN + 1];
+			case JSONBTYPE_DATE:
+				{
+					DateADT		date;
+					struct pg_tm tm;
+					char		buf[MAXDATELEN + 1];
 
-				date = DatumGetDateADT(val);
-				jb.type = jbvString;
+					date = DatumGetDateADT(val);
+					jb.type = jbvString;
 
-				if (DATE_NOT_FINITE(date))
-				{
-					jb.val.string.len = strlen(DT_INFINITY);
-					jb.val.string.val = pstrdup(DT_INFINITY);
-				}
-				else
-				{
-					j2date(date + POSTGRES_EPOCH_JDATE,
-						   &(tm.tm_year), &(tm.tm_mon), &(tm.tm_mday));
-					EncodeDateOnly(&tm, USE_XSD_DATES, buf);
-					jb.val.string.len = strlen(buf);
-					jb.val.string.val = pstrdup(buf);
+					if (DATE_NOT_FINITE(date))
+					{
+						jb.val.string.len = strlen(DT_INFINITY);
+						jb.val.string.val = pstrdup(DT_INFINITY);
+					}
+					else
+					{
+						j2date(date + POSTGRES_EPOCH_JDATE,
+							   &(tm.tm_year), &(tm.tm_mon), &(tm.tm_mday));
+						EncodeDateOnly(&tm, USE_XSD_DATES, buf);
+						jb.val.string.len = strlen(buf);
+						jb.val.string.val = pstrdup(buf);
+					}
 				}
-			}
-			break;
+				break;
 			case JSONBTYPE_TIMESTAMP:
 				{
 					Timestamp	timestamp;
@@ -1534,9 +1538,11 @@ jsonb_object_two_arg(PG_FUNCTION_ARGS)
  * change them.
  */
 static JsonbParseState *
-clone_parse_state(JsonbParseState * state)
+clone_parse_state(JsonbParseState *state)
 {
-	JsonbParseState *result, *icursor, *ocursor;
+	JsonbParseState *result,
+			   *icursor,
+			   *ocursor;
 
 	if (state == NULL)
 		return NULL;
@@ -1544,14 +1550,14 @@ clone_parse_state(JsonbParseState * state)
 	result = palloc(sizeof(JsonbParseState));
 	icursor = state;
 	ocursor = result;
-	for(;;)
+	for (;;)
 	{
 		ocursor->contVal = icursor->contVal;
 		ocursor->size = icursor->size;
 		icursor = icursor->next;
 		if (icursor == NULL)
 			break;
-		ocursor->next= palloc(sizeof(JsonbParseState));
+		ocursor->next = palloc(sizeof(JsonbParseState));
 		ocursor = ocursor->next;
 	}
 	ocursor->next = NULL;
@@ -1652,15 +1658,16 @@ jsonb_agg_transfn(PG_FUNCTION_ARGS)
 				{
 					/* copy string values in the aggregate context */
 					char	   *buf = palloc(v.val.string.len + 1);
+
 					snprintf(buf, v.val.string.len + 1, "%s", v.val.string.val);
 					v.val.string.val = buf;
 				}
 				else if (v.type == jbvNumeric)
 				{
 					/* same for numeric */
-				  v.val.numeric =
+					v.val.numeric =
 					DatumGetNumeric(DirectFunctionCall1(numeric_uplus,
-														NumericGetDatum(v.val.numeric)));
+											NumericGetDatum(v.val.numeric)));
 
 				}
 				result->res = pushJsonbValue(&result->parseState,
@@ -1693,15 +1700,15 @@ jsonb_agg_finalfn(PG_FUNCTION_ARGS)
 
 	/*
 	 * We need to do a shallow clone of the argument in case the final
-	 * function is called more than once, so we avoid changing the argument.
-	 * A shallow clone is sufficient as we aren't going to change any of the
+	 * function is called more than once, so we avoid changing the argument. A
+	 * shallow clone is sufficient as we aren't going to change any of the
 	 * values, just add the final array end marker.
 	 */
 
 	result.parseState = clone_parse_state(arg->parseState);
 
 	result.res = pushJsonbValue(&result.parseState,
-								 WJB_END_ARRAY, NULL);
+								WJB_END_ARRAY, NULL);
 
 
 	out = JsonbValueToJsonb(result.res);
@@ -1813,6 +1820,7 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS)
 				{
 					/* copy string values in the aggregate context */
 					char	   *buf = palloc(v.val.string.len + 1);
+
 					snprintf(buf, v.val.string.len + 1, "%s", v.val.string.val);
 					v.val.string.val = buf;
 				}
@@ -1871,6 +1879,7 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS)
 				{
 					/* copy string values in the aggregate context */
 					char	   *buf = palloc(v.val.string.len + 1);
+
 					snprintf(buf, v.val.string.len + 1, "%s", v.val.string.val);
 					v.val.string.val = buf;
 				}
@@ -1878,8 +1887,8 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS)
 				{
 					/* same for numeric */
 					v.val.numeric =
-					  DatumGetNumeric(DirectFunctionCall1(numeric_uplus,
-														  NumericGetDatum(v.val.numeric)));
+					DatumGetNumeric(DirectFunctionCall1(numeric_uplus,
+											NumericGetDatum(v.val.numeric)));
 
 				}
 				result->res = pushJsonbValue(&result->parseState,
@@ -1900,7 +1909,7 @@ Datum
 jsonb_object_agg_finalfn(PG_FUNCTION_ARGS)
 {
 	JsonbInState *arg;
-	JsonbInState  result;
+	JsonbInState result;
 	Jsonb	   *out;
 
 	/* cannot be called directly because of internal-type argument */
@@ -1913,15 +1922,15 @@ jsonb_object_agg_finalfn(PG_FUNCTION_ARGS)
 
 	/*
 	 * We need to do a shallow clone of the argument in case the final
-	 * function is called more than once, so we avoid changing the argument.
-	 * A shallow clone is sufficient as we aren't going to change any of the
+	 * function is called more than once, so we avoid changing the argument. A
+	 * shallow clone is sufficient as we aren't going to change any of the
 	 * values, just add the final object end marker.
 	 */
 
 	result.parseState = clone_parse_state(arg->parseState);
 
 	result.res = pushJsonbValue(&result.parseState,
-								 WJB_END_OBJECT, NULL);
+								WJB_END_OBJECT, NULL);
 
 
 	out = JsonbValueToJsonb(result.res);
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index 974e3865249..4d733159d06 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -58,8 +58,8 @@ static int	lengthCompareJsonbStringValue(const void *a, const void *b);
 static int	lengthCompareJsonbPair(const void *a, const void *b, void *arg);
 static void uniqueifyJsonbObject(JsonbValue *object);
 static JsonbValue *pushJsonbValueScalar(JsonbParseState **pstate,
-										JsonbIteratorToken seq,
-										JsonbValue *scalarVal);
+					 JsonbIteratorToken seq,
+					 JsonbValue *scalarVal);
 
 /*
  * Turn an in-memory JsonbValue into a Jsonb for on-disk storage.
@@ -518,7 +518,7 @@ pushJsonbValue(JsonbParseState **pstate, JsonbIteratorToken seq,
 {
 	JsonbIterator *it;
 	JsonbValue *res = NULL;
-	JsonbValue v;
+	JsonbValue	v;
 	JsonbIteratorToken tok;
 
 	if (!jbval || (seq != WJB_ELEM && seq != WJB_VALUE) ||
@@ -543,7 +543,7 @@ pushJsonbValue(JsonbParseState **pstate, JsonbIteratorToken seq,
  */
 static JsonbValue *
 pushJsonbValueScalar(JsonbParseState **pstate, JsonbIteratorToken seq,
-			   JsonbValue *scalarVal)
+					 JsonbValue *scalarVal)
 {
 	JsonbValue *result = NULL;
 
@@ -1231,6 +1231,7 @@ JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash)
 			break;
 		case jbvBool:
 			tmp = scalarVal->val.boolean ? 0x02 : 0x04;
+
 			break;
 		default:
 			elog(ERROR, "invalid jsonb scalar type");
@@ -1304,7 +1305,7 @@ compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar)
 			case jbvBool:
 				if (aScalar->val.boolean == bScalar->val.boolean)
 					return 0;
-				else if (aScalar->val.boolean > bScalar->val.boolean)
+				else if (aScalar->val.boolean > bScalar->val.boolean)
 					return 1;
 				else
 					return -1;
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 9987c73784c..2f755744c13 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -110,8 +110,8 @@ static void sn_object_start(void *state);
 static void sn_object_end(void *state);
 static void sn_array_start(void *state);
 static void sn_array_end(void *state);
-static void sn_object_field_start (void *state, char *fname, bool isnull);
-static void sn_array_element_start (void *state, bool isnull);
+static void sn_object_field_start(void *state, char *fname, bool isnull);
+static void sn_array_element_start(void *state, bool isnull);
 static void sn_scalar(void *state, char *token, JsonTokenType tokentype);
 
 /* worker function for populate_recordset and to_recordset */
@@ -126,18 +126,18 @@ static JsonbValue *findJsonbValueFromContainerLen(JsonbContainer *container,
 
 /* functions supporting jsonb_delete, jsonb_replace and jsonb_concat */
 static JsonbValue *IteratorConcat(JsonbIterator **it1, JsonbIterator **it2,
-								  JsonbParseState **state);
+			   JsonbParseState **state);
 static JsonbValue *walkJsonb(JsonbIterator **it, JsonbParseState **state, bool stop_at_level_zero);
 static JsonbValue *replacePath(JsonbIterator **it, Datum *path_elems,
-							   bool *path_nulls, int path_len,
-							   JsonbParseState **st, int level, Jsonb *newval);
+			bool *path_nulls, int path_len,
+			JsonbParseState **st, int level, Jsonb *newval);
 static void replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
-							  int path_len, JsonbParseState **st, int level,
-							  Jsonb *newval, uint32	nelems);
+				  int path_len, JsonbParseState **st, int level,
+				  Jsonb *newval, uint32 nelems);
 static void replacePathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
-							 int path_len, JsonbParseState **st, int level,
-							 Jsonb *newval, uint32 npairs);
-static void addJsonbToParseState(JsonbParseState **jbps, Jsonb * jb);
+				 int path_len, JsonbParseState **st, int level,
+				 Jsonb *newval, uint32 npairs);
+static void addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb);
 
 /* state for json_object_keys */
 typedef struct OkeysState
@@ -250,10 +250,11 @@ typedef struct PopulateRecordsetState
 } PopulateRecordsetState;
 
 /* state for json_strip_nulls */
-typedef struct StripnullState{
+typedef struct StripnullState
+{
 	JsonLexContext *lex;
-	StringInfo  strval;
-	bool skip_next_null;
+	StringInfo	strval;
+	bool		skip_next_null;
 } StripnullState;
 
 /* Turn a jsonb object into a record */
@@ -3045,6 +3046,7 @@ static void
 sn_object_start(void *state)
 {
 	StripnullState *_state = (StripnullState *) state;
+
 	appendStringInfoCharMacro(_state->strval, '{');
 }
 
@@ -3052,6 +3054,7 @@ static void
 sn_object_end(void *state)
 {
 	StripnullState *_state = (StripnullState *) state;
+
 	appendStringInfoCharMacro(_state->strval, '}');
 }
 
@@ -3059,6 +3062,7 @@ static void
 sn_array_start(void *state)
 {
 	StripnullState *_state = (StripnullState *) state;
+
 	appendStringInfoCharMacro(_state->strval, '[');
 }
 
@@ -3066,21 +3070,21 @@ static void
 sn_array_end(void *state)
 {
 	StripnullState *_state = (StripnullState *) state;
+
 	appendStringInfoCharMacro(_state->strval, ']');
 }
 
 static void
-sn_object_field_start (void *state, char *fname, bool isnull)
+sn_object_field_start(void *state, char *fname, bool isnull)
 {
 	StripnullState *_state = (StripnullState *) state;
 
 	if (isnull)
 	{
 		/*
-		 * The next thing must be a scalar or isnull couldn't be true,
-		 * so there is no danger of this state being carried down
-		 * into a nested  object or array. The flag will be reset in the
-		 * scalar action.
+		 * The next thing must be a scalar or isnull couldn't be true, so
+		 * there is no danger of this state being carried down into a nested
+		 * object or array. The flag will be reset in the scalar action.
 		 */
 		_state->skip_next_null = true;
 		return;
@@ -3090,16 +3094,16 @@ sn_object_field_start (void *state, char *fname, bool isnull)
 		appendStringInfoCharMacro(_state->strval, ',');
 
 	/*
-	 * Unfortunately we don't have the quoted and escaped string any more,
-	 * so we have to re-escape it.
+	 * Unfortunately we don't have the quoted and escaped string any more, so
+	 * we have to re-escape it.
 	 */
-	escape_json(_state->strval,fname);
+	escape_json(_state->strval, fname);
 
 	appendStringInfoCharMacro(_state->strval, ':');
 }
 
 static void
-sn_array_element_start (void *state, bool isnull)
+sn_array_element_start(void *state, bool isnull)
 {
 	StripnullState *_state = (StripnullState *) state;
 
@@ -3114,7 +3118,7 @@ sn_scalar(void *state, char *token, JsonTokenType tokentype)
 
 	if (_state->skip_next_null)
 	{
-		Assert (tokentype == JSON_TOKEN_NULL);
+		Assert(tokentype == JSON_TOKEN_NULL);
 		_state->skip_next_null = false;
 		return;
 	}
@@ -3132,7 +3136,7 @@ Datum
 json_strip_nulls(PG_FUNCTION_ARGS)
 {
 	text	   *json = PG_GETARG_TEXT_P(0);
-	StripnullState  *state;
+	StripnullState *state;
 	JsonLexContext *lex;
 	JsonSemAction *sem;
 
@@ -3166,13 +3170,14 @@ json_strip_nulls(PG_FUNCTION_ARGS)
 Datum
 jsonb_strip_nulls(PG_FUNCTION_ARGS)
 {
-	Jsonb * jb = PG_GETARG_JSONB(0);
+	Jsonb	   *jb = PG_GETARG_JSONB(0);
 	JsonbIterator *it;
 	JsonbParseState *parseState = NULL;
 	JsonbValue *res = NULL;
-	int type;
-	JsonbValue v,k;
-	bool last_was_key = false;
+	int			type;
+	JsonbValue	v,
+				k;
+	bool		last_was_key = false;
 
 	if (JB_ROOT_IS_SCALAR(jb))
 		PG_RETURN_POINTER(jb);
@@ -3181,7 +3186,7 @@ jsonb_strip_nulls(PG_FUNCTION_ARGS)
 
 	while ((type = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
 	{
-		Assert( ! (type == WJB_KEY && last_was_key));
+		Assert(!(type == WJB_KEY && last_was_key));
 
 		if (type == WJB_KEY)
 		{
@@ -3225,13 +3230,12 @@ jsonb_strip_nulls(PG_FUNCTION_ARGS)
  * like getting jbvBinary values, so we can't just push jb as a whole.
  */
 static void
-addJsonbToParseState(JsonbParseState **jbps, Jsonb * jb)
+addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb)
 {
-
 	JsonbIterator *it;
-	JsonbValue    *o = &(*jbps)->contVal;
-	int            type;
-	JsonbValue     v;
+	JsonbValue *o = &(*jbps)->contVal;
+	int			type;
+	JsonbValue	v;
 
 	it = JsonbIteratorInit(&jb->root);
 
@@ -3239,8 +3243,8 @@ addJsonbToParseState(JsonbParseState **jbps, Jsonb * jb)
 
 	if (JB_ROOT_IS_SCALAR(jb))
 	{
-		(void) JsonbIteratorNext(&it, &v, false); /* skip array header */
-		(void) JsonbIteratorNext(&it, &v, false); /* fetch scalar value */
+		(void) JsonbIteratorNext(&it, &v, false);		/* skip array header */
+		(void) JsonbIteratorNext(&it, &v, false);		/* fetch scalar value */
 
 		switch (o->type)
 		{
@@ -3297,8 +3301,8 @@ jsonb_concat(PG_FUNCTION_ARGS)
 	Jsonb	   *out = palloc(VARSIZE(jb1) + VARSIZE(jb2));
 	JsonbParseState *state = NULL;
 	JsonbValue *res;
-	JsonbIterator  *it1,
-				   *it2;
+	JsonbIterator *it1,
+			   *it2;
 
 	/*
 	 * If one of the jsonb is empty, just return other.
@@ -3453,7 +3457,7 @@ jsonb_delete_idx(PG_FUNCTION_ARGS)
 		res = pushJsonbValue(&state, r, r < WJB_BEGIN_ARRAY ? &v : NULL);
 	}
 
-	Assert (res != NULL);
+	Assert(res != NULL);
 
 	PG_RETURN_JSONB(JsonbValueToJsonb(res));
 }
@@ -3497,7 +3501,7 @@ jsonb_replace(PG_FUNCTION_ARGS)
 
 	res = replacePath(&it, path_elems, path_nulls, path_len, &st, 0, newval);
 
-	Assert (res != NULL);
+	Assert(res != NULL);
 
 	PG_RETURN_JSONB(JsonbValueToJsonb(res));
 }
@@ -3541,7 +3545,7 @@ jsonb_delete_path(PG_FUNCTION_ARGS)
 
 	res = replacePath(&it, path_elems, path_nulls, path_len, &st, 0, NULL);
 
-	Assert (res != NULL);
+	Assert(res != NULL);
 
 	PG_RETURN_JSONB(JsonbValueToJsonb(res));
 }
@@ -3687,7 +3691,7 @@ walkJsonb(JsonbIterator **it, JsonbParseState **state, bool stop_at_level_zero)
 {
 	uint32		r,
 				level = 1;
-	JsonbValue  v;
+	JsonbValue	v;
 	JsonbValue *res = NULL;
 
 	while ((r = JsonbIteratorNext(it, &v, false)) != WJB_DONE)
@@ -3758,7 +3762,7 @@ replacePath(JsonbIterator **it, Datum *path_elems,
 static void
 replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 				  int path_len, JsonbParseState **st, int level,
-				  Jsonb *newval, uint32	nelems)
+				  Jsonb *newval, uint32 nelems)
 {
 	JsonbValue	v;
 	int			i;
@@ -3770,7 +3774,8 @@ replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 
 	for (i = 0; i < nelems; i++)
 	{
-		int		r = JsonbIteratorNext(it, &k, true);
+		int			r = JsonbIteratorNext(it, &k, true);
+
 		Assert(r == WJB_KEY);
 
 		if (!done &&
@@ -3780,7 +3785,7 @@ replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 		{
 			if (level == path_len - 1)
 			{
-				r = JsonbIteratorNext(it, &v, true);		/* skip */
+				r = JsonbIteratorNext(it, &v, true);	/* skip */
 				if (newval != NULL)
 				{
 					(void) pushJsonbValue(st, WJB_KEY, &k);
@@ -3801,7 +3806,7 @@ replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 			(void) pushJsonbValue(st, r, r < WJB_BEGIN_ARRAY ? &v : NULL);
 			if (r == WJB_BEGIN_ARRAY || r == WJB_BEGIN_OBJECT)
 			{
-				int		walking_level = 1;
+				int			walking_level = 1;
 
 				while (walking_level != 0)
 				{
@@ -3859,13 +3864,13 @@ replacePathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 	/* iterate over the array elements */
 	for (i = 0; i < npairs; i++)
 	{
-		int		r;
+		int			r;
 
 		if (i == idx && level < path_len)
 		{
 			if (level == path_len - 1)
 			{
-				r = JsonbIteratorNext(it, &v, true);		/* skip */
+				r = JsonbIteratorNext(it, &v, true);	/* skip */
 				if (newval != NULL)
 					addJsonbToParseState(st, newval);
 			}
@@ -3881,7 +3886,7 @@ replacePathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 
 			if (r == WJB_BEGIN_ARRAY || r == WJB_BEGIN_OBJECT)
 			{
-				int		walking_level = 1;
+				int			walking_level = 1;
 
 				while (walking_level != 0)
 				{
diff --git a/src/backend/utils/adt/levenshtein.c b/src/backend/utils/adt/levenshtein.c
index f6e2ca6452a..2c30b6c8e9d 100644
--- a/src/backend/utils/adt/levenshtein.c
+++ b/src/backend/utils/adt/levenshtein.c
@@ -96,8 +96,8 @@ varstr_levenshtein(const char *source, int slen, const char *target, int tlen,
 #endif
 
 	/*
-	 * A common use for Levenshtein distance is to match attributes when building
-	 * diagnostic, user-visible messages.  Restrict the size of
+	 * A common use for Levenshtein distance is to match attributes when
+	 * building diagnostic, user-visible messages.  Restrict the size of
 	 * MAX_LEVENSHTEIN_STRLEN at compile time so that this is guaranteed to
 	 * work.
 	 */
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 9d53a8b6a32..1705ff0d118 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -419,7 +419,7 @@ PreventAdvisoryLocksInParallelMode(void)
 	if (IsInParallelMode())
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-				 errmsg("cannot use advisory locks during a parallel operation")));
+		   errmsg("cannot use advisory locks during a parallel operation")));
 }
 
 /*
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 61d609f9181..de68cdddf1d 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -187,7 +187,7 @@ pg_terminate_backend(PG_FUNCTION_ARGS)
 	if (r == SIGNAL_BACKEND_NOSUPERUSER)
 		ereport(ERROR,
 				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-				 (errmsg("must be a superuser to terminate superuser process"))));
+			(errmsg("must be a superuser to terminate superuser process"))));
 
 	if (r == SIGNAL_BACKEND_NOPERMISSION)
 		ereport(ERROR,
diff --git a/src/backend/utils/adt/network_gist.c b/src/backend/utils/adt/network_gist.c
index 0fdb17f947f..756237e751c 100644
--- a/src/backend/utils/adt/network_gist.c
+++ b/src/backend/utils/adt/network_gist.c
@@ -595,10 +595,10 @@ inet_gist_decompress(PG_FUNCTION_ARGS)
 Datum
 inet_gist_fetch(PG_FUNCTION_ARGS)
 {
-	GISTENTRY	*entry = (GISTENTRY *) PG_GETARG_POINTER(0);
-	GistInetKey	*key = DatumGetInetKeyP(entry->key);
-	GISTENTRY	*retval;
-	inet		*dst;
+	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+	GistInetKey *key = DatumGetInetKeyP(entry->key);
+	GISTENTRY  *retval;
+	inet	   *dst;
 
 	dst = (inet *) palloc0(sizeof(inet));
 
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 3cef3048eb3..7ce41b78888 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -1731,7 +1731,7 @@ numeric_abbrev_abort(int memtupcount, SortSupport ssup)
 		if (trace_sort)
 			elog(LOG,
 				 "numeric_abbrev: aborting abbreviation at cardinality %f"
-				 " below threshold %f after " INT64_FORMAT " values (%d rows)",
+			   " below threshold %f after " INT64_FORMAT " values (%d rows)",
 				 abbr_card, nss->input_count / 10000.0 + 0.5,
 				 nss->input_count, memtupcount);
 #endif
@@ -3408,10 +3408,10 @@ numeric_accum_inv(PG_FUNCTION_ARGS)
 #ifdef HAVE_INT128
 typedef struct Int128AggState
 {
-	bool	calcSumX2;	/* if true, calculate sumX2 */
-	int64	N;			/* count of processed numbers */
-	int128	sumX;		/* sum of processed numbers */
-	int128	sumX2;		/* sum of squares of processed numbers */
+	bool		calcSumX2;		/* if true, calculate sumX2 */
+	int64		N;				/* count of processed numbers */
+	int128		sumX;			/* sum of processed numbers */
+	int128		sumX2;			/* sum of squares of processed numbers */
 } Int128AggState;
 
 /*
@@ -3703,9 +3703,9 @@ Datum
 numeric_poly_sum(PG_FUNCTION_ARGS)
 {
 #ifdef HAVE_INT128
-	PolyNumAggState	*state;
-	Numeric			res;
-	NumericVar		result;
+	PolyNumAggState *state;
+	Numeric		res;
+	NumericVar	result;
 
 	state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
 
@@ -3731,9 +3731,10 @@ Datum
 numeric_poly_avg(PG_FUNCTION_ARGS)
 {
 #ifdef HAVE_INT128
-	PolyNumAggState	   *state;
-	NumericVar			result;
-	Datum				countd, sumd;
+	PolyNumAggState *state;
+	NumericVar	result;
+	Datum		countd,
+				sumd;
 
 	state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
 
@@ -3962,8 +3963,8 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
 #ifdef HAVE_INT128
 static Numeric
 numeric_poly_stddev_internal(Int128AggState *state,
-						bool variance, bool sample,
-						bool *is_null)
+							 bool variance, bool sample,
+							 bool *is_null)
 {
 	NumericAggState numstate;
 	Numeric		res;
@@ -3997,9 +3998,9 @@ Datum
 numeric_poly_var_samp(PG_FUNCTION_ARGS)
 {
 #ifdef HAVE_INT128
-	PolyNumAggState	   *state;
-	Numeric				res;
-	bool				is_null;
+	PolyNumAggState *state;
+	Numeric		res;
+	bool		is_null;
 
 	state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
 
@@ -4018,9 +4019,9 @@ Datum
 numeric_poly_stddev_samp(PG_FUNCTION_ARGS)
 {
 #ifdef HAVE_INT128
-	PolyNumAggState	   *state;
-	Numeric				res;
-	bool				is_null;
+	PolyNumAggState *state;
+	Numeric		res;
+	bool		is_null;
 
 	state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
 
@@ -4039,9 +4040,9 @@ Datum
 numeric_poly_var_pop(PG_FUNCTION_ARGS)
 {
 #ifdef HAVE_INT128
-	PolyNumAggState	   *state;
-	Numeric				res;
-	bool				is_null;
+	PolyNumAggState *state;
+	Numeric		res;
+	bool		is_null;
 
 	state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
 
@@ -4060,9 +4061,9 @@ Datum
 numeric_poly_stddev_pop(PG_FUNCTION_ARGS)
 {
 #ifdef HAVE_INT128
-	PolyNumAggState	   *state;
-	Numeric				res;
-	bool				is_null;
+	PolyNumAggState *state;
+	Numeric		res;
+	bool		is_null;
 
 	state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
 
@@ -5306,10 +5307,10 @@ int64_to_numericvar(int64 val, NumericVar *var)
 static void
 int128_to_numericvar(int128 val, NumericVar *var)
 {
-	uint128			uval,
-					newuval;
-	NumericDigit   *ptr;
-	int				ndigits;
+	uint128		uval,
+				newuval;
+	NumericDigit *ptr;
+	int			ndigits;
 
 	/* int128 can require at most 39 decimal digits; add one for safety */
 	alloc_var(var, 40 / DEC_DIGITS);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index d84969f770b..4be735e918d 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -635,7 +635,7 @@ cache_single_time(char **dst, const char *format, const struct tm * tm)
 	/*
 	 * MAX_L10N_DATA is sufficient buffer space for every known locale, and
 	 * POSIX defines no strftime() errors.  (Buffer space exhaustion is not an
-	 * error.)  An implementation might report errors (e.g. ENOMEM) by
+	 * error.)	An implementation might report errors (e.g. ENOMEM) by
 	 * returning 0 (or, less plausibly, a negative value) and setting errno.
 	 * Report errno just in case the implementation did that, but clear it in
 	 * advance of the call so we don't emit a stale, unrelated errno.
diff --git a/src/backend/utils/adt/pg_upgrade_support.c b/src/backend/utils/adt/pg_upgrade_support.c
index d69fa53567b..883378e5240 100644
--- a/src/backend/utils/adt/pg_upgrade_support.c
+++ b/src/backend/utils/adt/pg_upgrade_support.c
@@ -20,19 +20,19 @@
 #include "utils/builtins.h"
 
 
-Datum binary_upgrade_set_next_pg_type_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_array_pg_type_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_toast_pg_type_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_heap_pg_class_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_index_pg_class_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_toast_pg_class_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_pg_enum_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_pg_authid_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_create_empty_extension(PG_FUNCTION_ARGS);
-
-
-#define CHECK_IS_BINARY_UPGRADE 								\
-do { 															\
+Datum		binary_upgrade_set_next_pg_type_oid(PG_FUNCTION_ARGS);
+Datum		binary_upgrade_set_next_array_pg_type_oid(PG_FUNCTION_ARGS);
+Datum		binary_upgrade_set_next_toast_pg_type_oid(PG_FUNCTION_ARGS);
+Datum		binary_upgrade_set_next_heap_pg_class_oid(PG_FUNCTION_ARGS);
+Datum		binary_upgrade_set_next_index_pg_class_oid(PG_FUNCTION_ARGS);
+Datum		binary_upgrade_set_next_toast_pg_class_oid(PG_FUNCTION_ARGS);
+Datum		binary_upgrade_set_next_pg_enum_oid(PG_FUNCTION_ARGS);
+Datum		binary_upgrade_set_next_pg_authid_oid(PG_FUNCTION_ARGS);
+Datum		binary_upgrade_create_empty_extension(PG_FUNCTION_ARGS);
+
+
+#define CHECK_IS_BINARY_UPGRADE									\
+do {															\
 	if (!IsBinaryUpgrade)										\
 		ereport(ERROR,											\
 				(errcode(ERRCODE_CANT_CHANGE_RUNTIME_PARAM),	\
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index 2b3778b03ad..f7c9bf63338 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -531,14 +531,14 @@ Datum
 pg_stat_get_activity(PG_FUNCTION_ARGS)
 {
 #define PG_STAT_GET_ACTIVITY_COLS	22
-	int					num_backends = pgstat_fetch_stat_numbackends();
-	int					curr_backend;
-	int					pid = PG_ARGISNULL(0) ? -1 : PG_GETARG_INT32(0);
-	ReturnSetInfo	   *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-	TupleDesc			tupdesc;
-	Tuplestorestate	   *tupstore;
-	MemoryContext		per_query_ctx;
-	MemoryContext		oldcontext;
+	int			num_backends = pgstat_fetch_stat_numbackends();
+	int			curr_backend;
+	int			pid = PG_ARGISNULL(0) ? -1 : PG_GETARG_INT32(0);
+	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+	TupleDesc	tupdesc;
+	Tuplestorestate *tupstore;
+	MemoryContext per_query_ctx;
+	MemoryContext oldcontext;
 
 	/* check to see if caller supports us returning a tuplestore */
 	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@@ -628,7 +628,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 
 		if (beentry->st_ssl)
 		{
-			values[16] = BoolGetDatum(true); /* ssl */
+			values[16] = BoolGetDatum(true);	/* ssl */
 			values[17] = CStringGetTextDatum(beentry->st_sslstatus->ssl_version);
 			values[18] = CStringGetTextDatum(beentry->st_sslstatus->ssl_cipher);
 			values[19] = Int32GetDatum(beentry->st_sslstatus->ssl_bits);
@@ -637,7 +637,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 		}
 		else
 		{
-			values[16] = BoolGetDatum(false); /* ssl */
+			values[16] = BoolGetDatum(false);	/* ssl */
 			nulls[17] = nulls[18] = nulls[19] = nulls[20] = nulls[21] = true;
 		}
 
diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c
index 9281529d7a1..3b5529eb302 100644
--- a/src/backend/utils/adt/rangetypes_spgist.c
+++ b/src/backend/utils/adt/rangetypes_spgist.c
@@ -583,7 +583,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
 					 */
 					cmp = adjacent_inner_consistent(typcache, &lower,
 													&centroidUpper,
-											prevCentroid ? &prevUpper : NULL);
+										   prevCentroid ? &prevUpper : NULL);
 					if (cmp > 0)
 						which1 = (1 << 1) | (1 << 4);
 					else if (cmp < 0)
@@ -594,12 +594,12 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
 					/*
 					 * Also search for ranges's adjacent to argument's upper
 					 * bound. They will be found along the line adjacent to
-					 * (and just right of) X=upper, which falls in quadrants
-					 * 3 and 4, or 1 and 2.
+					 * (and just right of) X=upper, which falls in quadrants 3
+					 * and 4, or 1 and 2.
 					 */
 					cmp = adjacent_inner_consistent(typcache, &upper,
 													&centroidLower,
-											prevCentroid ? &prevLower : NULL);
+										   prevCentroid ? &prevLower : NULL);
 					if (cmp > 0)
 						which2 = (1 << 1) | (1 << 2);
 					else if (cmp < 0)
@@ -782,7 +782,7 @@ adjacent_cmp_bounds(TypeCacheEntry *typcache, RangeBound *arg,
 
 	Assert(arg->lower != centroid->lower);
 
-	cmp = range_cmp_bounds(typcache, arg,  centroid);
+	cmp = range_cmp_bounds(typcache, arg, centroid);
 
 	if (centroid->lower)
 	{
@@ -799,11 +799,11 @@ adjacent_cmp_bounds(TypeCacheEntry *typcache, RangeBound *arg,
 		 * With the argument range [..., 500), the adjacent range we're
 		 * searching for is [500, ...):
 		 *
-		 *  ARGUMENT   CENTROID     CMP   ADJ
-		 *  [..., 500) [498, ...)    >    (N)   [500, ...) is to the right
-		 *  [..., 500) [499, ...)    =    (N)   [500, ...) is to the right
-		 *  [..., 500) [500, ...)    <     Y    [500, ...) is to the right
-		 *  [..., 500) [501, ...)    <     N    [500, ...) is to the left
+		 *	ARGUMENT   CENTROID		CMP   ADJ
+		 *	[..., 500) [498, ...)	 >	  (N)	[500, ...) is to the right
+		 *	[..., 500) [499, ...)	 =	  (N)	[500, ...) is to the right
+		 *	[..., 500) [500, ...)	 <	   Y	[500, ...) is to the right
+		 *	[..., 500) [501, ...)	 <	   N	[500, ...) is to the left
 		 *
 		 * So, we must search left when the argument is smaller than, and not
 		 * adjacent, to the centroid. Otherwise search right.
@@ -821,11 +821,11 @@ adjacent_cmp_bounds(TypeCacheEntry *typcache, RangeBound *arg,
 		 * bounds. A matching adjacent upper bound must be *smaller* than the
 		 * argument, but only just.
 		 *
-		 *  ARGUMENT   CENTROID     CMP   ADJ
-		 *  [500, ...) [..., 499)    >    (N)   [..., 500) is to the right
-		 *  [500, ...) [..., 500)    >    (Y)   [..., 500) is to the right
-		 *  [500, ...) [..., 501)    =    (N)   [..., 500) is to the left
-		 *  [500, ...) [..., 502)    <    (N)   [..., 500) is to the left
+		 *	ARGUMENT   CENTROID		CMP   ADJ
+		 *	[500, ...) [..., 499)	 >	  (N)	[..., 500) is to the right
+		 *	[500, ...) [..., 500)	 >	  (Y)	[..., 500) is to the right
+		 *	[500, ...) [..., 501)	 =	  (N)	[..., 500) is to the left
+		 *	[500, ...) [..., 502)	 <	  (N)	[..., 500) is to the left
 		 *
 		 * We must search left when the argument is smaller than or equal to
 		 * the centroid. Otherwise search right. We don't need to check
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 4f35992629e..6a0fcc20dab 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -696,7 +696,7 @@ similar_escape(PG_FUNCTION_ARGS)
 				ereport(ERROR,
 						(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
 						 errmsg("invalid escape string"),
-						 errhint("Escape string must be empty or one character.")));
+				  errhint("Escape string must be empty or one character.")));
 		}
 	}
 
@@ -742,7 +742,8 @@ similar_escape(PG_FUNCTION_ARGS)
 
 		if (elen > 1)
 		{
-			int mblen = pg_mblen(p);
+			int			mblen = pg_mblen(p);
+
 			if (mblen > 1)
 			{
 				/* slow, multi-byte path */
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index f27131edd16..0bfeb5e3fd7 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -466,7 +466,7 @@ format_procedure_parts(Oid procedure_oid, List **objnames, List **objargs)
 	*objargs = NIL;
 	for (i = 0; i < nargs; i++)
 	{
-		Oid		thisargtype = procform->proargtypes.values[i];
+		Oid			thisargtype = procform->proargtypes.values[i];
 
 		*objargs = lappend(*objargs, format_type_be_qualified(thisargtype));
 	}
@@ -1637,7 +1637,7 @@ regroleout(PG_FUNCTION_ARGS)
 }
 
 /*
- *		regrolerecv	- converts external binary format to regrole
+ *		regrolerecv - converts external binary format to regrole
  */
 Datum
 regrolerecv(PG_FUNCTION_ARGS)
@@ -1647,7 +1647,7 @@ regrolerecv(PG_FUNCTION_ARGS)
 }
 
 /*
- *		regrolesend	- converts regrole to binary format
+ *		regrolesend - converts regrole to binary format
  */
 Datum
 regrolesend(PG_FUNCTION_ARGS)
@@ -1680,7 +1680,7 @@ regnamespacein(PG_FUNCTION_ARGS)
 		strspn(nsp_name_or_oid, "0123456789") == strlen(nsp_name_or_oid))
 	{
 		result = DatumGetObjectId(DirectFunctionCall1(oidin,
-										CStringGetDatum(nsp_name_or_oid)));
+										  CStringGetDatum(nsp_name_or_oid)));
 		PG_RETURN_OID(result);
 	}
 
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index f6bec8be9bc..88dd3faf2d9 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -3274,7 +3274,7 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
 		{
 			int			fnum = attnums[idx];
 			char	   *name,
-				   *val;
+					   *val;
 
 			name = SPI_fname(tupdesc, fnum);
 			val = SPI_getvalue(violator, tupdesc, fnum);
@@ -3298,11 +3298,11 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
 						RelationGetRelationName(fk_rel),
 						NameStr(riinfo->conname)),
 				 has_perm ?
-					 errdetail("Key (%s)=(%s) is not present in table \"%s\".",
-							   key_names.data, key_values.data,
-							   RelationGetRelationName(pk_rel)) :
-					 errdetail("Key is not present in table \"%s\".",
-							   RelationGetRelationName(pk_rel)),
+				 errdetail("Key (%s)=(%s) is not present in table \"%s\".",
+						   key_names.data, key_values.data,
+						   RelationGetRelationName(pk_rel)) :
+				 errdetail("Key is not present in table \"%s\".",
+						   RelationGetRelationName(pk_rel)),
 				 errtableconstraint(fk_rel, NameStr(riinfo->conname))));
 	else
 		ereport(ERROR,
@@ -3315,8 +3315,8 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
 			errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
 					  key_names.data, key_values.data,
 					  RelationGetRelationName(fk_rel)) :
-					errdetail("Key is still referenced from table \"%s\".",
-					  RelationGetRelationName(fk_rel)),
+				 errdetail("Key is still referenced from table \"%s\".",
+						   RelationGetRelationName(fk_rel)),
 				 errtableconstraint(fk_rel, NameStr(riinfo->conname))));
 }
 
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 0585251d8fe..c404ae5e4c8 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -106,8 +106,8 @@ typedef struct
 	int			wrapColumn;		/* max line length, or -1 for no limit */
 	int			indentLevel;	/* current indent level for prettyprint */
 	bool		varprefix;		/* TRUE to print prefixes on Vars */
-	ParseExprKind special_exprkind;	/* set only for exprkinds needing */
-									/* special handling */
+	ParseExprKind special_exprkind;		/* set only for exprkinds needing */
+	/* special handling */
 } deparse_context;
 
 /*
@@ -350,7 +350,7 @@ static void make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
 static void make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
 			 int prettyFlags, int wrapColumn);
 static void get_tablesample_def(TableSampleClause *tablesample,
-								deparse_context *context);
+					deparse_context *context);
 static void get_query_def(Query *query, StringInfo buf, List *parentnamespace,
 			  TupleDesc resultDesc,
 			  int prettyFlags, int wrapColumn, int startIndent);
@@ -361,8 +361,8 @@ static void get_select_query_def(Query *query, deparse_context *context,
 static void get_insert_query_def(Query *query, deparse_context *context);
 static void get_update_query_def(Query *query, deparse_context *context);
 static void get_update_query_targetlist_def(Query *query, List *targetList,
-									deparse_context *context,
-									RangeTblEntry *rte);
+								deparse_context *context,
+								RangeTblEntry *rte);
 static void get_delete_query_def(Query *query, deparse_context *context);
 static void get_utility_query_def(Query *query, deparse_context *context);
 static void get_basic_select_query(Query *query, deparse_context *context,
@@ -376,7 +376,7 @@ static Node *get_rule_sortgroupclause(Index ref, List *tlist,
 						 bool force_colno,
 						 deparse_context *context);
 static void get_rule_groupingset(GroupingSet *gset, List *targetlist,
-								 bool omit_parens, deparse_context *context);
+					 bool omit_parens, deparse_context *context);
 static void get_rule_orderby(List *orderList, List *targetList,
 				 bool force_colno, deparse_context *context);
 static void get_rule_windowclause(Query *query, deparse_context *context);
@@ -424,9 +424,9 @@ static void printSubscripts(ArrayRef *aref, deparse_context *context);
 static char *get_relation_name(Oid relid);
 static char *generate_relation_name(Oid relid, List *namespaces);
 static char *generate_function_name(Oid funcid, int nargs,
-							List *argnames, Oid *argtypes,
-							bool has_variadic, bool *use_variadic_p,
-							ParseExprKind special_exprkind);
+					   List *argnames, Oid *argtypes,
+					   bool has_variadic, bool *use_variadic_p,
+					   ParseExprKind special_exprkind);
 static char *generate_operator_name(Oid operid, Oid arg1, Oid arg2);
 static text *string_to_text(char *str);
 static char *flatten_reloptions(Oid relid);
@@ -1963,7 +1963,7 @@ pg_get_functiondef(PG_FUNCTION_ARGS)
 	print_function_trftypes(&buf, proctup);
 
 	appendStringInfo(&buf, "\n LANGUAGE %s\n",
-					 quote_identifier(get_language_name(proc->prolang, false)));
+				  quote_identifier(get_language_name(proc->prolang, false)));
 
 	/* Emit some miscellaneous options on one line */
 	oldlen = buf.len;
@@ -2364,13 +2364,13 @@ is_input_argument(int nth, const char *argmodes)
 static void
 print_function_trftypes(StringInfo buf, HeapTuple proctup)
 {
-	Oid	*trftypes;
-	int	ntypes;
+	Oid		   *trftypes;
+	int			ntypes;
 
 	ntypes = get_func_trftypes(proctup, &trftypes);
 	if (ntypes > 0)
 	{
-		int	i;
+		int			i;
 
 		appendStringInfoString(buf, "\n TRANSFORM ");
 		for (i = 0; i < ntypes; i++)
@@ -4714,7 +4714,7 @@ get_basic_select_query(Query *query, deparse_context *context,
 	/* Add the GROUP BY clause if given */
 	if (query->groupClause != NULL || query->groupingSets != NULL)
 	{
-		ParseExprKind	save_exprkind;
+		ParseExprKind save_exprkind;
 
 		appendContextKeyword(context, " GROUP BY ",
 							 -PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
@@ -5045,13 +5045,13 @@ get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno,
 	expr = (Node *) tle->expr;
 
 	/*
-	 * Use column-number form if requested by caller.  Otherwise, if expression
-	 * is a constant, force it to be dumped with an explicit cast as decoration
-	 * --- this is because a simple integer constant is ambiguous (and will be
-	 * misinterpreted by findTargetlistEntry()) if we dump it without any
-	 * decoration.  If it's anything more complex than a simple Var, then force
-	 * extra parens around it, to ensure it can't be misinterpreted as a cube()
-	 * or rollup() construct.
+	 * Use column-number form if requested by caller.  Otherwise, if
+	 * expression is a constant, force it to be dumped with an explicit cast
+	 * as decoration --- this is because a simple integer constant is
+	 * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
+	 * dump it without any decoration.  If it's anything more complex than a
+	 * simple Var, then force extra parens around it, to ensure it can't be
+	 * misinterpreted as a cube() or rollup() construct.
 	 */
 	if (force_colno)
 	{
@@ -5067,14 +5067,15 @@ get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno,
 		/*
 		 * We must force parens for function-like expressions even if
 		 * PRETTY_PAREN is off, since those are the ones in danger of
-		 * misparsing. For other expressions we need to force them
-		 * only if PRETTY_PAREN is on, since otherwise the expression
-		 * will output them itself. (We can't skip the parens.)
+		 * misparsing. For other expressions we need to force them only if
+		 * PRETTY_PAREN is on, since otherwise the expression will output them
+		 * itself. (We can't skip the parens.)
 		 */
-		bool	need_paren = (PRETTY_PAREN(context)
-							  || IsA(expr, FuncExpr)
-							  || IsA(expr, Aggref)
-							  || IsA(expr, WindowFunc));
+		bool		need_paren = (PRETTY_PAREN(context)
+								  || IsA(expr, FuncExpr)
+								  ||IsA(expr, Aggref)
+								  ||IsA(expr, WindowFunc));
+
 		if (need_paren)
 			appendStringInfoString(context->buf, "(");
 		get_rule_expr(expr, context, true);
@@ -5110,7 +5111,7 @@ get_rule_groupingset(GroupingSet *gset, List *targetlist,
 
 				foreach(l, gset->content)
 				{
-					Index ref = lfirst_int(l);
+					Index		ref = lfirst_int(l);
 
 					appendStringInfoString(buf, sep);
 					get_rule_sortgroupclause(ref, targetlist,
@@ -5502,7 +5503,7 @@ get_insert_query_def(Query *query, deparse_context *context)
 		}
 		else if (confl->constraint != InvalidOid)
 		{
-			char   *constraint = get_constraint_name(confl->constraint);
+			char	   *constraint = get_constraint_name(confl->constraint);
 
 			appendStringInfo(buf, " ON CONSTRAINT %s",
 							 quote_qualified_identifier(NULL, constraint));
@@ -7917,9 +7918,9 @@ get_rule_expr(Node *node, deparse_context *context,
 
 		case T_InferenceElem:
 			{
-				InferenceElem  *iexpr = (InferenceElem *) node;
-				bool			varprefix = context->varprefix;
-				bool			need_parens;
+				InferenceElem *iexpr = (InferenceElem *) node;
+				bool		varprefix = context->varprefix;
+				bool		need_parens;
 
 				/*
 				 * InferenceElem can only refer to target relation, so a
@@ -7948,13 +7949,13 @@ get_rule_expr(Node *node, deparse_context *context,
 
 				if (iexpr->infercollid)
 					appendStringInfo(buf, " COLLATE %s",
-									 generate_collation_name(iexpr->infercollid));
+								generate_collation_name(iexpr->infercollid));
 
 				/* Add the operator class name, if not default */
 				if (iexpr->inferopclass)
 				{
-					Oid		inferopclass = iexpr->inferopclass;
-					Oid		inferopcinputtype = get_opclass_input_type(iexpr->inferopclass);
+					Oid			inferopclass = iexpr->inferopclass;
+					Oid			inferopcinputtype = get_opclass_input_type(iexpr->inferopclass);
 
 					get_opclass_name(inferopclass, inferopcinputtype, buf);
 				}
diff --git a/src/backend/utils/adt/tsquery_op.c b/src/backend/utils/adt/tsquery_op.c
index bd6fc250990..8afd558db33 100644
--- a/src/backend/utils/adt/tsquery_op.c
+++ b/src/backend/utils/adt/tsquery_op.c
@@ -249,6 +249,7 @@ cmp_string(const void *a, const void *b)
 {
 	const char *sa = *((const char **) a);
 	const char *sb = *((const char **) b);
+
 	return strcmp(sa, sb);
 }
 
@@ -300,8 +301,8 @@ tsq_mcontains(PG_FUNCTION_ARGS)
 		result = false;
 	else
 	{
-		int i;
-		int j = 0;
+		int			i;
+		int			j = 0;
 
 		for (i = 0; i < ex_nvalues; i++)
 		{
diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c
index 1d7bb02ca46..ce1d9abddea 100644
--- a/src/backend/utils/adt/txid.c
+++ b/src/backend/utils/adt/txid.c
@@ -142,8 +142,10 @@ cmp_txid(const void *aa, const void *bb)
 static void
 sort_snapshot(TxidSnapshot *snap)
 {
-	txid	last = 0;
-	int		nxip, idx1, idx2;
+	txid		last = 0;
+	int			nxip,
+				idx1,
+				idx2;
 
 	if (snap->nxip > 1)
 	{
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 5fd2bef617f..779729d724a 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -56,14 +56,15 @@ typedef struct
 
 typedef struct
 {
-	char			   *buf1;		/* 1st string, or abbreviation original string buf */
-	char			   *buf2;		/* 2nd string, or abbreviation strxfrm() buf */
-	int					buflen1;
-	int					buflen2;
-	bool				collate_c;
-	hyperLogLogState	abbr_card;	/* Abbreviated key cardinality state */
-	hyperLogLogState	full_card;	/* Full key cardinality state */
-	double				prop_card;	/* Required cardinality proportion */
+	char	   *buf1;			/* 1st string, or abbreviation original string
+								 * buf */
+	char	   *buf2;			/* 2nd string, or abbreviation strxfrm() buf */
+	int			buflen1;
+	int			buflen2;
+	bool		collate_c;
+	hyperLogLogState abbr_card; /* Abbreviated key cardinality state */
+	hyperLogLogState full_card; /* Full key cardinality state */
+	double		prop_card;		/* Required cardinality proportion */
 #ifdef HAVE_LOCALE_T
 	pg_locale_t locale;
 #endif
@@ -82,9 +83,9 @@ typedef struct
 #define PG_RETURN_UNKNOWN_P(x)		PG_RETURN_POINTER(x)
 
 static void btsortsupport_worker(SortSupport ssup, Oid collid);
-static int bttextfastcmp_c(Datum x, Datum y, SortSupport ssup);
-static int bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup);
-static int bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup);
+static int	bttextfastcmp_c(Datum x, Datum y, SortSupport ssup);
+static int	bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup);
+static int	bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup);
 static Datum bttext_abbrev_convert(Datum original, SortSupport ssup);
 static bool bttext_abbrev_abort(int memtupcount, SortSupport ssup);
 static int32 text_length(Datum str);
@@ -1415,8 +1416,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
 		}
 
 		/*
-		 * memcmp() can't tell us which of two unequal strings sorts first, but
-		 * it's a cheap way to tell if they're equal.  Testing shows that
+		 * memcmp() can't tell us which of two unequal strings sorts first,
+		 * but it's a cheap way to tell if they're equal.  Testing shows that
 		 * memcmp() followed by strcoll() is only trivially slower than
 		 * strcoll() by itself, so we don't lose much if this doesn't work out
 		 * very often, and if it does - for example, because there are many
@@ -1726,9 +1727,9 @@ bttextcmp(PG_FUNCTION_ARGS)
 Datum
 bttextsortsupport(PG_FUNCTION_ARGS)
 {
-	SortSupport		ssup = (SortSupport) PG_GETARG_POINTER(0);
-	Oid				collid = ssup->ssup_collation;
-	MemoryContext	oldcontext;
+	SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+	Oid			collid = ssup->ssup_collation;
+	MemoryContext oldcontext;
 
 	oldcontext = MemoryContextSwitchTo(ssup->ssup_cxt);
 
@@ -1742,30 +1743,30 @@ bttextsortsupport(PG_FUNCTION_ARGS)
 static void
 btsortsupport_worker(SortSupport ssup, Oid collid)
 {
-	bool				abbreviate = ssup->abbreviate;
-	bool				collate_c = false;
-	TextSortSupport	   *tss;
+	bool		abbreviate = ssup->abbreviate;
+	bool		collate_c = false;
+	TextSortSupport *tss;
 
 #ifdef HAVE_LOCALE_T
-	pg_locale_t			locale = 0;
+	pg_locale_t locale = 0;
 #endif
 
 	/*
 	 * If possible, set ssup->comparator to a function which can be used to
 	 * directly compare two datums.  If we can do this, we'll avoid the
-	 * overhead of a trip through the fmgr layer for every comparison,
-	 * which can be substantial.
+	 * overhead of a trip through the fmgr layer for every comparison, which
+	 * can be substantial.
 	 *
-	 * Most typically, we'll set the comparator to bttextfastcmp_locale,
-	 * which uses strcoll() to perform comparisons.  However, if LC_COLLATE
-	 * = C, we can make things quite a bit faster with bttextfastcmp_c,
-	 * which uses memcmp() rather than strcoll().
+	 * Most typically, we'll set the comparator to bttextfastcmp_locale, which
+	 * uses strcoll() to perform comparisons.  However, if LC_COLLATE = C, we
+	 * can make things quite a bit faster with bttextfastcmp_c, which uses
+	 * memcmp() rather than strcoll().
 	 *
-	 * There is a further exception on Windows.  When the database encoding
-	 * is UTF-8 and we are not using the C collation, complex hacks are
-	 * required.  We don't currently have a comparator that handles that case,
-	 * so we fall back on the slow method of having the sort code invoke
-	 * bttextcmp() via the fmgr trampoline.
+	 * There is a further exception on Windows.  When the database encoding is
+	 * UTF-8 and we are not using the C collation, complex hacks are required.
+	 * We don't currently have a comparator that handles that case, so we fall
+	 * back on the slow method of having the sort code invoke bttextcmp() via
+	 * the fmgr trampoline.
 	 */
 	if (lc_collate_is_c(collid))
 	{
@@ -1808,13 +1809,13 @@ btsortsupport_worker(SortSupport ssup, Oid collid)
 	 * It's possible that there are platforms where the use of abbreviated
 	 * keys should be disabled at compile time.  Having only 4 byte datums
 	 * could make worst-case performance drastically more likely, for example.
-	 * Moreover, Darwin's strxfrm() implementations is known to not effectively
-	 * concentrate a significant amount of entropy from the original string in
-	 * earlier transformed blobs.  It's possible that other supported platforms
-	 * are similarly encumbered.  However, even in those cases, the abbreviated
-	 * keys optimization may win, and if it doesn't, the "abort abbreviation"
-	 * code may rescue us.  So, for now, we don't disable this anywhere on the
-	 * basis of performance.
+	 * Moreover, Darwin's strxfrm() implementations is known to not
+	 * effectively concentrate a significant amount of entropy from the
+	 * original string in earlier transformed blobs.  It's possible that other
+	 * supported platforms are similarly encumbered.  However, even in those
+	 * cases, the abbreviated keys optimization may win, and if it doesn't,
+	 * the "abort abbreviation" code may rescue us.  So, for now, we don't
+	 * disable this anywhere on the basis of performance.
 	 */
 
 	/*
@@ -1893,16 +1894,16 @@ bttextfastcmp_c(Datum x, Datum y, SortSupport ssup)
 static int
 bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup)
 {
-	text			   *arg1 = DatumGetTextPP(x);
-	text			   *arg2 = DatumGetTextPP(y);
-	TextSortSupport	   *tss = (TextSortSupport *) ssup->ssup_extra;
+	text	   *arg1 = DatumGetTextPP(x);
+	text	   *arg2 = DatumGetTextPP(y);
+	TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
 
 	/* working state */
-	char			   *a1p,
-					   *a2p;
-	int					len1,
-						len2,
-						result;
+	char	   *a1p,
+			   *a2p;
+	int			len1,
+				len2,
+				result;
 
 	a1p = VARDATA_ANY(arg1);
 	a2p = VARDATA_ANY(arg2);
@@ -1943,9 +1944,9 @@ bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup)
 		result = strcoll(tss->buf1, tss->buf2);
 
 	/*
-	 * In some locales strcoll() can claim that nonidentical strings are equal.
-	 * Believing that would be bad news for a number of reasons, so we follow
-	 * Perl's lead and sort "equal" strings according to strcmp().
+	 * In some locales strcoll() can claim that nonidentical strings are
+	 * equal. Believing that would be bad news for a number of reasons, so we
+	 * follow Perl's lead and sort "equal" strings according to strcmp().
 	 */
 	if (result == 0)
 		result = strcmp(tss->buf1, tss->buf2);
@@ -1966,9 +1967,9 @@ done:
 static int
 bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup)
 {
-	char   *a = (char *) &x;
-	char   *b = (char *) &y;
-	int 	result;
+	char	   *a = (char *) &x;
+	char	   *b = (char *) &y;
+	int			result;
 
 	result = memcmp(a, b, sizeof(Datum));
 
@@ -1989,15 +1990,15 @@ bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup)
 static Datum
 bttext_abbrev_convert(Datum original, SortSupport ssup)
 {
-	TextSortSupport	   *tss = (TextSortSupport *) ssup->ssup_extra;
-	text			   *authoritative = DatumGetTextPP(original);
-	char			   *authoritative_data = VARDATA_ANY(authoritative);
+	TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
+	text	   *authoritative = DatumGetTextPP(original);
+	char	   *authoritative_data = VARDATA_ANY(authoritative);
 
 	/* working state */
-	Datum				res;
-	char			   *pres;
-	int					len;
-	uint32				hash;
+	Datum		res;
+	char	   *pres;
+	int			len;
+	uint32		hash;
 
 	/*
 	 * Abbreviated key representation is a pass-by-value Datum that is treated
@@ -2009,8 +2010,8 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
 	len = VARSIZE_ANY_EXHDR(authoritative);
 
 	/*
-	 * If we're using the C collation, use memcmp(), rather than strxfrm(),
-	 * to abbreviate keys.  The full comparator for the C locale is always
+	 * If we're using the C collation, use memcmp(), rather than strxfrm(), to
+	 * abbreviate keys.  The full comparator for the C locale is always
 	 * memcmp(), and we can't risk having this give a different answer.
 	 * Besides, this should be faster, too.
 	 */
@@ -2018,7 +2019,7 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
 		memcpy(pres, authoritative_data, Min(len, sizeof(Datum)));
 	else
 	{
-		Size			bsize;
+		Size		bsize;
 
 		/*
 		 * We're not using the C collation, so fall back on strxfrm.
@@ -2075,8 +2076,8 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
 	/*
 	 * Maintain approximate cardinality of both abbreviated keys and original,
 	 * authoritative keys using HyperLogLog.  Used as cheap insurance against
-	 * the worst case, where we do many string transformations for no saving in
-	 * full strcoll()-based comparisons.  These statistics are used by
+	 * the worst case, where we do many string transformations for no saving
+	 * in full strcoll()-based comparisons.  These statistics are used by
 	 * bttext_abbrev_abort().
 	 *
 	 * First, Hash key proper, or a significant fraction of it.  Mix in length
@@ -2094,8 +2095,8 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
 	/* Hash abbreviated key */
 #if SIZEOF_DATUM == 8
 	{
-		uint32				lohalf,
-							hihalf;
+		uint32		lohalf,
+					hihalf;
 
 		lohalf = (uint32) res;
 		hihalf = (uint32) (res >> 32);
@@ -2118,8 +2119,9 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
 static bool
 bttext_abbrev_abort(int memtupcount, SortSupport ssup)
 {
-	TextSortSupport	   *tss = (TextSortSupport *) ssup->ssup_extra;
-	double				abbrev_distinct, key_distinct;
+	TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
+	double		abbrev_distinct,
+				key_distinct;
 
 	Assert(ssup->abbreviate);
 
@@ -2131,9 +2133,9 @@ bttext_abbrev_abort(int memtupcount, SortSupport ssup)
 	key_distinct = estimateHyperLogLog(&tss->full_card);
 
 	/*
-	 * Clamp cardinality estimates to at least one distinct value.  While NULLs
-	 * are generally disregarded, if only NULL values were seen so far, that
-	 * might misrepresent costs if we failed to clamp.
+	 * Clamp cardinality estimates to at least one distinct value.  While
+	 * NULLs are generally disregarded, if only NULL values were seen so far,
+	 * that might misrepresent costs if we failed to clamp.
 	 */
 	if (abbrev_distinct <= 1.0)
 		abbrev_distinct = 1.0;
@@ -2149,7 +2151,7 @@ bttext_abbrev_abort(int memtupcount, SortSupport ssup)
 #ifdef TRACE_SORT
 	if (trace_sort)
 	{
-		double norm_abbrev_card = abbrev_distinct / (double) memtupcount;
+		double		norm_abbrev_card = abbrev_distinct / (double) memtupcount;
 
 		elog(LOG, "bttext_abbrev: abbrev_distinct after %d: %f "
 			 "(key_distinct: %f, norm_abbrev_card: %f, prop_card: %f)",
@@ -2180,26 +2182,26 @@ bttext_abbrev_abort(int memtupcount, SortSupport ssup)
 		 * When we have exceeded 10,000 tuples, decay required cardinality
 		 * aggressively for next call.
 		 *
-		 * This is useful because the number of comparisons required on average
-		 * increases at a linearithmic rate, and at roughly 10,000 tuples that
-		 * factor will start to dominate over the linear costs of string
-		 * transformation (this is a conservative estimate).  The decay rate is
-		 * chosen to be a little less aggressive than halving -- which (since
-		 * we're called at points at which memtupcount has doubled) would never
-		 * see the cost model actually abort past the first call following a
-		 * decay.  This decay rate is mostly a precaution against a sudden,
-		 * violent swing in how well abbreviated cardinality tracks full key
-		 * cardinality.  The decay also serves to prevent a marginal case from
-		 * being aborted too late, when too much has already been invested in
-		 * string transformation.
+		 * This is useful because the number of comparisons required on
+		 * average increases at a linearithmic rate, and at roughly 10,000
+		 * tuples that factor will start to dominate over the linear costs of
+		 * string transformation (this is a conservative estimate).  The decay
+		 * rate is chosen to be a little less aggressive than halving -- which
+		 * (since we're called at points at which memtupcount has doubled)
+		 * would never see the cost model actually abort past the first call
+		 * following a decay.  This decay rate is mostly a precaution against
+		 * a sudden, violent swing in how well abbreviated cardinality tracks
+		 * full key cardinality.  The decay also serves to prevent a marginal
+		 * case from being aborted too late, when too much has already been
+		 * invested in string transformation.
 		 *
-		 * It's possible for sets of several million distinct strings with mere
-		 * tens of thousands of distinct abbreviated keys to still benefit very
-		 * significantly.  This will generally occur provided each abbreviated
-		 * key is a proxy for a roughly uniform number of the set's full keys.
-		 * If it isn't so, we hope to catch that early and abort.  If it isn't
-		 * caught early, by the time the problem is apparent it's probably not
-		 * worth aborting.
+		 * It's possible for sets of several million distinct strings with
+		 * mere tens of thousands of distinct abbreviated keys to still
+		 * benefit very significantly.  This will generally occur provided
+		 * each abbreviated key is a proxy for a roughly uniform number of the
+		 * set's full keys. If it isn't so, we hope to catch that early and
+		 * abort.  If it isn't caught early, by the time the problem is
+		 * apparent it's probably not worth aborting.
 		 */
 		if (memtupcount > 10000)
 			tss->prop_card *= 0.65;
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 8bb7144ecf9..99bc832ab82 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -1405,7 +1405,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg, bool preserve_whitespace,
 			if (*(utf8string + count))
 			{
 				res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
-													   utf8string + count, NULL);
+												   utf8string + count, NULL);
 				if (res_code != 0 || xmlerrcxt->err_occurred)
 					xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_XML_CONTENT,
 								"invalid XML content");
@@ -3697,7 +3697,7 @@ xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj,
 					for (i = 0; i < result; i++)
 					{
 						datum = PointerGetDatum(xml_xmlnodetoxmltype(xpathobj->nodesetval->nodeTab[i],
-																	 xmlerrcxt));
+																 xmlerrcxt));
 						(void) accumArrayResult(astate, datum, false,
 												XMLOID, CurrentMemoryContext);
 					}
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 1907a874588..c5cc4011bf8 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -226,7 +226,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
 		chunk = (InvalidationChunk *)
 			MemoryContextAlloc(CurTransactionContext,
 							   offsetof(InvalidationChunk, msgs) +
-					FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));
+						 FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));
 		chunk->nitems = 0;
 		chunk->maxitems = FIRSTCHUNKSIZE;
 		chunk->next = *listHdr;
@@ -240,7 +240,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
 		chunk = (InvalidationChunk *)
 			MemoryContextAlloc(CurTransactionContext,
 							   offsetof(InvalidationChunk, msgs) +
-						 chunksize * sizeof(SharedInvalidationMessage));
+							   chunksize * sizeof(SharedInvalidationMessage));
 		chunk->nitems = 0;
 		chunk->maxitems = chunksize;
 		chunk->next = *listHdr;
@@ -333,6 +333,7 @@ AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
 	msg.cc.id = (int8) id;
 	msg.cc.dbId = dbId;
 	msg.cc.hashValue = hashValue;
+
 	/*
 	 * Define padding bytes in SharedInvalidationMessage structs to be
 	 * defined. Otherwise the sinvaladt.c ringbuffer, which is accessed by
@@ -712,11 +713,11 @@ PrepareInvalidationState(void)
 	myInfo->my_level = GetCurrentTransactionNestLevel();
 
 	/*
-	 * If there's any previous entry, this one should be for a deeper
-	 * nesting level.
+	 * If there's any previous entry, this one should be for a deeper nesting
+	 * level.
 	 */
 	Assert(transInvalInfo == NULL ||
-		myInfo->my_level > transInvalInfo->my_level);
+		   myInfo->my_level > transInvalInfo->my_level);
 
 	transInvalInfo = myInfo;
 }
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index f259751e157..7b32247d34e 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -3012,8 +3012,8 @@ get_tablesample_method_name(Oid tsmid)
 	tuple = SearchSysCache1(TABLESAMPLEMETHODOID, ObjectIdGetDatum(tsmid));
 	if (HeapTupleIsValid(tuple))
 	{
-		Form_pg_tablesample_method	tup =
-			(Form_pg_tablesample_method) GETSTRUCT(tuple);
+		Form_pg_tablesample_method tup =
+		(Form_pg_tablesample_method) GETSTRUCT(tuple);
 		char	   *result;
 
 		result = pstrdup(NameStr(tup->tsmname));
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 9a26a4efc5a..e6808e75763 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -153,8 +153,8 @@ CreateCachedPlan(Node *raw_parse_tree,
 	CachedPlanSource *plansource;
 	MemoryContext source_context;
 	MemoryContext oldcxt;
-	Oid user_id;
-	int security_context;
+	Oid			user_id;
+	int			security_context;
 
 	Assert(query_string != NULL);		/* required as of 8.4 */
 
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index e745006b73b..f60f3cb234b 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -873,7 +873,7 @@ equalPolicy(RowSecurityPolicy *policy1, RowSecurityPolicy *policy2)
 			return false;
 		if (policy1->hassublinks != policy2->hassublinks)
 			return false;
-		if (strcmp(policy1->policy_name,policy2->policy_name) != 0)
+		if (strcmp(policy1->policy_name, policy2->policy_name) != 0)
 			return false;
 		if (ARR_DIMS(policy1->roles)[0] != ARR_DIMS(policy2->roles)[0])
 			return false;
@@ -906,8 +906,8 @@ equalPolicy(RowSecurityPolicy *policy1, RowSecurityPolicy *policy2)
 static bool
 equalRSDesc(RowSecurityDesc *rsdesc1, RowSecurityDesc *rsdesc2)
 {
-	ListCell 	*lc,
-				*rc;
+	ListCell   *lc,
+			   *rc;
 
 	if (rsdesc1 == NULL && rsdesc2 == NULL)
 		return true;
@@ -922,10 +922,10 @@ equalRSDesc(RowSecurityDesc *rsdesc1, RowSecurityDesc *rsdesc2)
 	/* RelationBuildRowSecurity should build policies in order */
 	forboth(lc, rsdesc1->policies, rc, rsdesc2->policies)
 	{
-		RowSecurityPolicy	   *l = (RowSecurityPolicy *) lfirst(lc);
-		RowSecurityPolicy	   *r = (RowSecurityPolicy *) lfirst(rc);
+		RowSecurityPolicy *l = (RowSecurityPolicy *) lfirst(lc);
+		RowSecurityPolicy *r = (RowSecurityPolicy *) lfirst(rc);
 
-		if (!equalPolicy(l,r))
+		if (!equalPolicy(l, r))
 			return false;
 	}
 
@@ -3460,7 +3460,7 @@ RelationCacheInitializePhase3(void)
 		{
 			RelationBuildRowSecurity(relation);
 
-			Assert (relation->rd_rsdesc != NULL);
+			Assert(relation->rd_rsdesc != NULL);
 			restart = true;
 		}
 
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 7def1be32ae..58f90f672e0 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -634,7 +634,7 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		16
 	},
-	{ReplicationOriginRelationId,	/* REPLORIGNAME */
+	{ReplicationOriginRelationId,		/* REPLORIGNAME */
 		ReplicationOriginNameIndex,
 		1,
 		{
@@ -701,26 +701,26 @@ static const struct cachedesc cacheinfo[] = {
 		4
 	},
 	{TransformRelationId,		/* TRFOID */
-	 TransformOidIndexId,
-	 1,
-	 {
-		 ObjectIdAttributeNumber,
-		 0,
-		 0,
-		 0,
-	 },
-	 16
+		TransformOidIndexId,
+		1,
+		{
+			ObjectIdAttributeNumber,
+			0,
+			0,
+			0,
+		},
+		16
 	},
 	{TransformRelationId,		/* TRFTYPELANG */
-	 TransformTypeLangIndexId,
-	 2,
-	 {
-		 Anum_pg_transform_trftype,
-		 Anum_pg_transform_trflang,
-		 0,
-		 0,
-	 },
-	 16
+		TransformTypeLangIndexId,
+		2,
+		{
+			Anum_pg_transform_trftype,
+			Anum_pg_transform_trflang,
+			0,
+			0,
+		},
+		16
 	},
 	{TSConfigMapRelationId,		/* TSCONFIGMAP */
 		TSConfigMapIndexId,
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index dfd102a1fbd..088c714821b 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -1592,8 +1592,8 @@ FlushErrorState(void)
 void
 ThrowErrorData(ErrorData *edata)
 {
-	ErrorData *newedata;
-	MemoryContext	oldcontext;
+	ErrorData  *newedata;
+	MemoryContext oldcontext;
 
 	if (!errstart(edata->elevel, edata->filename, edata->lineno,
 				  edata->funcname, NULL))
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 46bc1f238f2..cd3db871e0b 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -702,7 +702,7 @@ Size
 EstimateLibraryStateSpace(void)
 {
 	DynamicFileList *file_scanner;
-	Size	size = 1;
+	Size		size = 1;
 
 	for (file_scanner = file_list;
 		 file_scanner != NULL;
@@ -724,7 +724,7 @@ SerializeLibraryState(Size maxsize, char *start_address)
 		 file_scanner != NULL;
 		 file_scanner = file_scanner->next)
 	{
-		Size len;
+		Size		len;
 
 		len = strlcpy(start_address, file_scanner->filename, maxsize) + 1;
 		Assert(len < maxsize);
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index fccef382497..2b09076b61a 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -886,15 +886,14 @@ int
 get_func_trftypes(HeapTuple procTup,
 				  Oid **p_trftypes)
 {
-
 	Datum		protrftypes;
 	ArrayType  *arr;
 	int			nelems;
-	bool			isNull;
+	bool		isNull;
 
 	protrftypes = SysCacheGetAttr(PROCOID, procTup,
-									 Anum_pg_proc_protrftypes,
-									 &isNull);
+								  Anum_pg_proc_protrftypes,
+								  &isNull);
 	if (!isNull)
 	{
 		/*
@@ -903,7 +902,7 @@ get_func_trftypes(HeapTuple procTup,
 		 * deconstruct_array() since the array data is just going to look like
 		 * a C array of values.
 		 */
-		arr = DatumGetArrayTypeP(protrftypes);		/* ensure not toasted */
+		arr = DatumGetArrayTypeP(protrftypes);	/* ensure not toasted */
 		nelems = ARR_DIMS(arr)[0];
 		if (ARR_NDIM(arr) != 1 ||
 			nelems < 0 ||
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index b0d85af14db..2b53c19fb97 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -246,6 +246,7 @@ SwitchToSharedLatch(void)
 	Assert(MyProc != NULL);
 
 	MyLatch = &MyProc->procLatch;
+
 	/*
 	 * Set the shared latch as the local one might have been set. This
 	 * shouldn't normally be necessary as code is supposed to check the
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index debadf0f94c..aa67f75c0ca 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -1107,7 +1107,7 @@ ShutdownPostgres(int code, Datum arg)
 static void
 StatementTimeoutHandler(void)
 {
-	int sig = SIGINT;
+	int			sig = SIGINT;
 
 	/*
 	 * During authentication the timeout is used to deal with
diff --git a/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl b/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
index fa60cdc55a0..e73ed4d865e 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
@@ -23,7 +23,7 @@ open(FILE, $in_file) || die("cannot open $in_file");
 
 while (<FILE>)
 {
-	next if (! m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
+	next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
 	$u = $1;
 	$c = $2;
 	$c =~ s/ //g;
@@ -42,7 +42,7 @@ while (<FILE>)
 			printf STDERR "Warning: duplicate GB18030: %08x\n", $code;
 			next;
 		}
-		$arrayu{$utf} = $code;
+		$arrayu{$utf}  = $code;
 		$arrayc{$code} = $utf;
 		$count++;
 	}
diff --git a/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl b/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
index edfb61bcd93..33d108e0251 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
@@ -99,7 +99,7 @@ print FILE "/*\n";
 print FILE " * This file was generated by UCS_to_SHIFT_JIS_2004.pl\n";
 print FILE " */\n";
 print FILE
-  "static const pg_utf_to_local_combined ULmapSHIFT_JIS_2004_combined[] = {\n";
+"static const pg_utf_to_local_combined ULmapSHIFT_JIS_2004_combined[] = {\n";
 
 for $index (sort { $a cmp $b } keys(%array1))
 {
@@ -212,7 +212,7 @@ print FILE "/*\n";
 print FILE " * This file was generated by UCS_to_SHIFT_JIS_2004.pl\n";
 print FILE " */\n";
 print FILE
-  "static const pg_local_to_utf_combined LUmapSHIFT_JIS_2004_combined[] = {\n";
+"static const pg_local_to_utf_combined LUmapSHIFT_JIS_2004_combined[] = {\n";
 
 for $index (sort { $a <=> $b } keys(%array1))
 {
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index 1d9b10f8a7a..09002a77d8a 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -22,7 +22,7 @@ typedef struct
 } codes_t;
 
 /* map Big5 Level 1 to CNS 11643-1992 Plane 1 */
-static const codes_t big5Level1ToCnsPlane1[25] = {	/* range */
+static const codes_t big5Level1ToCnsPlane1[25] = {		/* range */
 	{0xA140, 0x2121},
 	{0xA1F6, 0x2258},
 	{0xA1F7, 0x2257},
@@ -51,7 +51,7 @@ static const codes_t big5Level1ToCnsPlane1[25] = {	/* range */
 };
 
 /* map CNS 11643-1992 Plane 1 to Big5 Level 1 */
-static const codes_t cnsPlane1ToBig5Level1[26] = {	/* range */
+static const codes_t cnsPlane1ToBig5Level1[26] = {		/* range */
 	{0x2121, 0xA140},
 	{0x2257, 0xA1F7},
 	{0x2258, 0xA1F6},
@@ -81,7 +81,7 @@ static const codes_t cnsPlane1ToBig5Level1[26] = {	/* range */
 };
 
 /* map Big5 Level 2 to CNS 11643-1992 Plane 2 */
-static const codes_t big5Level2ToCnsPlane2[48] = {	/* range */
+static const codes_t big5Level2ToCnsPlane2[48] = {		/* range */
 	{0xC940, 0x2121},
 	{0xc94a, 0x0000},
 	{0xC94B, 0x212B},
@@ -133,7 +133,7 @@ static const codes_t big5Level2ToCnsPlane2[48] = {	/* range */
 };
 
 /* map CNS 11643-1992 Plane 2 to Big5 Level 2 */
-static const codes_t cnsPlane2ToBig5Level2[49] = {	/* range */
+static const codes_t cnsPlane2ToBig5Level2[49] = {		/* range */
 	{0x2121, 0xC940},
 	{0x212B, 0xC94B},
 	{0x214C, 0xC9BE},
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 3038d7c9dda..be7ba4f29d8 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -666,11 +666,12 @@ const char *const config_type_names[] =
 
 typedef struct
 {
-	char	unit[MAX_UNIT_LEN + 1];	/* unit, as a string, like "kB" or "min" */
-	int		base_unit;		/* GUC_UNIT_XXX */
-	int		multiplier;		/* If positive, multiply the value with this for
-							 * unit -> base_unit conversion.  If negative,
-							 * divide (with the absolute value) */
+	char		unit[MAX_UNIT_LEN + 1]; /* unit, as a string, like "kB" or
+										 * "min" */
+	int			base_unit;		/* GUC_UNIT_XXX */
+	int			multiplier;		/* If positive, multiply the value with this
+								 * for unit -> base_unit conversion.  If
+								 * negative, divide (with the absolute value) */
 } unit_conversion;
 
 /* Ensure that the constants in the tables don't overflow or underflow */
@@ -684,58 +685,56 @@ typedef struct
 #error XLOG_SEG_SIZE must be between 1MB and 1GB
 #endif
 
-static const char *memory_units_hint =
-	gettext_noop("Valid units for this parameter are \"kB\", \"MB\", \"GB\", and \"TB\".");
+static const char *memory_units_hint = gettext_noop("Valid units for this parameter are \"kB\", \"MB\", \"GB\", and \"TB\".");
 
 static const unit_conversion memory_unit_conversion_table[] =
 {
-	{ "TB",		GUC_UNIT_KB,	 	1024*1024*1024 },
-	{ "GB",		GUC_UNIT_KB,	 	1024*1024 },
-	{ "MB",		GUC_UNIT_KB,	 	1024 },
-	{ "kB",		GUC_UNIT_KB,	 	1 },
+	{"TB", GUC_UNIT_KB, 1024 * 1024 * 1024},
+	{"GB", GUC_UNIT_KB, 1024 * 1024},
+	{"MB", GUC_UNIT_KB, 1024},
+	{"kB", GUC_UNIT_KB, 1},
 
-	{ "TB",		GUC_UNIT_BLOCKS,	(1024*1024*1024) / (BLCKSZ / 1024) },
-	{ "GB",		GUC_UNIT_BLOCKS,	(1024*1024) / (BLCKSZ / 1024) },
-	{ "MB",		GUC_UNIT_BLOCKS,	1024 / (BLCKSZ / 1024) },
-	{ "kB",		GUC_UNIT_BLOCKS,	-(BLCKSZ / 1024) },
+	{"TB", GUC_UNIT_BLOCKS, (1024 * 1024 * 1024) / (BLCKSZ / 1024)},
+	{"GB", GUC_UNIT_BLOCKS, (1024 * 1024) / (BLCKSZ / 1024)},
+	{"MB", GUC_UNIT_BLOCKS, 1024 / (BLCKSZ / 1024)},
+	{"kB", GUC_UNIT_BLOCKS, -(BLCKSZ / 1024)},
 
-	{ "TB",		GUC_UNIT_XBLOCKS,	(1024*1024*1024) / (XLOG_BLCKSZ / 1024) },
-	{ "GB",		GUC_UNIT_XBLOCKS,	(1024*1024) / (XLOG_BLCKSZ / 1024) },
-	{ "MB",		GUC_UNIT_XBLOCKS,	1024 / (XLOG_BLCKSZ / 1024) },
-	{ "kB",		GUC_UNIT_XBLOCKS,	-(XLOG_BLCKSZ / 1024) },
+	{"TB", GUC_UNIT_XBLOCKS, (1024 * 1024 * 1024) / (XLOG_BLCKSZ / 1024)},
+	{"GB", GUC_UNIT_XBLOCKS, (1024 * 1024) / (XLOG_BLCKSZ / 1024)},
+	{"MB", GUC_UNIT_XBLOCKS, 1024 / (XLOG_BLCKSZ / 1024)},
+	{"kB", GUC_UNIT_XBLOCKS, -(XLOG_BLCKSZ / 1024)},
 
-	{ "TB",		GUC_UNIT_XSEGS,		(1024*1024*1024) / (XLOG_SEG_SIZE / 1024) },
-	{ "GB",		GUC_UNIT_XSEGS,		(1024*1024) / (XLOG_SEG_SIZE / 1024) },
-	{ "MB",		GUC_UNIT_XSEGS,		-(XLOG_SEG_SIZE / (1024 * 1024)) },
-	{ "kB",		GUC_UNIT_XSEGS,		-(XLOG_SEG_SIZE / 1024) },
+	{"TB", GUC_UNIT_XSEGS, (1024 * 1024 * 1024) / (XLOG_SEG_SIZE / 1024)},
+	{"GB", GUC_UNIT_XSEGS, (1024 * 1024) / (XLOG_SEG_SIZE / 1024)},
+	{"MB", GUC_UNIT_XSEGS, -(XLOG_SEG_SIZE / (1024 * 1024))},
+	{"kB", GUC_UNIT_XSEGS, -(XLOG_SEG_SIZE / 1024)},
 
-	{ "" }		/* end of table marker */
+	{""}						/* end of table marker */
 };
 
-static const char *time_units_hint =
-	gettext_noop("Valid units for this parameter are \"ms\", \"s\", \"min\", \"h\", and \"d\".");
+static const char *time_units_hint = gettext_noop("Valid units for this parameter are \"ms\", \"s\", \"min\", \"h\", and \"d\".");
 
 static const unit_conversion time_unit_conversion_table[] =
 {
-	{ "d",		GUC_UNIT_MS,	1000 * 60 * 60 * 24 },
-	{ "h",		GUC_UNIT_MS,	1000 * 60 * 60 },
-	{ "min", 	GUC_UNIT_MS,	1000 * 60},
-	{ "s",		GUC_UNIT_MS,	1000 },
-	{ "ms",		GUC_UNIT_MS,	1 },
-
-	{ "d",		GUC_UNIT_S,		60 * 60 * 24 },
-	{ "h",		GUC_UNIT_S,		60 * 60 },
-	{ "min", 	GUC_UNIT_S,		60 },
-	{ "s",		GUC_UNIT_S,		1 },
-	{ "ms", 	GUC_UNIT_S,	 	-1000 },
-
-	{ "d", 		GUC_UNIT_MIN,	60 * 24 },
-	{ "h", 		GUC_UNIT_MIN,	60 },
-	{ "min", 	GUC_UNIT_MIN,	1 },
-	{ "s", 		GUC_UNIT_MIN,	-60 },
-	{ "ms", 	GUC_UNIT_MIN,	-1000 * 60 },
-
-	{ "" }		/* end of table marker */
+	{"d", GUC_UNIT_MS, 1000 * 60 * 60 * 24},
+	{"h", GUC_UNIT_MS, 1000 * 60 * 60},
+	{"min", GUC_UNIT_MS, 1000 * 60},
+	{"s", GUC_UNIT_MS, 1000},
+	{"ms", GUC_UNIT_MS, 1},
+
+	{"d", GUC_UNIT_S, 60 * 60 * 24},
+	{"h", GUC_UNIT_S, 60 * 60},
+	{"min", GUC_UNIT_S, 60},
+	{"s", GUC_UNIT_S, 1},
+	{"ms", GUC_UNIT_S, -1000},
+
+	{"d", GUC_UNIT_MIN, 60 * 24},
+	{"h", GUC_UNIT_MIN, 60},
+	{"min", GUC_UNIT_MIN, 1},
+	{"s", GUC_UNIT_MIN, -60},
+	{"ms", GUC_UNIT_MIN, -1000 * 60},
+
+	{""}						/* end of table marker */
 };
 
 /*
@@ -993,8 +992,8 @@ static struct config_bool ConfigureNamesBool[] =
 
 	{
 		{"wal_compression", PGC_USERSET, WAL_SETTINGS,
-			 gettext_noop("Compresses full-page writes written in WAL file."),
-			 NULL
+			gettext_noop("Compresses full-page writes written in WAL file."),
+			NULL
 		},
 		&wal_compression,
 		false,
@@ -3685,10 +3684,10 @@ static int	num_guc_variables;
  */
 typedef struct ConfigFileVariable
 {
-	char	*name;
-	char	*value;
-	char	*filename;
-	int		sourceline;
+	char	   *name;
+	char	   *value;
+	char	   *filename;
+	int			sourceline;
 } ConfigFileVariable;
 static struct ConfigFileVariable *guc_file_variables;
 
@@ -5160,7 +5159,7 @@ convert_to_base_unit(int64 value, const char *unit,
 					 int base_unit, int64 *base_value)
 {
 	const unit_conversion *table;
-	int 		i;
+	int			i;
 
 	if (base_unit & GUC_UNIT_MEMORY)
 		table = memory_unit_conversion_table;
@@ -5207,9 +5206,9 @@ convert_from_base_unit(int64 base_value, int base_unit,
 		if (base_unit == table[i].base_unit)
 		{
 			/*
-			 * Accept the first conversion that divides the value evenly.
-			 * We assume that the conversions for each base unit are ordered
-			 * from greatest unit to the smallest!
+			 * Accept the first conversion that divides the value evenly. We
+			 * assume that the conversions for each base unit are ordered from
+			 * greatest unit to the smallest!
 			 */
 			if (table[i].multiplier < 0)
 			{
@@ -5278,7 +5277,7 @@ parse_int(const char *value, int *result, int flags, const char **hintmsg)
 		bool		converted = false;
 
 		if ((flags & GUC_UNIT) == 0)
-			return false;	/* this setting does not accept a unit */
+			return false;		/* this setting does not accept a unit */
 
 		unitlen = 0;
 		while (*endptr != '\0' && !isspace((unsigned char) *endptr) &&
@@ -5694,7 +5693,7 @@ set_config_option(const char *name, const char *value,
 	if (IsInParallelMode() && changeVal && action != GUC_ACTION_SAVE)
 		ereport(elevel,
 				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-				 errmsg("cannot set parameters during a parallel operation")));
+			   errmsg("cannot set parameters during a parallel operation")));
 
 	record = find_option(name, true, elevel);
 	if (record == NULL)
@@ -7017,7 +7016,7 @@ ExecSetVariableStmt(VariableSetStmt *stmt, bool isTopLevel)
 	if (IsInParallelMode())
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-				 errmsg("cannot set parameters during a parallel operation")));
+			   errmsg("cannot set parameters during a parallel operation")));
 
 	switch (stmt->kind)
 	{
diff --git a/src/backend/utils/misc/rls.c b/src/backend/utils/misc/rls.c
index 066ac21a58c..44cb3743034 100644
--- a/src/backend/utils/misc/rls.c
+++ b/src/backend/utils/misc/rls.c
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * rls.c
- *        RLS-related utility functions.
+ *		  RLS-related utility functions.
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *        src/backend/utils/misc/rls.c
+ *		  src/backend/utils/misc/rls.c
  *
  *-------------------------------------------------------------------------
 */
@@ -24,7 +24,7 @@
 #include "utils/syscache.h"
 
 
-extern int check_enable_rls(Oid relid, Oid checkAsUser, bool noError);
+extern int	check_enable_rls(Oid relid, Oid checkAsUser, bool noError);
 
 /*
  * check_enable_rls
@@ -48,10 +48,10 @@ extern int check_enable_rls(Oid relid, Oid checkAsUser, bool noError);
 int
 check_enable_rls(Oid relid, Oid checkAsUser, bool noError)
 {
-	HeapTuple		tuple;
-	Form_pg_class	classform;
-	bool			relrowsecurity;
-	Oid				user_id = checkAsUser ? checkAsUser : GetUserId();
+	HeapTuple	tuple;
+	Form_pg_class classform;
+	bool		relrowsecurity;
+	Oid			user_id = checkAsUser ? checkAsUser : GetUserId();
 
 	tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
 	if (!HeapTupleIsValid(tuple))
@@ -88,25 +88,24 @@ check_enable_rls(Oid relid, Oid checkAsUser, bool noError)
 
 	/*
 	 * If the row_security GUC is 'off' then check if the user has permission
-	 * to bypass it.  Note that we have already handled the case where the user
-	 * is the table owner above.
+	 * to bypass it.  Note that we have already handled the case where the
+	 * user is the table owner above.
 	 *
-	 * Note that row_security is always considered 'on' when querying
-	 * through a view or other cases where checkAsUser is true, so skip this
-	 * if checkAsUser is in use.
+	 * Note that row_security is always considered 'on' when querying through
+	 * a view or other cases where checkAsUser is true, so skip this if
+	 * checkAsUser is in use.
 	 */
 	if (!checkAsUser && row_security == ROW_SECURITY_OFF)
 	{
 		if (has_bypassrls_privilege(user_id))
 			/* OK to bypass */
 			return RLS_NONE_ENV;
+		else if (noError)
+			return RLS_ENABLED;
 		else
-			if (noError)
-				return RLS_ENABLED;
-			else
-				ereport(ERROR,
-						(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-					 errmsg("insufficient privilege to bypass row security.")));
+			ereport(ERROR,
+					(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+				  errmsg("insufficient privilege to bypass row security.")));
 	}
 
 	/* RLS should be fully enabled for this relation. */
diff --git a/src/backend/utils/misc/sampling.c b/src/backend/utils/misc/sampling.c
index 69479a5fc8c..aaf1d6c4108 100644
--- a/src/backend/utils/misc/sampling.c
+++ b/src/backend/utils/misc/sampling.c
@@ -150,7 +150,7 @@ reservoir_get_next_S(ReservoirState rs, double t, int n)
 		double		V,
 					quot;
 
-		V = sampler_random_fract(rs->randstate); /* Generate V */
+		V = sampler_random_fract(rs->randstate);		/* Generate V */
 		S = 0;
 		t += 1;
 		/* Note: "num" in Vitter's code is always equal to t - n */
@@ -276,7 +276,7 @@ anl_init_selection_state(int n)
 double
 anl_get_next_S(double t, int n, double *stateptr)
 {
-	double result;
+	double		result;
 
 	oldrs.W = *stateptr;
 	result = reservoir_get_next_S(&oldrs, t, n);
diff --git a/src/backend/utils/sort/sortsupport.c b/src/backend/utils/sort/sortsupport.c
index a70966ec995..ffef9658e45 100644
--- a/src/backend/utils/sort/sortsupport.c
+++ b/src/backend/utils/sort/sortsupport.c
@@ -102,8 +102,8 @@ FinishSortSupportFunction(Oid opfamily, Oid opcintype, SortSupport ssup)
 	if (OidIsValid(sortSupportFunction))
 	{
 		/*
-		 * The sort support function can provide a comparator, but it can
-		 * also choose not to so (e.g. based on the selected collation).
+		 * The sort support function can provide a comparator, but it can also
+		 * choose not to so (e.g. based on the selected collation).
 		 */
 		OidFunctionCall1(sortSupportFunction, PointerGetDatum(ssup));
 	}
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 3d5da444a64..435041afa1c 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -356,11 +356,12 @@ struct Tuplesortstate
 
 	/*
 	 * Additional state for managing "abbreviated key" sortsupport routines
-	 * (which currently may be used by all cases except the Datum sort case and
-	 * hash index case).  Tracks the intervals at which the optimization's
+	 * (which currently may be used by all cases except the Datum sort case
+	 * and hash index case).  Tracks the intervals at which the optimization's
 	 * effectiveness is tested.
 	 */
-	int64		abbrevNext;		/* Tuple # at which to next check applicability */
+	int64		abbrevNext;		/* Tuple # at which to next check
+								 * applicability */
 
 	/*
 	 * These variables are specific to the CLUSTER case; they are set by
@@ -660,9 +661,9 @@ tuplesort_begin_heap(TupleDesc tupDesc,
 
 	/*
 	 * The "onlyKey" optimization cannot be used with abbreviated keys, since
-	 * tie-breaker comparisons may be required.  Typically, the optimization is
-	 * only of value to pass-by-value types anyway, whereas abbreviated keys
-	 * are typically only of value to pass-by-reference types.
+	 * tie-breaker comparisons may be required.  Typically, the optimization
+	 * is only of value to pass-by-value types anyway, whereas abbreviated
+	 * keys are typically only of value to pass-by-reference types.
 	 */
 	if (nkeys == 1 && !state->sortKeys->abbrev_converter)
 		state->onlyKey = state->sortKeys;
@@ -678,9 +679,9 @@ tuplesort_begin_cluster(TupleDesc tupDesc,
 						int workMem, bool randomAccess)
 {
 	Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
-	ScanKey			indexScanKey;
+	ScanKey		indexScanKey;
 	MemoryContext oldcontext;
-	int				i;
+	int			i;
 
 	Assert(indexRel->rd_rel->relam == BTREE_AM_OID);
 
@@ -771,9 +772,9 @@ tuplesort_begin_index_btree(Relation heapRel,
 							int workMem, bool randomAccess)
 {
 	Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
-	ScanKey			indexScanKey;
+	ScanKey		indexScanKey;
 	MemoryContext oldcontext;
-	int				i;
+	int			i;
 
 	oldcontext = MemoryContextSwitchTo(state->sortcontext);
 
@@ -929,9 +930,9 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
 
 	/*
 	 * The "onlyKey" optimization cannot be used with abbreviated keys, since
-	 * tie-breaker comparisons may be required.  Typically, the optimization is
-	 * only of value to pass-by-value types anyway, whereas abbreviated keys
-	 * are typically only of value to pass-by-reference types.
+	 * tie-breaker comparisons may be required.  Typically, the optimization
+	 * is only of value to pass-by-value types anyway, whereas abbreviated
+	 * keys are typically only of value to pass-by-reference types.
 	 */
 	if (!state->sortKeys->abbrev_converter)
 		state->onlyKey = state->sortKeys;
@@ -1277,7 +1278,7 @@ tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
 	else
 	{
 		/* Abort abbreviation */
-		int		i;
+		int			i;
 
 		stup.datum1 = original;
 
@@ -1285,13 +1286,13 @@ tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel,
 		 * Set state to be consistent with never trying abbreviation.
 		 *
 		 * Alter datum1 representation in already-copied tuples, so as to
-		 * ensure a consistent representation (current tuple was just handled).
-		 * Note that we rely on all tuples copied so far actually being
-		 * contained within memtuples array.
+		 * ensure a consistent representation (current tuple was just
+		 * handled). Note that we rely on all tuples copied so far actually
+		 * being contained within memtuples array.
 		 */
 		for (i = 0; i < state->memtupcount; i++)
 		{
-			SortTuple *mtup = &state->memtuples[i];
+			SortTuple  *mtup = &state->memtuples[i];
 
 			tuple = mtup->tuple;
 			mtup->datum1 = index_getattr(tuple,
@@ -1325,8 +1326,8 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
 	 * control, and possibly abbreviated. The copied value is pointed to by
 	 * stup.tuple and is treated as the canonical copy (e.g. to return via
 	 * tuplesort_getdatum or when writing to tape); stup.datum1 gets the
-	 * abbreviated value if abbreviation is happening, otherwise it's identical
-	 * to stup.tuple.
+	 * abbreviated value if abbreviation is happening, otherwise it's
+	 * identical to stup.tuple.
 	 */
 
 	if (isNull || state->datumTypeByVal)
@@ -1337,7 +1338,7 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
 	}
 	else
 	{
-		Datum	original = datumCopy(val, false, state->datumTypeLen);
+		Datum		original = datumCopy(val, false, state->datumTypeLen);
 
 		stup.isnull1 = false;
 		stup.tuple = DatumGetPointer(original);
@@ -1356,7 +1357,7 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
 		else
 		{
 			/* Abort abbreviation */
-			int		i;
+			int			i;
 
 			stup.datum1 = original;
 
@@ -1364,13 +1365,13 @@ tuplesort_putdatum(Tuplesortstate *state, Datum val, bool isNull)
 			 * Set state to be consistent with never trying abbreviation.
 			 *
 			 * Alter datum1 representation in already-copied tuples, so as to
-			 * ensure a consistent representation (current tuple was just handled).
-			 * Note that we rely on all tuples copied so far actually being
-			 * contained within memtuples array.
+			 * ensure a consistent representation (current tuple was just
+			 * handled). Note that we rely on all tuples copied so far
+			 * actually being contained within memtuples array.
 			 */
 			for (i = 0; i < state->memtupcount; i++)
 			{
-				SortTuple *mtup = &state->memtuples[i];
+				SortTuple  *mtup = &state->memtuples[i];
 
 				mtup->datum1 = PointerGetDatum(mtup->tuple);
 			}
@@ -1524,8 +1525,8 @@ consider_abort_common(Tuplesortstate *state)
 		state->abbrevNext *= 2;
 
 		/*
-		 * Check opclass-supplied abbreviation abort routine.  It may
-		 * indicate that abbreviation should not proceed.
+		 * Check opclass-supplied abbreviation abort routine.  It may indicate
+		 * that abbreviation should not proceed.
 		 */
 		if (!state->sortKeys->abbrev_abort(state->memtupcount,
 										   state->sortKeys))
@@ -2231,9 +2232,9 @@ mergeruns(Tuplesortstate *state)
 	{
 		/*
 		 * If there are multiple runs to be merged, when we go to read back
-		 * tuples from disk, abbreviated keys will not have been stored, and we
-		 * don't care to regenerate them.  Disable abbreviation from this point
-		 * on.
+		 * tuples from disk, abbreviated keys will not have been stored, and
+		 * we don't care to regenerate them.  Disable abbreviation from this
+		 * point on.
 		 */
 		state->sortKeys->abbrev_converter = NULL;
 		state->sortKeys->comparator = state->sortKeys->abbrev_full_comparator;
@@ -3121,7 +3122,7 @@ copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
 	 * MinimalTuple using the exported interface for that.
 	 */
 	TupleTableSlot *slot = (TupleTableSlot *) tup;
-	Datum			original;
+	Datum		original;
 	MinimalTuple tuple;
 	HeapTupleData htup;
 
@@ -3157,7 +3158,7 @@ copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
 	else
 	{
 		/* Abort abbreviation */
-		int		i;
+		int			i;
 
 		stup->datum1 = original;
 
@@ -3165,18 +3166,18 @@ copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup)
 		 * Set state to be consistent with never trying abbreviation.
 		 *
 		 * Alter datum1 representation in already-copied tuples, so as to
-		 * ensure a consistent representation (current tuple was just handled).
-		 * Note that we rely on all tuples copied so far actually being
-		 * contained within memtuples array.
+		 * ensure a consistent representation (current tuple was just
+		 * handled). Note that we rely on all tuples copied so far actually
+		 * being contained within memtuples array.
 		 */
 		for (i = 0; i < state->memtupcount; i++)
 		{
-			SortTuple *mtup = &state->memtuples[i];
+			SortTuple  *mtup = &state->memtuples[i];
 
 			htup.t_len = ((MinimalTuple) mtup->tuple)->t_len +
-							MINIMAL_TUPLE_OFFSET;
+				MINIMAL_TUPLE_OFFSET;
 			htup.t_data = (HeapTupleHeader) ((char *) mtup->tuple -
-							MINIMAL_TUPLE_OFFSET);
+											 MINIMAL_TUPLE_OFFSET);
 
 			mtup->datum1 = heap_getattr(&htup,
 										state->sortKeys[0].ssup_attno,
@@ -3247,7 +3248,7 @@ static int
 comparetup_cluster(const SortTuple *a, const SortTuple *b,
 				   Tuplesortstate *state)
 {
-	SortSupport	sortKey = state->sortKeys;
+	SortSupport sortKey = state->sortKeys;
 	HeapTuple	ltup;
 	HeapTuple	rtup;
 	TupleDesc	tupDesc;
@@ -3364,6 +3365,7 @@ copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
 	tuple = heap_copytuple(tuple);
 	stup->tuple = (void *) tuple;
 	USEMEM(state, GetMemoryChunkSpace(tuple));
+
 	/*
 	 * set up first-column key value, and potentially abbreviate, if it's a
 	 * simple column
@@ -3396,7 +3398,7 @@ copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
 	else
 	{
 		/* Abort abbreviation */
-		int		i;
+		int			i;
 
 		stup->datum1 = original;
 
@@ -3404,17 +3406,17 @@ copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup)
 		 * Set state to be consistent with never trying abbreviation.
 		 *
 		 * Alter datum1 representation in already-copied tuples, so as to
-		 * ensure a consistent representation (current tuple was just handled).
-		 * Note that we rely on all tuples copied so far actually being
-		 * contained within memtuples array.
+		 * ensure a consistent representation (current tuple was just
+		 * handled). Note that we rely on all tuples copied so far actually
+		 * being contained within memtuples array.
 		 */
 		for (i = 0; i < state->memtupcount; i++)
 		{
-			SortTuple *mtup = &state->memtuples[i];
+			SortTuple  *mtup = &state->memtuples[i];
 
 			tuple = (HeapTuple) mtup->tuple;
 			mtup->datum1 = heap_getattr(tuple,
-										state->indexInfo->ii_KeyAttrNumbers[0],
+									  state->indexInfo->ii_KeyAttrNumbers[0],
 										state->tupDesc,
 										&stup->isnull1);
 		}
@@ -3487,10 +3489,10 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b,
 {
 	/*
 	 * This is similar to comparetup_heap(), but expects index tuples.  There
-	 * is also special handling for enforcing uniqueness, and special treatment
-	 * for equal keys at the end.
+	 * is also special handling for enforcing uniqueness, and special
+	 * treatment for equal keys at the end.
 	 */
-	SortSupport	sortKey = state->sortKeys;
+	SortSupport sortKey = state->sortKeys;
 	IndexTuple	tuple1;
 	IndexTuple	tuple2;
 	int			keysz;
@@ -3582,7 +3584,7 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b,
 				 errmsg("could not create unique index \"%s\"",
 						RelationGetRelationName(state->indexRel)),
 				 key_desc ? errdetail("Key %s is duplicated.", key_desc) :
-							errdetail("Duplicate keys exist."),
+				 errdetail("Duplicate keys exist."),
 				 errtableconstraint(state->heapRel,
 								 RelationGetRelationName(state->indexRel))));
 	}
@@ -3698,7 +3700,7 @@ copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
 	else
 	{
 		/* Abort abbreviation */
-		int		i;
+		int			i;
 
 		stup->datum1 = original;
 
@@ -3706,13 +3708,13 @@ copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup)
 		 * Set state to be consistent with never trying abbreviation.
 		 *
 		 * Alter datum1 representation in already-copied tuples, so as to
-		 * ensure a consistent representation (current tuple was just handled).
-		 * Note that we rely on all tuples copied so far actually being
-		 * contained within memtuples array.
+		 * ensure a consistent representation (current tuple was just
+		 * handled). Note that we rely on all tuples copied so far actually
+		 * being contained within memtuples array.
 		 */
 		for (i = 0; i < state->memtupcount; i++)
 		{
-			SortTuple *mtup = &state->memtuples[i];
+			SortTuple  *mtup = &state->memtuples[i];
 
 			tuple = (IndexTuple) mtup->tuple;
 			mtup->datum1 = index_getattr(tuple,
@@ -3770,7 +3772,7 @@ readtup_index(Tuplesortstate *state, SortTuple *stup,
 static int
 comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
 {
-	int		compare;
+	int			compare;
 
 	compare = ApplySortComparator(a->datum1, a->isnull1,
 								  b->datum1, b->isnull1,
@@ -3782,7 +3784,7 @@ comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
 
 	if (state->sortKeys->abbrev_converter)
 		compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1,
-												PointerGetDatum(b->tuple), b->isnull1,
+									   PointerGetDatum(b->tuple), b->isnull1,
 												state->sortKeys);
 
 	return compare;
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index cc5409b8803..bb2f3295a44 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -121,6 +121,7 @@ HeapTupleHeaderGetCmax(HeapTupleHeader tup)
 	CommandId	cid = HeapTupleHeaderGetRawCommandId(tup);
 
 	Assert(!(tup->t_infomask & HEAP_MOVED));
+
 	/*
 	 * Because GetUpdateXid() performs memory allocations if xmax is a
 	 * multixact we can't Assert() if we're inside a critical section. This
@@ -128,7 +129,7 @@ HeapTupleHeaderGetCmax(HeapTupleHeader tup)
 	 * things too much.
 	 */
 	Assert(CritSectionCount > 0 ||
-		   TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tup)));
+	  TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tup)));
 
 	if (tup->t_infomask & HEAP_COMBOCID)
 		return GetRealCmax(cid);
@@ -317,7 +318,7 @@ SerializeComboCIDState(Size maxsize, char *start_address)
 	char	   *endptr;
 
 	/* First, we store the number of currently-existing ComboCIDs. */
-	* (int *) start_address = usedComboCids;
+	*(int *) start_address = usedComboCids;
 
 	/* If maxsize is too small, throw an error. */
 	endptr = start_address + sizeof(int) +
@@ -347,7 +348,7 @@ RestoreComboCIDState(char *comboCIDstate)
 	Assert(!comboCids && !comboHash);
 
 	/* First, we retrieve the number of ComboCIDs that were serialized. */
-	num_elements = * (int *) comboCIDstate;
+	num_elements = *(int *) comboCIDstate;
 	keydata = (ComboCidKeyData *) (comboCIDstate + sizeof(int));
 
 	/* Use GetComboCommandId to restore each ComboCID. */
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index f4bdabfd791..2f0e9cda8c3 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -131,7 +131,7 @@ static ActiveSnapshotElt *ActiveSnapshot = NULL;
 static int xmin_cmp(const pairingheap_node *a, const pairingheap_node *b,
 		 void *arg);
 
-static pairingheap RegisteredSnapshots = { &xmin_cmp, NULL, NULL };
+static pairingheap RegisteredSnapshots = {&xmin_cmp, NULL, NULL};
 
 /* first GetTransactionSnapshot call in a transaction? */
 bool		FirstSnapshotSet = false;
@@ -313,10 +313,10 @@ GetNonHistoricCatalogSnapshot(Oid relid)
 {
 	/*
 	 * If the caller is trying to scan a relation that has no syscache, no
-	 * catcache invalidations will be sent when it is updated.  For a few
-	 * key relations, snapshot invalidations are sent instead.  If we're
-	 * trying to scan a relation for which neither catcache nor snapshot
-	 * invalidations are sent, we must refresh the snapshot every time.
+	 * catcache invalidations will be sent when it is updated.  For a few key
+	 * relations, snapshot invalidations are sent instead.  If we're trying to
+	 * scan a relation for which neither catcache nor snapshot invalidations
+	 * are sent, we must refresh the snapshot every time.
 	 */
 	if (!CatalogSnapshotStale && !RelationInvalidatesSnapshotsOnly(relid) &&
 		!RelationHasSysCache(relid))
@@ -587,7 +587,9 @@ PushCopiedSnapshot(Snapshot snapshot)
 void
 UpdateActiveSnapshotCommandId(void)
 {
-	CommandId	save_curcid, curcid;
+	CommandId	save_curcid,
+				curcid;
+
 	Assert(ActiveSnapshot != NULL);
 	Assert(ActiveSnapshot->as_snap->active_count == 1);
 	Assert(ActiveSnapshot->as_snap->regd_count == 0);
@@ -772,7 +774,7 @@ xmin_cmp(const pairingheap_node *a, const pairingheap_node *b, void *arg)
 static void
 SnapshotResetXmin(void)
 {
-	Snapshot minSnapshot;
+	Snapshot	minSnapshot;
 
 	if (ActiveSnapshot != NULL)
 		return;
@@ -897,7 +899,8 @@ AtEOXact_Snapshot(bool isCommit)
 		 */
 		foreach(lc, exportedSnapshots)
 		{
-			Snapshot snap = (Snapshot) lfirst(lc);
+			Snapshot	snap = (Snapshot) lfirst(lc);
+
 			pairingheap_remove(&RegisteredSnapshots, &snap->ph_node);
 		}
 
@@ -1472,8 +1475,8 @@ EstimateSnapshotSpace(Snapshot snap)
 
 /*
  * SerializeSnapshot
- * 		Dumps the serialized snapshot (extracted from given snapshot) onto the
- * 		memory location at start_address.
+ *		Dumps the serialized snapshot (extracted from given snapshot) onto the
+ *		memory location at start_address.
  */
 void
 SerializeSnapshot(Snapshot snapshot, char *start_address)
@@ -1494,9 +1497,9 @@ SerializeSnapshot(Snapshot snapshot, char *start_address)
 	serialized_snapshot->curcid = snapshot->curcid;
 
 	/*
-	 * Ignore the SubXID array if it has overflowed, unless the snapshot
-	 * was taken during recovey - in that case, top-level XIDs are in subxip
-	 * as well, and we mustn't lose them.
+	 * Ignore the SubXID array if it has overflowed, unless the snapshot was
+	 * taken during recovery - in that case, top-level XIDs are in subxip as
+	 * well, and we mustn't lose them.
 	 */
 	if (serialized_snapshot->suboverflowed && !snapshot->takenDuringRecovery)
 		serialized_snapshot->subxcnt = 0;
@@ -1514,8 +1517,8 @@ SerializeSnapshot(Snapshot snapshot, char *start_address)
 	 */
 	if (snapshot->subxcnt > 0)
 	{
-		Size subxipoff = sizeof(SerializedSnapshotData) +
-			snapshot->xcnt * sizeof(TransactionId);
+		Size		subxipoff = sizeof(SerializedSnapshotData) +
+		snapshot->xcnt * sizeof(TransactionId);
 
 		memcpy((TransactionId *) ((char *) serialized_snapshot + subxipoff),
 			   snapshot->subxip, snapshot->subxcnt * sizeof(TransactionId));
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index b4284d6d94f..de7b3fc80cf 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -405,6 +405,7 @@ HeapTupleSatisfiesToast(HeapTuple htup, Snapshot snapshot,
 				}
 			}
 		}
+
 		/*
 		 * An invalid Xmin can be left behind by a speculative insertion that
 		 * is cancelled by super-deleting the tuple.  We shouldn't see any of
@@ -550,7 +551,7 @@ HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
 				if (!TransactionIdIsCurrentTransactionId(xmax))
 				{
 					if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple),
-																	   false))
+											 false))
 						return HeapTupleBeingUpdated;
 					return HeapTupleMayBeUpdated;
 				}
@@ -820,10 +821,10 @@ HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot,
 		else if (TransactionIdIsInProgress(HeapTupleHeaderGetRawXmin(tuple)))
 		{
 			/*
-			 * Return the speculative token to caller.  Caller can worry
-			 * about xmax, since it requires a conclusively locked row
-			 * version, and a concurrent update to this tuple is a conflict
-			 * of its purposes.
+			 * Return the speculative token to caller.  Caller can worry about
+			 * xmax, since it requires a conclusively locked row version, and
+			 * a concurrent update to this tuple is a conflict of its
+			 * purposes.
 			 */
 			if (HeapTupleHeaderIsSpeculative(tuple))
 			{
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 2d0ea7bad86..5dd2887d12e 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -236,7 +236,7 @@ usage(void)
 	printf(_("  -D, --pgdata=DIRECTORY receive base backup into directory\n"));
 	printf(_("  -F, --format=p|t       output format (plain (default), tar)\n"));
 	printf(_("  -r, --max-rate=RATE    maximum transfer rate to transfer data directory\n"
-			 "                         (in kB/s, or use suffix \"k\" or \"M\")\n"));
+	  "                         (in kB/s, or use suffix \"k\" or \"M\")\n"));
 	printf(_("  -R, --write-recovery-conf\n"
 			 "                         write recovery.conf after backup\n"));
 	printf(_("  -T, --tablespace-mapping=OLDDIR=NEWDIR\n"
@@ -1255,7 +1255,7 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
 						 * failures on related directories.
 						 */
 						if (!((pg_str_endswith(filename, "/pg_xlog") ||
-							   pg_str_endswith(filename, "/archive_status")) &&
+							 pg_str_endswith(filename, "/archive_status")) &&
 							  errno == EEXIST))
 						{
 							fprintf(stderr,
@@ -1278,12 +1278,12 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
 					 *
 					 * It's most likely a link in pg_tblspc directory, to the
 					 * location of a tablespace. Apply any tablespace mapping
-					 * given on the command line (--tablespace-mapping).
-					 * (We blindly apply the mapping without checking that
-					 * the link really is inside pg_tblspc. We don't expect
-					 * there to be other symlinks in a data directory, but
-					 * if there are, you can call it an undocumented feature
-					 * that you can map them too.)
+					 * given on the command line (--tablespace-mapping). (We
+					 * blindly apply the mapping without checking that the
+					 * link really is inside pg_tblspc. We don't expect there
+					 * to be other symlinks in a data directory, but if there
+					 * are, you can call it an undocumented feature that you
+					 * can map them too.)
 					 */
 					filename[strlen(filename) - 1] = '\0';		/* Remove trailing slash */
 
@@ -1659,7 +1659,7 @@ BaseBackup(void)
 				 fastcheckpoint ? "FAST" : "",
 				 includewal ? "NOWAIT" : "",
 				 maxrate_clause ? maxrate_clause : "",
-				 format == 't'  ? "TABLESPACE_MAP": "");
+				 format == 't' ? "TABLESPACE_MAP" : "");
 
 	if (PQsendQuery(conn, basebkp) == 0)
 	{
diff --git a/src/bin/pg_basebackup/pg_receivexlog.c b/src/bin/pg_basebackup/pg_receivexlog.c
index 71fb94578ed..5d964e4ee6b 100644
--- a/src/bin/pg_basebackup/pg_receivexlog.c
+++ b/src/bin/pg_basebackup/pg_receivexlog.c
@@ -43,7 +43,7 @@ static bool synchronous = false;
 
 
 static void usage(void);
-static DIR* get_destination_dir(char *dest_folder);
+static DIR *get_destination_dir(char *dest_folder);
 static void close_destination_dir(DIR *dest_dir, char *dest_folder);
 static XLogRecPtr FindStreamingStart(uint32 *tli);
 static void StreamLog(void);
@@ -128,10 +128,10 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
 /*
  * Get destination directory.
  */
-static DIR*
+static DIR *
 get_destination_dir(char *dest_folder)
 {
-	DIR *dir;
+	DIR		   *dir;
 
 	Assert(dest_folder != NULL);
 	dir = opendir(dest_folder);
@@ -274,8 +274,10 @@ FindStreamingStart(uint32 *tli)
 static void
 StreamLog(void)
 {
-	XLogRecPtr	startpos, serverpos;
-	TimeLineID	starttli, servertli;
+	XLogRecPtr	startpos,
+				serverpos;
+	TimeLineID	starttli,
+				servertli;
 
 	/*
 	 * Connect in replication mode to the server
@@ -513,7 +515,8 @@ main(int argc, char **argv)
 	 */
 	if (!do_drop_slot)
 	{
-		DIR *dir = get_destination_dir(basedir);
+		DIR		   *dir = get_destination_dir(basedir);
+
 		close_destination_dir(dir, basedir);
 	}
 
@@ -538,8 +541,8 @@ main(int argc, char **argv)
 		disconnect_and_exit(1);
 
 	/*
-	 * Check that there is a database associated with connection, none
-	 * should be defined in this context.
+	 * Check that there is a database associated with connection, none should
+	 * be defined in this context.
 	 */
 	if (db_name)
 	{
@@ -577,8 +580,8 @@ main(int argc, char **argv)
 	}
 
 	/*
-	 * Don't close the connection here so that subsequent StreamLog()
-	 * can reuse it.
+	 * Don't close the connection here so that subsequent StreamLog() can
+	 * reuse it.
 	 */
 
 	while (true)
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index 8caedff2b09..3c60626541f 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -36,26 +36,26 @@ static bool still_sending = true;		/* feedback still needs to be sent? */
 static PGresult *HandleCopyStream(PGconn *conn, XLogRecPtr startpos,
 				 uint32 timeline, char *basedir,
 			   stream_stop_callback stream_stop, int standby_message_timeout,
-				  char *partial_suffix, XLogRecPtr *stoppos,
-				  bool synchronous, bool mark_done);
-static int CopyStreamPoll(PGconn *conn, long timeout_ms);
-static int CopyStreamReceive(PGconn *conn, long timeout, char **buffer);
+				 char *partial_suffix, XLogRecPtr *stoppos,
+				 bool synchronous, bool mark_done);
+static int	CopyStreamPoll(PGconn *conn, long timeout_ms);
+static int	CopyStreamReceive(PGconn *conn, long timeout, char **buffer);
 static bool ProcessKeepaliveMsg(PGconn *conn, char *copybuf, int len,
-								XLogRecPtr blockpos, int64 *last_status);
+					XLogRecPtr blockpos, int64 *last_status);
 static bool ProcessXLogDataMsg(PGconn *conn, char *copybuf, int len,
-							   XLogRecPtr *blockpos, uint32 timeline,
-							   char *basedir, stream_stop_callback stream_stop,
-							   char *partial_suffix, bool mark_done);
+				   XLogRecPtr *blockpos, uint32 timeline,
+				   char *basedir, stream_stop_callback stream_stop,
+				   char *partial_suffix, bool mark_done);
 static PGresult *HandleEndOfCopyStream(PGconn *conn, char *copybuf,
-									   XLogRecPtr blockpos, char *basedir, char *partial_suffix,
-									   XLogRecPtr *stoppos, bool mark_done);
+					XLogRecPtr blockpos, char *basedir, char *partial_suffix,
+					  XLogRecPtr *stoppos, bool mark_done);
 static bool CheckCopyStreamStop(PGconn *conn, XLogRecPtr blockpos,
-								uint32 timeline, char *basedir,
-								stream_stop_callback stream_stop,
-								char *partial_suffix, XLogRecPtr *stoppos,
-								bool mark_done);
+					uint32 timeline, char *basedir,
+					stream_stop_callback stream_stop,
+					char *partial_suffix, XLogRecPtr *stoppos,
+					bool mark_done);
 static long CalculateCopyStreamSleeptime(int64 now, int standby_message_timeout,
-										 int64 last_status);
+							 int64 last_status);
 
 static bool ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos,
 						 uint32 *timeline);
@@ -63,7 +63,7 @@ static bool ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos,
 static bool
 mark_file_as_archived(const char *basedir, const char *fname)
 {
-	int fd;
+	int			fd;
 	static char tmppath[MAXPGPATH];
 
 	snprintf(tmppath, sizeof(tmppath), "%s/archive_status/%s.done",
@@ -831,15 +831,15 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
 		 * Check if we should continue streaming, or abort at this point.
 		 */
 		if (!CheckCopyStreamStop(conn, blockpos, timeline, basedir,
-								stream_stop, partial_suffix, stoppos,
-								mark_done))
+								 stream_stop, partial_suffix, stoppos,
+								 mark_done))
 			goto error;
 
 		now = feGetCurrentTimestamp();
 
 		/*
-		 * If synchronous option is true, issue sync command as soon as
-		 * there are WAL data which has not been flushed yet.
+		 * If synchronous option is true, issue sync command as soon as there
+		 * are WAL data which has not been flushed yet.
 		 */
 		if (synchronous && lastFlushPosition < blockpos && walfile != -1)
 		{
@@ -886,9 +886,10 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
 				goto error;
 			if (r == -2)
 			{
-				PGresult	*res = HandleEndOfCopyStream(conn, copybuf, blockpos,
-														 basedir, partial_suffix,
-														 stoppos, mark_done);
+				PGresult   *res = HandleEndOfCopyStream(conn, copybuf, blockpos,
+													 basedir, partial_suffix,
+														stoppos, mark_done);
+
 				if (res == NULL)
 					goto error;
 				else
@@ -910,7 +911,8 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
 					goto error;
 
 				/*
-				 * Check if we should continue streaming, or abort at this point.
+				 * Check if we should continue streaming, or abort at this
+				 * point.
 				 */
 				if (!CheckCopyStreamStop(conn, blockpos, timeline, basedir,
 										 stream_stop, partial_suffix, stoppos,
@@ -925,8 +927,8 @@ HandleCopyStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
 			}
 
 			/*
-			 * Process the received data, and any subsequent data we
-			 * can read without blocking.
+			 * Process the received data, and any subsequent data we can read
+			 * without blocking.
 			 */
 			r = CopyStreamReceive(conn, 0, &copybuf);
 		}
@@ -972,7 +974,7 @@ CopyStreamPoll(PGconn *conn, long timeout_ms)
 
 	ret = select(PQsocket(conn) + 1, &input_mask, NULL, NULL, timeoutptr);
 	if (ret == 0 || (ret < 0 && errno == EINTR))
-		return 0;		/* Got a timeout or signal */
+		return 0;				/* Got a timeout or signal */
 	else if (ret < 0)
 	{
 		fprintf(stderr, _("%s: select() failed: %s\n"),
@@ -1009,12 +1011,12 @@ CopyStreamReceive(PGconn *conn, long timeout, char **buffer)
 	if (rawlen == 0)
 	{
 		/*
-		 * No data available. Wait for some to appear, but not longer than
-		 * the specified timeout, so that we can ping the server.
+		 * No data available. Wait for some to appear, but not longer than the
+		 * specified timeout, so that we can ping the server.
 		 */
 		if (timeout != 0)
 		{
-			int		ret;
+			int			ret;
 
 			ret = CopyStreamPoll(conn, timeout);
 			if (ret <= 0)
@@ -1061,13 +1063,12 @@ ProcessKeepaliveMsg(PGconn *conn, char *copybuf, int len,
 	int64		now;
 
 	/*
-	 * Parse the keepalive message, enclosed in the CopyData message.
-	 * We just check if the server requested a reply, and ignore the
-	 * rest.
+	 * Parse the keepalive message, enclosed in the CopyData message. We just
+	 * check if the server requested a reply, and ignore the rest.
 	 */
-	pos = 1;			/* skip msgtype 'k' */
-	pos += 8;			/* skip walEnd */
-	pos += 8;			/* skip sendTime */
+	pos = 1;					/* skip msgtype 'k' */
+	pos += 8;					/* skip walEnd */
+	pos += 8;					/* skip sendTime */
 
 	if (len < pos + 1)
 	{
@@ -1084,11 +1085,11 @@ ProcessKeepaliveMsg(PGconn *conn, char *copybuf, int len,
 			walfile != -1)
 		{
 			/*
-			 * If a valid flush location needs to be reported,
-			 * flush the current WAL file so that the latest flush
-			 * location is sent back to the server. This is necessary to
-			 * see whether the last WAL data has been successfully
-			 * replicated or not, at the normal shutdown of the server.
+			 * If a valid flush location needs to be reported, flush the
+			 * current WAL file so that the latest flush location is sent back
+			 * to the server. This is necessary to see whether the last WAL
+			 * data has been successfully replicated or not, at the normal
+			 * shutdown of the server.
 			 */
 			if (fsync(walfile) != 0)
 			{
@@ -1123,21 +1124,21 @@ ProcessXLogDataMsg(PGconn *conn, char *copybuf, int len,
 	int			hdr_len;
 
 	/*
-	 * Once we've decided we don't want to receive any more, just
-	 * ignore any subsequent XLogData messages.
+	 * Once we've decided we don't want to receive any more, just ignore any
+	 * subsequent XLogData messages.
 	 */
 	if (!(still_sending))
 		return true;
 
 	/*
-	 * Read the header of the XLogData message, enclosed in the
-	 * CopyData message. We only need the WAL location field
-	 * (dataStart), the rest of the header is ignored.
+	 * Read the header of the XLogData message, enclosed in the CopyData
+	 * message. We only need the WAL location field (dataStart), the rest of
+	 * the header is ignored.
 	 */
-	hdr_len = 1;		/* msgtype 'w' */
-	hdr_len += 8;		/* dataStart */
-	hdr_len += 8;		/* walEnd */
-	hdr_len += 8;		/* sendTime */
+	hdr_len = 1;				/* msgtype 'w' */
+	hdr_len += 8;				/* dataStart */
+	hdr_len += 8;				/* walEnd */
+	hdr_len += 8;				/* sendTime */
 	if (len < hdr_len)
 	{
 		fprintf(stderr, _("%s: streaming header too small: %d\n"),
@@ -1150,8 +1151,8 @@ ProcessXLogDataMsg(PGconn *conn, char *copybuf, int len,
 	xlogoff = *blockpos % XLOG_SEG_SIZE;
 
 	/*
-	 * Verify that the initial location in the stream matches where we
-	 * think we are.
+	 * Verify that the initial location in the stream matches where we think
+	 * we are.
 	 */
 	if (walfile == -1)
 	{
@@ -1208,7 +1209,7 @@ ProcessXLogDataMsg(PGconn *conn, char *copybuf, int len,
 				  bytes_to_write) != bytes_to_write)
 		{
 			fprintf(stderr,
-					_("%s: could not write %u bytes to WAL file \"%s\": %s\n"),
+				  _("%s: could not write %u bytes to WAL file \"%s\": %s\n"),
 					progname, bytes_to_write, current_walfile_name,
 					strerror(errno));
 			return false;
@@ -1252,15 +1253,15 @@ ProcessXLogDataMsg(PGconn *conn, char *copybuf, int len,
  */
 static PGresult *
 HandleEndOfCopyStream(PGconn *conn, char *copybuf,
-					  XLogRecPtr blockpos, char *basedir, char *partial_suffix,
+					XLogRecPtr blockpos, char *basedir, char *partial_suffix,
 					  XLogRecPtr *stoppos, bool mark_done)
 {
 	PGresult   *res = PQgetResult(conn);
 
 	/*
-	 * The server closed its end of the copy stream.  If we haven't
-	 * closed ours already, we need to do so now, unless the server
-	 * threw an error, in which case we don't.
+	 * The server closed its end of the copy stream.  If we haven't closed
+	 * ours already, we need to do so now, unless the server threw an error,
+	 * in which case we don't.
 	 */
 	if (still_sending)
 	{
diff --git a/src/bin/pg_basebackup/receivelog.h b/src/bin/pg_basebackup/receivelog.h
index a957aea4d99..b38e993c1aa 100644
--- a/src/bin/pg_basebackup/receivelog.h
+++ b/src/bin/pg_basebackup/receivelog.h
@@ -34,4 +34,4 @@ extern bool ReceiveXlogStream(PGconn *conn,
 				  bool synchronous,
 				  bool mark_done);
 
-#endif	/* RECEIVELOG_H */
+#endif   /* RECEIVELOG_H */
diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c
index de37511ef1b..ac84e6d360e 100644
--- a/src/bin/pg_basebackup/streamutil.c
+++ b/src/bin/pg_basebackup/streamutil.c
@@ -241,7 +241,8 @@ RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli,
 				  XLogRecPtr *startpos, char **db_name)
 {
 	PGresult   *res;
-	uint32		hi, lo;
+	uint32		hi,
+				lo;
 
 	/* Check connection existence */
 	Assert(conn != NULL);
@@ -279,7 +280,7 @@ RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli,
 		if (sscanf(PQgetvalue(res, 0, 2), "%X/%X", &hi, &lo) != 2)
 		{
 			fprintf(stderr,
-					_("%s: could not parse transaction log location \"%s\"\n"),
+				  _("%s: could not parse transaction log location \"%s\"\n"),
 					progname, PQgetvalue(res, 0, 2));
 
 			PQclear(res);
@@ -289,7 +290,7 @@ RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli,
 	}
 
 	/* Get database name, only available in 9.4 and newer versions */
-	if  (db_name != NULL)
+	if (db_name != NULL)
 	{
 		if (PQnfields(res) < 4)
 			fprintf(stderr,
@@ -297,7 +298,7 @@ RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli,
 					progname, PQntuples(res), PQnfields(res), 1, 4);
 
 		if (PQgetisnull(res, 0, 3))
-			*db_name =  NULL;
+			*db_name = NULL;
 		else
 			*db_name = pg_strdup(PQgetvalue(res, 0, 3));
 	}
@@ -358,12 +359,13 @@ CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin,
 	/* Get LSN start position if necessary */
 	if (startpos != NULL)
 	{
-		uint32		hi, lo;
+		uint32		hi,
+					lo;
 
 		if (sscanf(PQgetvalue(res, 0, 1), "%X/%X", &hi, &lo) != 2)
 		{
 			fprintf(stderr,
-					_("%s: could not parse transaction log location \"%s\"\n"),
+				  _("%s: could not parse transaction log location \"%s\"\n"),
 					progname, PQgetvalue(res, 0, 1));
 
 			destroyPQExpBuffer(query);
diff --git a/src/bin/pg_basebackup/streamutil.h b/src/bin/pg_basebackup/streamutil.h
index 6845662b426..01ab5660a14 100644
--- a/src/bin/pg_basebackup/streamutil.h
+++ b/src/bin/pg_basebackup/streamutil.h
@@ -32,13 +32,13 @@ extern PGconn *GetConnection(void);
 
 /* Replication commands */
 extern bool CreateReplicationSlot(PGconn *conn, const char *slot_name,
-								  const char *plugin, XLogRecPtr *startpos,
-								  bool is_physical);
+					  const char *plugin, XLogRecPtr *startpos,
+					  bool is_physical);
 extern bool DropReplicationSlot(PGconn *conn, const char *slot_name);
 extern bool RunIdentifySystem(PGconn *conn, char **sysid,
-							  TimeLineID *starttli,
-							  XLogRecPtr *startpos,
-							  char **db_name);
+				  TimeLineID *starttli,
+				  XLogRecPtr *startpos,
+				  char **db_name);
 extern int64 feGetCurrentTimestamp(void);
 extern void feTimestampDifference(int64 start_time, int64 stop_time,
 					  long *secs, int *microsecs);
@@ -48,4 +48,4 @@ extern bool feTimestampDifferenceExceeds(int64 start_time, int64 stop_time,
 extern void fe_sendint64(int64 i, char *buf);
 extern int64 fe_recvint64(char *buf);
 
-#endif	/* STREAMUTIL_H */
+#endif   /* STREAMUTIL_H */
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 0e4bd12aff5..3476ea686ac 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -49,11 +49,11 @@ command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup", '-Ft' ],
 	'tar format');
 ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created');
 
-my $superlongname = "superlongname_" . ("x"x100);
+my $superlongname = "superlongname_" . ("x" x 100);
 
 system_or_bail 'touch', "$tempdir/pgdata/$superlongname";
 command_fails([ 'pg_basebackup', '-D', "$tempdir/tarbackup_l1", '-Ft' ],
-			  'pg_basebackup tar with long name fails');
+	'pg_basebackup tar with long name fails');
 unlink "$tempdir/pgdata/$superlongname";
 
 # Create a temporary directory in the system location and symlink it
@@ -64,7 +64,8 @@ my $shorter_tempdir = tempdir_short . "/tempdir";
 symlink "$tempdir", $shorter_tempdir;
 
 mkdir "$tempdir/tblspc1";
-psql 'postgres', "CREATE TABLESPACE tblspc1 LOCATION '$shorter_tempdir/tblspc1';";
+psql 'postgres',
+  "CREATE TABLESPACE tblspc1 LOCATION '$shorter_tempdir/tblspc1';";
 psql 'postgres', "CREATE TABLE test1 (a int) TABLESPACE tblspc1;";
 command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup2", '-Ft' ],
 	'tar format with tablespaces');
@@ -77,14 +78,12 @@ command_fails(
 	'plain format with tablespaces fails without tablespace mapping');
 
 command_ok(
-	[   'pg_basebackup',    '-D',
-		"$tempdir/backup1", '-Fp',
+	[   'pg_basebackup', '-D', "$tempdir/backup1", '-Fp',
 		"-T$shorter_tempdir/tblspc1=$tempdir/tbackup/tblspc1" ],
 	'plain format with tablespaces succeeds with tablespace mapping');
 ok(-d "$tempdir/tbackup/tblspc1", 'tablespace was relocated');
 opendir(my $dh, "$tempdir/pgdata/pg_tblspc") or die;
-ok( (   grep
-		{
+ok( (   grep {
 			-l "$tempdir/backup1/pg_tblspc/$_"
 			  and readlink "$tempdir/backup1/pg_tblspc/$_" eq
 			  "$tempdir/tbackup/tblspc1"
@@ -95,10 +94,10 @@ closedir $dh;
 mkdir "$tempdir/tbl=spc2";
 psql 'postgres', "DROP TABLE test1;";
 psql 'postgres', "DROP TABLESPACE tblspc1;";
-psql 'postgres', "CREATE TABLESPACE tblspc2 LOCATION '$shorter_tempdir/tbl=spc2';";
+psql 'postgres',
+  "CREATE TABLESPACE tblspc2 LOCATION '$shorter_tempdir/tbl=spc2';";
 command_ok(
-	[   'pg_basebackup',    '-D',
-		"$tempdir/backup3", '-Fp',
+	[   'pg_basebackup', '-D', "$tempdir/backup3", '-Fp',
 		"-T$shorter_tempdir/tbl\\=spc2=$tempdir/tbackup/tbl\\=spc2" ],
 	'mapping tablespace with = sign in path');
 ok(-d "$tempdir/tbackup/tbl=spc2", 'tablespace with = sign was relocated');
@@ -126,7 +125,8 @@ command_fails(
 	'-T with invalid format fails');
 
 mkdir "$tempdir/$superlongname";
-psql 'postgres', "CREATE TABLESPACE tblspc3 LOCATION '$tempdir/$superlongname';";
+psql 'postgres',
+  "CREATE TABLESPACE tblspc3 LOCATION '$tempdir/$superlongname';";
 command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup_l3", '-Ft' ],
-			  'pg_basebackup tar with long symlink target');
+	'pg_basebackup tar with long symlink target');
 psql 'postgres', "DROP TABLESPACE tblspc3;";
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index 6a67cb7fcac..74764fabdaf 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -176,7 +176,7 @@ write_eventlog(int level, const char *line)
 	if (evtHandle == INVALID_HANDLE_VALUE)
 	{
 		evtHandle = RegisterEventSource(NULL,
-						event_source ? event_source : DEFAULT_EVENT_SOURCE);
+						 event_source ? event_source : DEFAULT_EVENT_SOURCE);
 		if (evtHandle == NULL)
 		{
 			evtHandle = INVALID_HANDLE_VALUE;
@@ -263,7 +263,8 @@ get_pgpid(bool is_status_request)
 		/*
 		 * The Linux Standard Base Core Specification 3.1 says this should
 		 * return '4, program or service status is unknown'
-		 * https://refspecs.linuxbase.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
+		 * https://refspecs.linuxbase.org/LSB_3.1.0/LSB-Core-generic/LSB-Core-g
+		 * eneric/iniscrptact.html
 		 */
 		exit(is_status_request ? 4 : 1);
 	}
@@ -1600,10 +1601,10 @@ pgwin32_ServiceMain(DWORD argc, LPTSTR *argv)
 			{
 				/*
 				 * status.dwCheckPoint can be incremented by
-				 * test_postmaster_connection(true), so it might not
-				 * start from 0.
+				 * test_postmaster_connection(true), so it might not start
+				 * from 0.
 				 */
-				int maxShutdownCheckPoint = status.dwCheckPoint + 12;;
+				int			maxShutdownCheckPoint = status.dwCheckPoint + 12;;
 
 				kill(postmasterPID, SIGINT);
 
@@ -2215,7 +2216,7 @@ main(int argc, char **argv)
 						post_opts = pg_strdup(optarg);
 					else
 					{
-						char *old_post_opts = post_opts;
+						char	   *old_post_opts = post_opts;
 
 						post_opts = psprintf("%s %s", old_post_opts, optarg);
 						free(old_post_opts);
diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl
index 17309e8fcb9..6c9ec5c717a 100644
--- a/src/bin/pg_ctl/t/001_start_stop.pl
+++ b/src/bin/pg_ctl/t/001_start_stop.pl
@@ -3,7 +3,7 @@ use warnings;
 use TestLib;
 use Test::More tests => 17;
 
-my $tempdir = TestLib::tempdir;
+my $tempdir       = TestLib::tempdir;
 my $tempdir_short = TestLib::tempdir_short;
 
 program_help_ok('pg_ctl');
@@ -11,7 +11,7 @@ program_version_ok('pg_ctl');
 program_options_handling_ok('pg_ctl');
 
 command_exit_is([ 'pg_ctl', 'start', '-D', "$tempdir/nonexistent" ],
-				1, 'pg_ctl start with nonexistent directory');
+	1, 'pg_ctl start with nonexistent directory');
 
 command_ok([ 'pg_ctl', 'initdb', '-D', "$tempdir/data" ], 'pg_ctl initdb');
 command_ok(
diff --git a/src/bin/pg_ctl/t/002_status.pl b/src/bin/pg_ctl/t/002_status.pl
index b8cbbdaed53..055885495ab 100644
--- a/src/bin/pg_ctl/t/002_status.pl
+++ b/src/bin/pg_ctl/t/002_status.pl
@@ -3,7 +3,7 @@ use warnings;
 use TestLib;
 use Test::More tests => 3;
 
-my $tempdir = TestLib::tempdir;
+my $tempdir       = TestLib::tempdir;
 my $tempdir_short = TestLib::tempdir_short;
 
 command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/nonexistent" ],
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index d39abf9242e..687cbaaf7c2 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -126,8 +126,8 @@ static const CatalogId nilCatalogId = {0, 0};
 
 static void help(const char *progname);
 static void setup_connection(Archive *AH, DumpOptions *dopt,
-				const char *dumpencoding, const char *dumpsnapshot,
-				char *use_role);
+				 const char *dumpencoding, const char *dumpsnapshot,
+				 char *use_role);
 static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
 static void expand_schema_name_patterns(Archive *fout,
 							SimpleStringList *patterns,
@@ -671,7 +671,7 @@ main(int argc, char **argv)
 	/* check the version when a snapshot is explicitly specified by user */
 	if (dumpsnapshot && fout->remoteVersion < 90200)
 		exit_horribly(NULL,
-			"Exported snapshots are not supported by this server version.\n");
+		   "Exported snapshots are not supported by this server version.\n");
 
 	/* Find the last built-in OID, if needed */
 	if (fout->remoteVersion < 70300)
@@ -1052,8 +1052,8 @@ setup_connection(Archive *AH, DumpOptions *dopt, const char *dumpencoding,
 							"SET TRANSACTION ISOLATION LEVEL SERIALIZABLE");
 
 	/*
-	 * define an export snapshot, either chosen by user or needed for
-	 * parallel dump.
+	 * define an export snapshot, either chosen by user or needed for parallel
+	 * dump.
 	 */
 	if (dumpsnapshot)
 		AH->sync_snapshot_id = strdup(dumpsnapshot);
@@ -1061,6 +1061,7 @@ setup_connection(Archive *AH, DumpOptions *dopt, const char *dumpencoding,
 	if (AH->sync_snapshot_id)
 	{
 		PQExpBuffer query = createPQExpBuffer();
+
 		appendPQExpBuffer(query, "SET TRANSACTION SNAPSHOT ");
 		appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
 		ExecuteSqlStatement(AH, query->data);
@@ -2841,8 +2842,8 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
 
 		/*
 		 * Get row security enabled information for the table. We represent
-		 * RLS enabled on a table by creating PolicyInfo object with an
-		 * empty policy.
+		 * RLS enabled on a table by creating PolicyInfo object with an empty
+		 * policy.
 		 */
 		if (tbinfo->rowsec)
 		{
@@ -2882,8 +2883,8 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
 						  "SELECT oid, tableoid, pol.polname, pol.polcmd, "
 						  "CASE WHEN pol.polroles = '{0}' THEN 'PUBLIC' ELSE "
 						  "   pg_catalog.array_to_string(ARRAY(SELECT pg_catalog.quote_ident(rolname) from pg_catalog.pg_roles WHERE oid = ANY(pol.polroles)), ', ') END AS polroles, "
-						  "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
-				"pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
+			 "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid) AS polqual, "
+						  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid) AS polwithcheck "
 						  "FROM pg_catalog.pg_policy pol "
 						  "WHERE polrelid = '%u'",
 						  tbinfo->dobj.catId.oid);
@@ -2895,8 +2896,8 @@ getPolicies(Archive *fout, TableInfo tblinfo[], int numTables)
 		{
 			/*
 			 * No explicit policies to handle (only the default-deny policy,
-			 * which is handled as part of the table definition).  Clean up and
-			 * return.
+			 * which is handled as part of the table definition).  Clean up
+			 * and return.
 			 */
 			PQclear(res);
 			continue;
@@ -2959,9 +2960,9 @@ dumpPolicy(Archive *fout, DumpOptions *dopt, PolicyInfo *polinfo)
 		return;
 
 	/*
-	 * If polname is NULL, then this record is just indicating that ROW
-	 * LEVEL SECURITY is enabled for the table. Dump as ALTER TABLE <table>
-	 * ENABLE ROW LEVEL SECURITY.
+	 * If polname is NULL, then this record is just indicating that ROW LEVEL
+	 * SECURITY is enabled for the table. Dump as ALTER TABLE <table> ENABLE
+	 * ROW LEVEL SECURITY.
 	 */
 	if (polinfo->polname == NULL)
 	{
@@ -3046,7 +3047,7 @@ binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
 
 	appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, must preserve pg_type oid\n");
 	appendPQExpBuffer(upgrade_buffer,
-	 "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
+					  "SELECT pg_catalog.binary_upgrade_set_next_pg_type_oid('%u'::pg_catalog.oid);\n\n",
 					  pg_type_oid);
 
 	/* we only support old >= 8.3 for binary upgrades */
@@ -6597,7 +6598,7 @@ getTransforms(Archive *fout, int *numTransforms)
 	int			ntups;
 	int			i;
 	PQExpBuffer query;
-	TransformInfo   *transforminfo;
+	TransformInfo *transforminfo;
 	int			i_tableoid;
 	int			i_oid;
 	int			i_trftype;
@@ -8462,7 +8463,7 @@ dumpExtension(Archive *fout, DumpOptions *dopt, ExtensionInfo *extinfo)
 		appendPQExpBuffer(q, "DROP EXTENSION IF EXISTS %s;\n", qextname);
 
 		appendPQExpBufferStr(q,
-							 "SELECT pg_catalog.binary_upgrade_create_empty_extension(");
+				 "SELECT pg_catalog.binary_upgrade_create_empty_extension(");
 		appendStringLiteralAH(q, extinfo->dobj.name, fout);
 		appendPQExpBufferStr(q, ", ");
 		appendStringLiteralAH(q, extinfo->namespace, fout);
@@ -9367,7 +9368,7 @@ dumpDomain(Archive *fout, DumpOptions *dopt, TypeInfo *tyinfo)
 	for (i = 0; i < tyinfo->nDomChecks; i++)
 	{
 		ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
-		PQExpBuffer	labelq = createPQExpBuffer();
+		PQExpBuffer labelq = createPQExpBuffer();
 
 		appendPQExpBuffer(labelq, "CONSTRAINT %s ",
 						  fmtId(domcheck->dobj.name));
@@ -10451,8 +10452,8 @@ dumpFunc(Archive *fout, DumpOptions *dopt, FuncInfo *finfo)
 
 	if (protrftypes != NULL && strcmp(protrftypes, "") != 0)
 	{
-		Oid *typeids = palloc(FUNC_MAX_ARGS * sizeof(Oid));
-		int i;
+		Oid		   *typeids = palloc(FUNC_MAX_ARGS * sizeof(Oid));
+		int			i;
 
 		appendPQExpBufferStr(q, " TRANSFORM ");
 		parseOidArray(protrftypes, typeids, FUNC_MAX_ARGS);
@@ -10461,7 +10462,7 @@ dumpFunc(Archive *fout, DumpOptions *dopt, FuncInfo *finfo)
 			if (i != 0)
 				appendPQExpBufferStr(q, ", ");
 			appendPQExpBuffer(q, "FOR TYPE %s",
-							  getFormattedTypeName(fout, typeids[i], zeroAsNone));
+						 getFormattedTypeName(fout, typeids[i], zeroAsNone));
 		}
 	}
 
@@ -10729,11 +10730,11 @@ dumpTransform(Archive *fout, DumpOptions *dopt, TransformInfo *transform)
 	lanname = get_language_name(fout, transform->trflang);
 
 	appendPQExpBuffer(delqry, "DROP TRANSFORM FOR %s LANGUAGE %s;\n",
-					  getFormattedTypeName(fout, transform->trftype, zeroAsNone),
+				  getFormattedTypeName(fout, transform->trftype, zeroAsNone),
 					  lanname);
 
 	appendPQExpBuffer(defqry, "CREATE TRANSFORM FOR %s LANGUAGE %s (",
-					  getFormattedTypeName(fout, transform->trftype, zeroAsNone),
+				  getFormattedTypeName(fout, transform->trftype, zeroAsNone),
 					  lanname);
 
 	if (!transform->trffromsql && !transform->trftosql)
@@ -10747,11 +10748,10 @@ dumpTransform(Archive *fout, DumpOptions *dopt, TransformInfo *transform)
 
 			/*
 			 * Always qualify the function name, in case it is not in
-			 * pg_catalog schema (format_function_signature won't qualify
-			 * it).
+			 * pg_catalog schema (format_function_signature won't qualify it).
 			 */
 			appendPQExpBuffer(defqry, "FROM SQL WITH FUNCTION %s.%s",
-							  fmtId(fromsqlFuncInfo->dobj.namespace->dobj.name), fsig);
+					fmtId(fromsqlFuncInfo->dobj.namespace->dobj.name), fsig);
 			free(fsig);
 		}
 		else
@@ -10769,11 +10769,10 @@ dumpTransform(Archive *fout, DumpOptions *dopt, TransformInfo *transform)
 
 			/*
 			 * Always qualify the function name, in case it is not in
-			 * pg_catalog schema (format_function_signature won't qualify
-			 * it).
+			 * pg_catalog schema (format_function_signature won't qualify it).
 			 */
 			appendPQExpBuffer(defqry, "TO SQL WITH FUNCTION %s.%s",
-							  fmtId(tosqlFuncInfo->dobj.namespace->dobj.name), fsig);
+					  fmtId(tosqlFuncInfo->dobj.namespace->dobj.name), fsig);
 			free(fsig);
 		}
 		else
@@ -10783,7 +10782,7 @@ dumpTransform(Archive *fout, DumpOptions *dopt, TransformInfo *transform)
 	appendPQExpBuffer(defqry, ");\n");
 
 	appendPQExpBuffer(labelq, "TRANSFORM FOR %s LANGUAGE %s",
-					  getFormattedTypeName(fout, transform->trftype, zeroAsNone),
+				  getFormattedTypeName(fout, transform->trftype, zeroAsNone),
 					  lanname);
 
 	if (dopt->binary_upgrade)
@@ -14012,9 +14011,9 @@ dumpTableSchema(Archive *fout, DumpOptions *dopt, TableInfo *tbinfo)
 		 * here, also updating their attlen/attalign values so that the
 		 * dropped column can be skipped properly.  (We do not bother with
 		 * restoring the original attbyval setting.)  Also, inheritance
-		 * relationships are set up by doing ALTER TABLE INHERIT rather than using
-		 * an INHERITS clause --- the latter would possibly mess up the column
-		 * order.  That also means we have to take care about setting
+		 * relationships are set up by doing ALTER TABLE INHERIT rather than
+		 * using an INHERITS clause --- the latter would possibly mess up the
+		 * column order.  That also means we have to take care about setting
 		 * attislocal correctly, plus fix up any inherited CHECK constraints.
 		 * Analogously, we set up typed tables using ALTER TABLE / OF here.
 		 */
@@ -15473,28 +15472,28 @@ dumpRule(Archive *fout, DumpOptions *dopt, RuleInfo *rinfo)
  *
  * 1. Identify objects which are members of extensions
  *
- *    Generally speaking, this is to mark them as *not* being dumped, as most
- *    extension objects are created by the single CREATE EXTENSION command.
- *    The one exception is binary upgrades with pg_upgrade will still dump the
- *    non-table objects.
+ *	  Generally speaking, this is to mark them as *not* being dumped, as most
+ *	  extension objects are created by the single CREATE EXTENSION command.
+ *	  The one exception is binary upgrades with pg_upgrade will still dump the
+ *	  non-table objects.
  *
  * 2. Identify and create dump records for extension configuration tables.
  *
- *    Extensions can mark tables as "configuration", which means that the user
- *    is able and expected to modify those tables after the extension has been
- *    loaded.  For these tables, we dump out only the data- the structure is
- *    expected to be handled at CREATE EXTENSION time, including any indexes or
- *    foreign keys, which brings us to-
+ *	  Extensions can mark tables as "configuration", which means that the user
+ *	  is able and expected to modify those tables after the extension has been
+ *	  loaded.  For these tables, we dump out only the data- the structure is
+ *	  expected to be handled at CREATE EXTENSION time, including any indexes or
+ *	  foreign keys, which brings us to-
  *
  * 3. Record FK dependencies between configuration tables.
  *
- *    Due to the FKs being created at CREATE EXTENSION time and therefore before
- *    the data is loaded, we have to work out what the best order for reloading
- *    the data is, to avoid FK violations when the tables are restored.  This is
- *    not perfect- we can't handle circular dependencies and if any exist they
- *    will cause an invalid dump to be produced (though at least all of the data
- *    is included for a user to manually restore).  This is currently documented
- *    but perhaps we can provide a better solution in the future.
+ *	  Due to the FKs being created at CREATE EXTENSION time and therefore before
+ *	  the data is loaded, we have to work out what the best order for reloading
+ *	  the data is, to avoid FK violations when the tables are restored.  This is
+ *	  not perfect- we can't handle circular dependencies and if any exist they
+ *	  will cause an invalid dump to be produced (though at least all of the data
+ *	  is included for a user to manually restore).  This is currently documented
+ *	  but perhaps we can provide a better solution in the future.
  */
 void
 getExtensionMembership(Archive *fout, DumpOptions *dopt, ExtensionInfo extinfo[],
@@ -15691,21 +15690,20 @@ getExtensionMembership(Archive *fout, DumpOptions *dopt, ExtensionInfo extinfo[]
 	}
 
 	/*
-	 * Now that all the TableInfoData objects have been created for all
-	 * the extensions, check their FK dependencies and register them to
-	 * try and dump the data out in an order which they can be restored
-	 * in.
+	 * Now that all the TableInfoData objects have been created for all the
+	 * extensions, check their FK dependencies and register them to try and
+	 * dump the data out in an order which they can be restored in.
 	 *
 	 * Note that this is not a problem for user tables as their FKs are
 	 * recreated after the data has been loaded.
 	 */
 	printfPQExpBuffer(query,
-			"SELECT conrelid, confrelid "
-			"FROM pg_constraint "
-				"JOIN pg_depend ON (objid = confrelid) "
-			"WHERE contype = 'f' "
-			"AND refclassid = 'pg_extension'::regclass "
-			"AND classid = 'pg_class'::regclass;");
+					  "SELECT conrelid, confrelid "
+					  "FROM pg_constraint "
+					  "JOIN pg_depend ON (objid = confrelid) "
+					  "WHERE contype = 'f' "
+					  "AND refclassid = 'pg_extension'::regclass "
+					  "AND classid = 'pg_class'::regclass;");
 
 	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
 	ntups = PQntuples(res);
@@ -15716,8 +15714,10 @@ getExtensionMembership(Archive *fout, DumpOptions *dopt, ExtensionInfo extinfo[]
 	/* Now get the dependencies and register them */
 	for (i = 0; i < ntups; i++)
 	{
-		Oid			conrelid, confrelid;
-		TableInfo  *reftable, *contable;
+		Oid			conrelid,
+					confrelid;
+		TableInfo  *reftable,
+				   *contable;
 
 		conrelid = atooid(PQgetvalue(res, i, i_conrelid));
 		confrelid = atooid(PQgetvalue(res, i, i_confrelid));
@@ -15731,8 +15731,8 @@ getExtensionMembership(Archive *fout, DumpOptions *dopt, ExtensionInfo extinfo[]
 			continue;
 
 		/*
-		 * Make referencing TABLE_DATA object depend on the
-		 * referenced table's TABLE_DATA object.
+		 * Make referencing TABLE_DATA object depend on the referenced table's
+		 * TABLE_DATA object.
 		 */
 		addObjectDependency(&contable->dataObj->dobj,
 							reftable->dataObj->dobj.dumpId);
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 4c796ad6a76..009dba5c9d7 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -471,7 +471,7 @@ typedef struct _policyInfo
 {
 	DumpableObject dobj;
 	TableInfo  *poltable;
-	char	   *polname;	/* null indicates RLS is enabled on rel */
+	char	   *polname;		/* null indicates RLS is enabled on rel */
 	char	   *polcmd;
 	char	   *polroles;
 	char	   *polqual;
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 2c72e19f2d3..c6b9326cb15 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -1422,7 +1422,7 @@ dumpCreateDB(PGconn *conn)
 		{
 			appendPQExpBufferStr(buf, "-- For binary upgrade, set datfrozenxid and datminmxid.\n");
 			appendPQExpBuffer(buf, "UPDATE pg_catalog.pg_database "
-							"SET datfrozenxid = '%u', datminmxid = '%u' "
+							  "SET datfrozenxid = '%u', datminmxid = '%u' "
 							  "WHERE datname = ",
 							  dbfrozenxid, dbminmxid);
 			appendStringLiteralConn(buf, dbname, conn);
diff --git a/src/bin/pg_resetxlog/pg_resetxlog.c b/src/bin/pg_resetxlog/pg_resetxlog.c
index 393d5801540..6ffe795348d 100644
--- a/src/bin/pg_resetxlog/pg_resetxlog.c
+++ b/src/bin/pg_resetxlog/pg_resetxlog.c
@@ -914,9 +914,9 @@ FindEndOfXLOG(void)
 			XLogSegNo	segno;
 
 			/*
-			 * Note: We don't use XLogFromFileName here, because we want
-			 * to use the segment size from the control file, not the size
-			 * the pg_resetxlog binary was compiled with
+			 * Note: We don't use XLogFromFileName here, because we want to
+			 * use the segment size from the control file, not the size the
+			 * pg_resetxlog binary was compiled with
 			 */
 			sscanf(xlde->d_name, "%08X%08X%08X", &tli, &log, &seg);
 			segno = ((uint64) log) * segs_per_xlogid + seg;
diff --git a/src/bin/pg_rewind/RewindTest.pm b/src/bin/pg_rewind/RewindTest.pm
index 6ea2f871aab..5219ec967ae 100644
--- a/src/bin/pg_rewind/RewindTest.pm
+++ b/src/bin/pg_rewind/RewindTest.pm
@@ -67,8 +67,8 @@ our @EXPORT = qw(
 # for debugging purposes,
 my $testroot = tempdir;
 
-our $test_master_datadir="$testroot/data_master";
-our $test_standby_datadir="$testroot/data_standby";
+our $test_master_datadir  = "$testroot/data_master";
+our $test_standby_datadir = "$testroot/data_standby";
 
 mkdir $testroot;
 
@@ -76,14 +76,14 @@ mkdir $testroot;
 mkdir "regress_log";
 
 # Define non-conflicting ports for both nodes.
-my $port_master=$ENV{PGPORT};
-my $port_standby=$port_master + 1;
+my $port_master  = $ENV{PGPORT};
+my $port_standby = $port_master + 1;
 
 my $log_path;
 my $tempdir_short;
 
-my $connstr_master="port=$port_master";
-my $connstr_standby="port=$port_standby";
+my $connstr_master  = "port=$port_master";
+my $connstr_standby = "port=$port_standby";
 
 $ENV{PGDATABASE} = "postgres";
 
@@ -109,19 +109,25 @@ sub check_query
 	my ($stdout, $stderr);
 
 	# we want just the output, no formatting
-	my $result = run ['psql', '-q', '-A', '-t', '--no-psqlrc',
-					  '-d', $connstr_master,
-					  '-c' , $query],
-					 '>', \$stdout, '2>', \$stderr;
+	my $result = run [
+		'psql',          '-q', '-A', '-t', '--no-psqlrc', '-d',
+		$connstr_master, '-c', $query ],
+	  '>', \$stdout, '2>', \$stderr;
+
 	# We don't use ok() for the exit code and stderr, because we want this
 	# check to be just a single test.
-	if (!$result) {
-		fail ("$test_name: psql exit code");
-	} elsif ($stderr ne '') {
+	if (!$result)
+	{
+		fail("$test_name: psql exit code");
+	}
+	elsif ($stderr ne '')
+	{
 		diag $stderr;
-		fail ("$test_name: psql no stderr");
-	} else {
-		is ($stdout, $expected_stdout, "$test_name: query result matches");
+		fail("$test_name: psql no stderr");
+	}
+	else
+	{
+		is($stdout, $expected_stdout, "$test_name: query result matches");
 	}
 }
 
@@ -131,12 +137,12 @@ sub poll_query_until
 	my ($query, $connstr) = @_;
 
 	my $max_attempts = 30;
-	my $attempts = 0;
+	my $attempts     = 0;
 	my ($stdout, $stderr);
 
 	while ($attempts < $max_attempts)
 	{
-		my $cmd = ['psql', '-At', '-c', "$query", '-d', "$connstr" ];
+		my $cmd = [ 'psql', '-At', '-c', "$query", '-d', "$connstr" ];
 		my $result = run $cmd, '>', \$stdout, '2>', \$stderr;
 
 		chomp($stdout);
@@ -158,7 +164,7 @@ sub poll_query_until
 
 sub append_to_file
 {
-	my($filename, $str) = @_;
+	my ($filename, $str) = @_;
 
 	open my $fh, ">>", $filename or die "could not open file $filename";
 	print $fh $str;
@@ -167,10 +173,10 @@ sub append_to_file
 
 sub init_rewind_test
 {
-	my $testname = shift;
+	my $testname  = shift;
 	my $test_mode = shift;
 
-	$log_path="regress_log/pg_rewind_log_${testname}_${test_mode}";
+	$log_path = "regress_log/pg_rewind_log_${testname}_${test_mode}";
 
 	remove_tree $log_path;
 }
@@ -184,7 +190,8 @@ sub setup_cluster
 	standard_initdb($test_master_datadir);
 
 	# Custom parameters for master's postgresql.conf
-	append_to_file("$test_master_datadir/postgresql.conf", qq(
+	append_to_file(
+		"$test_master_datadir/postgresql.conf", qq(
 wal_level = hot_standby
 max_wal_senders = 2
 wal_keep_segments = 20
@@ -197,38 +204,47 @@ max_connections = 10
 ));
 
 	# Accept replication connections on master
-	append_to_file("$test_master_datadir/pg_hba.conf", qq(
+	append_to_file(
+		"$test_master_datadir/pg_hba.conf", qq(
 local replication all trust
 ));
 
-	system_or_bail("pg_ctl -w -D $test_master_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_master\" start >>$log_path 2>&1");
+	system_or_bail(
+"pg_ctl -w -D $test_master_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_master\" start >>$log_path 2>&1"
+	);
 
 	#### Now run the test-specific parts to initialize the master before setting
 	# up standby
-	$ENV{PGHOST}         = $tempdir_short;
+	$ENV{PGHOST} = $tempdir_short;
 }
 
 sub create_standby
 {
+
 	# Set up standby with necessary parameter
 	remove_tree $test_standby_datadir;
 
 	# Base backup is taken with xlog files included
-	system_or_bail("pg_basebackup -D $test_standby_datadir -p $port_master -x >>$log_path 2>&1");
-	append_to_file("$test_standby_datadir/recovery.conf", qq(
+	system_or_bail(
+"pg_basebackup -D $test_standby_datadir -p $port_master -x >>$log_path 2>&1");
+	append_to_file(
+		"$test_standby_datadir/recovery.conf", qq(
 primary_conninfo='$connstr_master application_name=rewind_standby'
 standby_mode=on
 recovery_target_timeline='latest'
 ));
 
 	# Start standby
-	system_or_bail("pg_ctl -w -D $test_standby_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_standby\" start >>$log_path 2>&1");
+	system_or_bail(
+"pg_ctl -w -D $test_standby_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_standby\" start >>$log_path 2>&1"
+	);
 
 	# Wait until the standby has caught up with the primary, by polling
 	# pg_stat_replication.
-	my $caughtup_query = "SELECT pg_current_xlog_location() = replay_location FROM pg_stat_replication WHERE application_name = 'rewind_standby';";
+	my $caughtup_query =
+"SELECT pg_current_xlog_location() = replay_location FROM pg_stat_replication WHERE application_name = 'rewind_standby';";
 	poll_query_until($caughtup_query, $connstr_master)
-		or die "Timed out while waiting for standby to catch up";
+	  or die "Timed out while waiting for standby to catch up";
 }
 
 sub promote_standby
@@ -239,9 +255,10 @@ sub promote_standby
 	# Now promote slave and insert some new data on master, this will put
 	# the master out-of-sync with the standby. Wait until the standby is
 	# out of recovery mode, and is ready to accept read-write connections.
-	system_or_bail("pg_ctl -w -D $test_standby_datadir promote >>$log_path 2>&1");
+	system_or_bail(
+		"pg_ctl -w -D $test_standby_datadir promote >>$log_path 2>&1");
 	poll_query_until("SELECT NOT pg_is_in_recovery()", $connstr_standby)
-		or die "Timed out while waiting for promotion of standby";
+	  or die "Timed out while waiting for promotion of standby";
 
 	# Force a checkpoint after the promotion. pg_rewind looks at the control
 	# file todetermine what timeline the server is on, and that isn't updated
@@ -257,7 +274,8 @@ sub run_pg_rewind
 	my $test_mode = shift;
 
 	# Stop the master and be ready to perform the rewind
-	system_or_bail("pg_ctl -w -D $test_master_datadir stop -m fast >>$log_path 2>&1");
+	system_or_bail(
+		"pg_ctl -w -D $test_master_datadir stop -m fast >>$log_path 2>&1");
 
 	# At this point, the rewind processing is ready to run.
 	# We now have a very simple scenario with a few diverged WAL record.
@@ -266,47 +284,67 @@ sub run_pg_rewind
 
 	# Keep a temporary postgresql.conf for master node or it would be
 	# overwritten during the rewind.
-	copy("$test_master_datadir/postgresql.conf", "$testroot/master-postgresql.conf.tmp");
+	copy(
+		"$test_master_datadir/postgresql.conf",
+		"$testroot/master-postgresql.conf.tmp");
+
 	# Now run pg_rewind
 	if ($test_mode eq "local")
 	{
+
 		# Do rewind using a local pgdata as source
 		# Stop the master and be ready to perform the rewind
-		system_or_bail("pg_ctl -w -D $test_standby_datadir stop -m fast >>$log_path 2>&1");
-		my $result =
-			run(['pg_rewind',
-				 "--debug",
-				 "--source-pgdata=$test_standby_datadir",
-				 "--target-pgdata=$test_master_datadir"],
-				'>>', $log_path, '2>&1');
-		ok ($result, 'pg_rewind local');
+		system_or_bail(
+			"pg_ctl -w -D $test_standby_datadir stop -m fast >>$log_path 2>&1"
+		);
+		my $result = run(
+			[   'pg_rewind',
+				"--debug",
+				"--source-pgdata=$test_standby_datadir",
+				"--target-pgdata=$test_master_datadir" ],
+			'>>',
+			$log_path,
+			'2>&1');
+		ok($result, 'pg_rewind local');
 	}
 	elsif ($test_mode eq "remote")
 	{
+
 		# Do rewind using a remote connection as source
-		my $result =
-			run(['pg_rewind',
-				 "--source-server", "port=$port_standby dbname=postgres",
-				 "--target-pgdata=$test_master_datadir"],
-				'>>', $log_path, '2>&1');
-		ok ($result, 'pg_rewind remote');
-	} else {
+		my $result = run(
+			[   'pg_rewind',
+				"--source-server",
+				"port=$port_standby dbname=postgres",
+				"--target-pgdata=$test_master_datadir" ],
+			'>>',
+			$log_path,
+			'2>&1');
+		ok($result, 'pg_rewind remote');
+	}
+	else
+	{
+
 		# Cannot come here normally
 		die("Incorrect test mode specified");
 	}
 
 	# Now move back postgresql.conf with old settings
-	move("$testroot/master-postgresql.conf.tmp", "$test_master_datadir/postgresql.conf");
+	move(
+		"$testroot/master-postgresql.conf.tmp",
+		"$test_master_datadir/postgresql.conf");
 
 	# Plug-in rewound node to the now-promoted standby node
-	append_to_file("$test_master_datadir/recovery.conf", qq(
+	append_to_file(
+		"$test_master_datadir/recovery.conf", qq(
 primary_conninfo='port=$port_standby'
 standby_mode=on
 recovery_target_timeline='latest'
 ));
 
 	# Restart the master to check that rewind went correctly
-	system_or_bail("pg_ctl -w -D $test_master_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_master\" start >>$log_path 2>&1");
+	system_or_bail(
+"pg_ctl -w -D $test_master_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_master\" start >>$log_path 2>&1"
+	);
 
 	#### Now run the test-specific parts to check the result
 }
@@ -316,11 +354,13 @@ sub clean_rewind_test
 {
 	if ($test_master_datadir)
 	{
-		system "pg_ctl -D $test_master_datadir -s -m immediate stop 2> /dev/null";
+		system
+		  "pg_ctl -D $test_master_datadir -s -m immediate stop 2> /dev/null";
 	}
 	if ($test_standby_datadir)
 	{
-		system "pg_ctl -D $test_standby_datadir -s -m immediate stop 2> /dev/null";
+		system
+		  "pg_ctl -D $test_standby_datadir -s -m immediate stop 2> /dev/null";
 	}
 }
 
diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c
index 1a56866fd4a..cb2bf4d1a0e 100644
--- a/src/bin/pg_rewind/filemap.c
+++ b/src/bin/pg_rewind/filemap.c
@@ -185,13 +185,13 @@ process_source_file(const char *path, file_type_t type, size_t newsize,
 				 *
 				 * If it's smaller in the target, it means that it has been
 				 * truncated in the target, or enlarged in the source, or
-				 * both. If it was truncated in the target, we need to copy the
-				 * missing tail from the source system. If it was enlarged in
-				 * the source system, there will be WAL records in the source
-				 * system for the new blocks, so we wouldn't need to copy them
-				 * here. But we don't know which scenario we're dealing with,
-				 * and there's no harm in copying the missing blocks now, so
-				 * do it now.
+				 * both. If it was truncated in the target, we need to copy
+				 * the missing tail from the source system. If it was enlarged
+				 * in the source system, there will be WAL records in the
+				 * source system for the new blocks, so we wouldn't need to
+				 * copy them here. But we don't know which scenario we're
+				 * dealing with, and there's no harm in copying the missing
+				 * blocks now, so do it now.
 				 *
 				 * If it's the same size, do nothing here. Any blocks modified
 				 * in the target will be copied based on parsing the target
@@ -370,6 +370,7 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
 				break;
 
 			case FILE_ACTION_COPY_TAIL:
+
 				/*
 				 * skip the modified block if it is part of the "tail" that
 				 * we're copying anyway.
@@ -391,8 +392,8 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno)
 		/*
 		 * If we don't have any record of this file in the file map, it means
 		 * that it's a relation that doesn't exist in the source system, and
-		 * it was subsequently removed in the target system, too. We can safely
-		 * ignore it.
+		 * it was subsequently removed in the target system, too. We can
+		 * safely ignore it.
 		 */
 	}
 }
diff --git a/src/bin/pg_rewind/filemap.h b/src/bin/pg_rewind/filemap.h
index 73113ecdebb..9943ec5f26f 100644
--- a/src/bin/pg_rewind/filemap.h
+++ b/src/bin/pg_rewind/filemap.h
@@ -23,13 +23,13 @@
  */
 typedef enum
 {
-	FILE_ACTION_CREATE,		/* create local directory or symbolic link */
-	FILE_ACTION_COPY,		/* copy whole file, overwriting if exists */
-	FILE_ACTION_COPY_TAIL,	/* copy tail from 'oldsize' to 'newsize' */
-	FILE_ACTION_NONE,		/* no action (we might still copy modified blocks
-							 * based on the parsed WAL) */
-	FILE_ACTION_TRUNCATE,	/* truncate local file to 'newsize' bytes */
-	FILE_ACTION_REMOVE		/* remove local file / directory / symlink */
+	FILE_ACTION_CREATE,			/* create local directory or symbolic link */
+	FILE_ACTION_COPY,			/* copy whole file, overwriting if exists */
+	FILE_ACTION_COPY_TAIL,		/* copy tail from 'oldsize' to 'newsize' */
+	FILE_ACTION_NONE,			/* no action (we might still copy modified
+								 * blocks based on the parsed WAL) */
+	FILE_ACTION_TRUNCATE,		/* truncate local file to 'newsize' bytes */
+	FILE_ACTION_REMOVE			/* remove local file / directory / symlink */
 } file_action_t;
 
 typedef enum
@@ -51,10 +51,10 @@ typedef struct file_entry_t
 	size_t		newsize;
 	bool		isrelfile;		/* is it a relation data file? */
 
-	datapagemap_t	pagemap;
+	datapagemap_t pagemap;
 
 	/* for a symlink */
-	char		*link_target;
+	char	   *link_target;
 
 	struct file_entry_t *next;
 } file_entry_t;
@@ -72,16 +72,16 @@ typedef struct filemap_t
 	/*
 	 * After processing all the remote files, the entries in the linked list
 	 * are moved to this array. After processing local files, too, all the
-	 * local entries are added to the array by filemap_finalize, and sorted
-	 * in the final order. After filemap_finalize, all the entries are in
-	 * the array, and the linked list is empty.
+	 * local entries are added to the array by filemap_finalize, and sorted in
+	 * the final order. After filemap_finalize, all the entries are in the
+	 * array, and the linked list is empty.
 	 */
 	file_entry_t **array;
 	int			narray;			/* current length of array */
 
 	/*
-	 * Summary information. total_size is the total size of the source cluster,
-	 * and fetch_size is the number of bytes that needs to be copied.
+	 * Summary information. total_size is the total size of the source
+	 * cluster, and fetch_size is the number of bytes that needs to be copied.
 	 */
 	uint64		total_size;
 	uint64		fetch_size;
diff --git a/src/bin/pg_rewind/parsexlog.c b/src/bin/pg_rewind/parsexlog.c
index 9c112701e53..fca771d8cbd 100644
--- a/src/bin/pg_rewind/parsexlog.c
+++ b/src/bin/pg_rewind/parsexlog.c
@@ -236,7 +236,7 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
 {
 	XLogPageReadPrivate *private = (XLogPageReadPrivate *) xlogreader->private_data;
 	uint32		targetPageOff;
-	XLogSegNo	targetSegNo PG_USED_FOR_ASSERTS_ONLY;
+	XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
 
 	XLByteToSeg(targetPagePtr, targetSegNo);
 	targetPageOff = targetPagePtr % XLogSegSize;
@@ -315,10 +315,10 @@ extractPageInfo(XLogReaderState *record)
 		/*
 		 * New databases can be safely ignored. It won't be present in the
 		 * source system, so it will be deleted. There's one corner-case,
-		 * though: if a new, different, database is also created in the
-		 * source system, we'll see that the files already exist and not copy
-		 * them. That's OK, though; WAL replay of creating the new database,
-		 * from the source systems's WAL, will re-copy the new database,
+		 * though: if a new, different, database is also created in the source
+		 * system, we'll see that the files already exist and not copy them.
+		 * That's OK, though; WAL replay of creating the new database, from
+		 * the source systems's WAL, will re-copy the new database,
 		 * overwriting the database created in the target system.
 		 */
 	}
diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c
index d3ae7674d73..8088be4fab6 100644
--- a/src/bin/pg_rewind/pg_rewind.c
+++ b/src/bin/pg_rewind/pg_rewind.c
@@ -490,15 +490,15 @@ createBackupLabel(XLogRecPtr startpoint, TimeLineID starttli, XLogRecPtr checkpo
 				   "BACKUP METHOD: pg_rewind\n"
 				   "BACKUP FROM: standby\n"
 				   "START TIME: %s\n",
-				   /* omit LABEL: line */
-				   (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename,
+	/* omit LABEL: line */
+			  (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename,
 				   (uint32) (checkpointloc >> 32), (uint32) checkpointloc,
 				   strfbuf);
 	if (len >= sizeof(buf))
-		pg_fatal("backup label buffer too small\n"); /* shouldn't happen */
+		pg_fatal("backup label buffer too small\n");	/* shouldn't happen */
 
 	/* TODO: move old file out of the way, if any. */
-	open_target_file("backup_label", true); /* BACKUP_LABEL_FILE */
+	open_target_file("backup_label", true);		/* BACKUP_LABEL_FILE */
 	write_target_range(buf, 0, len);
 }
 
diff --git a/src/bin/pg_rewind/t/001_basic.pl b/src/bin/pg_rewind/t/001_basic.pl
index a1d679f6b8f..f60368bd307 100644
--- a/src/bin/pg_rewind/t/001_basic.pl
+++ b/src/bin/pg_rewind/t/001_basic.pl
@@ -32,8 +32,11 @@ sub run_test
 
 	# Insert additional data on master that will be replicated to standby
 	master_psql("INSERT INTO tbl1 values ('in master, before promotion')");
-	master_psql("INSERT INTO trunc_tbl values ('in master, before promotion')");
-	master_psql("INSERT INTO tail_tbl SELECT g, 'in master, before promotion: ' || g FROM generate_series(1, 10000) g");
+	master_psql(
+		"INSERT INTO trunc_tbl values ('in master, before promotion')");
+	master_psql(
+"INSERT INTO tail_tbl SELECT g, 'in master, before promotion: ' || g FROM generate_series(1, 10000) g"
+	);
 
 	master_psql('CHECKPOINT');
 
@@ -50,7 +53,9 @@ sub run_test
 
 	# Insert enough rows to trunc_tbl to extend the file. pg_rewind should
 	# truncate it back to the old size.
-	master_psql("INSERT INTO trunc_tbl SELECT 'in master, after promotion: ' || g FROM generate_series(1, 10000) g");
+	master_psql(
+"INSERT INTO trunc_tbl SELECT 'in master, after promotion: ' || g FROM generate_series(1, 10000) g"
+	);
 
 	# Truncate tail_tbl. pg_rewind should copy back the truncated part
 	# (We cannot use an actual TRUNCATE command here, as that creates a
@@ -60,20 +65,23 @@ sub run_test
 
 	RewindTest::run_pg_rewind($test_mode);
 
-	check_query('SELECT * FROM tbl1',
+	check_query(
+		'SELECT * FROM tbl1',
 		qq(in master
 in master, before promotion
 in standby, after promotion
 ),
 		'table content');
 
-	check_query('SELECT * FROM trunc_tbl',
+	check_query(
+		'SELECT * FROM trunc_tbl',
 		qq(in master
 in master, before promotion
 ),
 		'truncation');
 
-	check_query('SELECT count(*) FROM tail_tbl',
+	check_query(
+		'SELECT count(*) FROM tail_tbl',
 		qq(10001
 ),
 		'tail-copy');
diff --git a/src/bin/pg_rewind/t/002_databases.pl b/src/bin/pg_rewind/t/002_databases.pl
index be1e1948a7f..7564fa98a53 100644
--- a/src/bin/pg_rewind/t/002_databases.pl
+++ b/src/bin/pg_rewind/t/002_databases.pl
@@ -25,20 +25,22 @@ sub run_test
 	# Create databases in the old master and the new promoted standby.
 	master_psql('CREATE DATABASE master_afterpromotion');
 	standby_psql('CREATE DATABASE standby_afterpromotion');
+
 	# The clusters are now diverged.
 
 	RewindTest::run_pg_rewind($test_mode);
 
 	# Check that the correct databases are present after pg_rewind.
-	check_query('SELECT datname FROM pg_database',
-			   qq(template1
+	check_query(
+		'SELECT datname FROM pg_database',
+		qq(template1
 template0
 postgres
 inmaster
 beforepromotion
 standby_afterpromotion
 ),
-			   'database names');
+		'database names');
 
 	RewindTest::clean_rewind_test();
 }
diff --git a/src/bin/pg_rewind/t/003_extrafiles.pl b/src/bin/pg_rewind/t/003_extrafiles.pl
index ed50659195b..9a952685be9 100644
--- a/src/bin/pg_rewind/t/003_extrafiles.pl
+++ b/src/bin/pg_rewind/t/003_extrafiles.pl
@@ -24,44 +24,58 @@ sub run_test
 	append_to_file "$test_master_datadir/tst_both_dir/both_file1", "in both1";
 	append_to_file "$test_master_datadir/tst_both_dir/both_file2", "in both2";
 	mkdir "$test_master_datadir/tst_both_dir/both_subdir/";
-	append_to_file "$test_master_datadir/tst_both_dir/both_subdir/both_file3", "in both3";
+	append_to_file "$test_master_datadir/tst_both_dir/both_subdir/both_file3",
+	  "in both3";
 
 	RewindTest::create_standby();
 
 	# Create different subdirs and files in master and standby
 
 	mkdir "$test_standby_datadir/tst_standby_dir";
-	append_to_file "$test_standby_datadir/tst_standby_dir/standby_file1", "in standby1";
-	append_to_file "$test_standby_datadir/tst_standby_dir/standby_file2", "in standby2";
+	append_to_file "$test_standby_datadir/tst_standby_dir/standby_file1",
+	  "in standby1";
+	append_to_file "$test_standby_datadir/tst_standby_dir/standby_file2",
+	  "in standby2";
 	mkdir "$test_standby_datadir/tst_standby_dir/standby_subdir/";
-	append_to_file "$test_standby_datadir/tst_standby_dir/standby_subdir/standby_file3", "in standby3";
+	append_to_file
+	  "$test_standby_datadir/tst_standby_dir/standby_subdir/standby_file3",
+	  "in standby3";
 
 	mkdir "$test_master_datadir/tst_master_dir";
-	append_to_file "$test_master_datadir/tst_master_dir/master_file1", "in master1";
-	append_to_file "$test_master_datadir/tst_master_dir/master_file2", "in master2";
+	append_to_file "$test_master_datadir/tst_master_dir/master_file1",
+	  "in master1";
+	append_to_file "$test_master_datadir/tst_master_dir/master_file2",
+	  "in master2";
 	mkdir "$test_master_datadir/tst_master_dir/master_subdir/";
-	append_to_file "$test_master_datadir/tst_master_dir/master_subdir/master_file3", "in master3";
+	append_to_file
+	  "$test_master_datadir/tst_master_dir/master_subdir/master_file3",
+	  "in master3";
 
 	RewindTest::promote_standby();
 	RewindTest::run_pg_rewind($test_mode);
 
 	# List files in the data directory after rewind.
 	my @paths;
-	find(sub {push @paths, $File::Find::name if $File::Find::name =~ m/.*tst_.*/},
-		 $test_master_datadir);
+	find(
+		sub {
+			push @paths, $File::Find::name
+			  if $File::Find::name =~ m/.*tst_.*/;
+		},
+		$test_master_datadir);
 	@paths = sort @paths;
-	is_deeply(\@paths,
-			  ["$test_master_datadir/tst_both_dir",
-			   "$test_master_datadir/tst_both_dir/both_file1",
-			   "$test_master_datadir/tst_both_dir/both_file2",
-			   "$test_master_datadir/tst_both_dir/both_subdir",
-			   "$test_master_datadir/tst_both_dir/both_subdir/both_file3",
-			   "$test_master_datadir/tst_standby_dir",
-			   "$test_master_datadir/tst_standby_dir/standby_file1",
-			   "$test_master_datadir/tst_standby_dir/standby_file2",
-			   "$test_master_datadir/tst_standby_dir/standby_subdir",
-			   "$test_master_datadir/tst_standby_dir/standby_subdir/standby_file3"],
-			  "file lists match");
+	is_deeply(
+		\@paths,
+		[   "$test_master_datadir/tst_both_dir",
+			"$test_master_datadir/tst_both_dir/both_file1",
+			"$test_master_datadir/tst_both_dir/both_file2",
+			"$test_master_datadir/tst_both_dir/both_subdir",
+			"$test_master_datadir/tst_both_dir/both_subdir/both_file3",
+			"$test_master_datadir/tst_standby_dir",
+			"$test_master_datadir/tst_standby_dir/standby_file1",
+			"$test_master_datadir/tst_standby_dir/standby_file2",
+			"$test_master_datadir/tst_standby_dir/standby_subdir",
+"$test_master_datadir/tst_standby_dir/standby_subdir/standby_file3" ],
+		"file lists match");
 
 	RewindTest::clean_rewind_test();
 }
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 99c66be7fb4..5a91871c359 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -317,16 +317,16 @@ equivalent_locale(int category, const char *loca, const char *locb)
 	int			lenb;
 
 	/*
-	 * If the names are equal, the locales are equivalent. Checking this
-	 * first avoids calling setlocale() in the common case that the names
-	 * are equal. That's a good thing, if setlocale() is buggy, for example.
+	 * If the names are equal, the locales are equivalent. Checking this first
+	 * avoids calling setlocale() in the common case that the names are equal.
+	 * That's a good thing, if setlocale() is buggy, for example.
 	 */
 	if (pg_strcasecmp(loca, locb) == 0)
 		return true;
 
 	/*
-	 * Not identical. Canonicalize both names, remove the encoding parts,
-	 * and try again.
+	 * Not identical. Canonicalize both names, remove the encoding parts, and
+	 * try again.
 	 */
 	canona = get_canonical_locale_name(category, loca);
 	chara = strrchr(canona, '.');
@@ -512,7 +512,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
 		{
 			/* reproduce warning from CREATE TABLESPACE that is in the log */
 			pg_log(PG_WARNING,
-				"\nWARNING:  user-defined tablespace locations should not be inside the data directory, e.g. %s\n", old_tablespace_dir);
+				   "\nWARNING:  user-defined tablespace locations should not be inside the data directory, e.g. %s\n", old_tablespace_dir);
 
 			/* Unlink file in case it is left over from a previous run. */
 			unlink(*deletion_script_file_name);
@@ -611,8 +611,8 @@ check_is_install_user(ClusterInfo *cluster)
 
 	/*
 	 * We only allow the install user in the new cluster (see comment below)
-	 * and we preserve pg_authid.oid, so this must be the install user in
-	 * the old cluster too.
+	 * and we preserve pg_authid.oid, so this must be the install user in the
+	 * old cluster too.
 	 */
 	if (PQntuples(res) != 1 ||
 		atooid(PQgetvalue(res, 0, 1)) != BOOTSTRAP_SUPERUSERID)
@@ -681,10 +681,13 @@ check_proper_datallowconn(ClusterInfo *cluster)
 		}
 		else
 		{
-			/* avoid datallowconn == false databases from being skipped on restore */
+			/*
+			 * avoid datallowconn == false databases from being skipped on
+			 * restore
+			 */
 			if (strcmp(datallowconn, "f") == 0)
 				pg_fatal("All non-template0 databases must allow connections, "
-						 "i.e. their pg_database.datallowconn must be true\n");
+					   "i.e. their pg_database.datallowconn must be true\n");
 		}
 	}
 
@@ -873,7 +876,7 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
 		"			'pg_catalog.regconfig'::pg_catalog.regtype, "
 								"			'pg_catalog.regdictionary'::pg_catalog.regtype) AND "
 								"		c.relnamespace = n.oid AND "
-							  "		n.nspname NOT IN ('pg_catalog', 'information_schema')");
+								"		n.nspname NOT IN ('pg_catalog', 'information_schema')");
 
 		ntups = PQntuples(res);
 		i_nspname = PQfnumber(res, "nspname");
@@ -964,7 +967,7 @@ check_for_jsonb_9_4_usage(ClusterInfo *cluster)
 								"		c.relnamespace = n.oid AND "
 		/* exclude possible orphaned temp tables */
 								"  		n.nspname !~ '^pg_temp_' AND "
-							  "		n.nspname NOT IN ('pg_catalog', 'information_schema')");
+								"		n.nspname NOT IN ('pg_catalog', 'information_schema')");
 
 		ntups = PQntuples(res);
 		i_nspname = PQfnumber(res, "nspname");
@@ -999,7 +1002,7 @@ check_for_jsonb_9_4_usage(ClusterInfo *cluster)
 	{
 		pg_log(PG_REPORT, "fatal\n");
 		pg_fatal("Your installation contains one of the JSONB data types in user tables.\n"
-		 "The internal format of JSONB changed during 9.4 beta so this cluster cannot currently\n"
+				 "The internal format of JSONB changed during 9.4 beta so this cluster cannot currently\n"
 				 "be upgraded.  You can remove the problem tables and restart the upgrade.  A list\n"
 				 "of the problem columns is in the file:\n"
 				 "    %s\n\n", output_path);
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 2c20e847ac0..6d6f84d7252 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -111,7 +111,7 @@ optionally_create_toast_tables(void)
 								"FROM	pg_catalog.pg_class c, "
 								"		pg_catalog.pg_namespace n "
 								"WHERE	c.relnamespace = n.oid AND "
-							  "		n.nspname NOT IN ('pg_catalog', 'information_schema') AND "
+								"		n.nspname NOT IN ('pg_catalog', 'information_schema') AND "
 								"c.relkind IN ('r', 'm') AND "
 								"c.reltoastrelid = 0");
 
@@ -122,12 +122,12 @@ optionally_create_toast_tables(void)
 		{
 			/* enable auto-oid-numbered TOAST creation if needed */
 			PQclear(executeQueryOrDie(conn, "SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('%d'::pg_catalog.oid);",
-					OPTIONALLY_CREATE_TOAST_OID));
+									  OPTIONALLY_CREATE_TOAST_OID));
 
 			/* dummy command that also triggers check for required TOAST table */
 			PQclear(executeQueryOrDie(conn, "ALTER TABLE %s.%s RESET (binary_upgrade_dummy_option);",
-					quote_identifier(PQgetvalue(res, rowno, i_nspname)),
-					quote_identifier(PQgetvalue(res, rowno, i_relname))));
+						 quote_identifier(PQgetvalue(res, rowno, i_nspname)),
+					   quote_identifier(PQgetvalue(res, rowno, i_relname))));
 		}
 
 		PQclear(res);
diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c
index c0a56012090..e158c9ff8b0 100644
--- a/src/bin/pg_upgrade/info.c
+++ b/src/bin/pg_upgrade/info.c
@@ -38,16 +38,16 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
 				 int *nmaps, const char *old_pgdata, const char *new_pgdata)
 {
 	FileNameMap *maps;
-	int			old_relnum, new_relnum;
+	int			old_relnum,
+				new_relnum;
 	int			num_maps = 0;
 
 	maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) *
 									 old_db->rel_arr.nrels);
 
 	/*
-	 * The old database shouldn't have more relations than the new one.
-	 * We force the new cluster to have a TOAST table if the old table
-	 * had one.
+	 * The old database shouldn't have more relations than the new one. We
+	 * force the new cluster to have a TOAST table if the old table had one.
 	 */
 	if (old_db->rel_arr.nrels > new_db->rel_arr.nrels)
 		pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n",
@@ -62,15 +62,15 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
 
 		/*
 		 * It is possible that the new cluster has a TOAST table for a table
-		 * that didn't need one in the old cluster, e.g. 9.0 to 9.1 changed the
-		 * NUMERIC length computation.  Therefore, if we have a TOAST table
-		 * in the new cluster that doesn't match, skip over it and continue
-		 * processing.  It is possible this TOAST table used an OID that was
-		 * reserved in the old cluster, but we have no way of testing that,
-		 * and we would have already gotten an error at the new cluster schema
-		 * creation stage.  Fortunately, since we only restore the OID counter
-		 * after schema restore, and restore in OID order via pg_dump, a
-		 * conflict would only happen if the new TOAST table had a very low
+		 * that didn't need one in the old cluster, e.g. 9.0 to 9.1 changed
+		 * the NUMERIC length computation.  Therefore, if we have a TOAST
+		 * table in the new cluster that doesn't match, skip over it and
+		 * continue processing.  It is possible this TOAST table used an OID
+		 * that was reserved in the old cluster, but we have no way of testing
+		 * that, and we would have already gotten an error at the new cluster
+		 * schema creation stage.  Fortunately, since we only restore the OID
+		 * counter after schema restore, and restore in OID order via pg_dump,
+		 * a conflict would only happen if the new TOAST table had a very low
 		 * OID.  However, TOAST tables created long after initial table
 		 * creation can have any OID, particularly after OID wraparound.
 		 */
@@ -330,75 +330,77 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
 	 */
 
 	snprintf(query, sizeof(query),
-		/* get regular heap */
-			"WITH regular_heap (reloid) AS ( "
-			"	SELECT c.oid "
-			"	FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
-			"		   ON c.relnamespace = n.oid "
-			"	LEFT OUTER JOIN pg_catalog.pg_index i "
-			"		   ON c.oid = i.indexrelid "
-			"	WHERE relkind IN ('r', 'm', 'i', 'S') AND "
-		/*
-		 * pg_dump only dumps valid indexes;  testing indisready is necessary in
-		 * 9.2, and harmless in earlier/later versions.
-		 */
-			"		i.indisvalid IS DISTINCT FROM false AND "
-			"		i.indisready IS DISTINCT FROM false AND "
-		/* exclude possible orphaned temp tables */
-			"	  ((n.nspname !~ '^pg_temp_' AND "
-			"	    n.nspname !~ '^pg_toast_temp_' AND "
-		/* skip pg_toast because toast index have relkind == 'i', not 't' */
-			"	    n.nspname NOT IN ('pg_catalog', 'information_schema', "
-			"							'binary_upgrade', 'pg_toast') AND "
-			"		  c.oid >= %u) OR "
-			"	  (n.nspname = 'pg_catalog' AND "
-	"    relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ))), "
-		/*
-		 * We have to gather the TOAST tables in later steps because we
-		 * can't schema-qualify TOAST tables.
-		 */
-		 /* get TOAST heap */
-			"	toast_heap (reloid) AS ( "
-			"	SELECT reltoastrelid "
-			"	FROM regular_heap JOIN pg_catalog.pg_class c "
-			"		ON regular_heap.reloid = c.oid "
-			"		AND c.reltoastrelid != %u), "
-		 /* get indexes on regular and TOAST heap */
-			"	all_index (reloid) AS ( "
-			"	SELECT indexrelid "
-			"	FROM pg_index "
-			"	WHERE indisvalid "
-			"    AND indrelid IN (SELECT reltoastrelid "
-			"        FROM (SELECT reloid FROM regular_heap "
-			"			   UNION ALL "
-			"			   SELECT reloid FROM toast_heap) all_heap "
-			"            JOIN pg_catalog.pg_class c "
-			"            ON all_heap.reloid = c.oid "
-			"            AND c.reltoastrelid != %u)) "
-		/* get all rels */
-			"SELECT c.oid, n.nspname, c.relname, "
-			"	c.relfilenode, c.reltablespace, %s "
-			"FROM (SELECT reloid FROM regular_heap "
-			"	   UNION ALL "
-			"	   SELECT reloid FROM toast_heap  "
-			"	   UNION ALL "
-			"	   SELECT reloid FROM all_index) all_rels "
-			"  JOIN pg_catalog.pg_class c "
-			"		ON all_rels.reloid = c.oid "
-			"  JOIN pg_catalog.pg_namespace n "
-			"	   ON c.relnamespace = n.oid "
-			"  LEFT OUTER JOIN pg_catalog.pg_tablespace t "
-			"	   ON c.reltablespace = t.oid "
+	/* get regular heap */
+			 "WITH regular_heap (reloid) AS ( "
+			 "	SELECT c.oid "
+			 "	FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
+			 "		   ON c.relnamespace = n.oid "
+			 "	LEFT OUTER JOIN pg_catalog.pg_index i "
+			 "		   ON c.oid = i.indexrelid "
+			 "	WHERE relkind IN ('r', 'm', 'i', 'S') AND "
+
+	/*
+	 * pg_dump only dumps valid indexes;  testing indisready is necessary in
+	 * 9.2, and harmless in earlier/later versions.
+	 */
+			 "		i.indisvalid IS DISTINCT FROM false AND "
+			 "		i.indisready IS DISTINCT FROM false AND "
+	/* exclude possible orphaned temp tables */
+			 "	  ((n.nspname !~ '^pg_temp_' AND "
+			 "	    n.nspname !~ '^pg_toast_temp_' AND "
+	/* skip pg_toast because toast index have relkind == 'i', not 't' */
+			 "	    n.nspname NOT IN ('pg_catalog', 'information_schema', "
+			 "							'binary_upgrade', 'pg_toast') AND "
+			 "		  c.oid >= %u) OR "
+			 "	  (n.nspname = 'pg_catalog' AND "
+			 "    relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ))), "
+
+	/*
+	 * We have to gather the TOAST tables in later steps because we can't
+	 * schema-qualify TOAST tables.
+	 */
+	/* get TOAST heap */
+			 "	toast_heap (reloid) AS ( "
+			 "	SELECT reltoastrelid "
+			 "	FROM regular_heap JOIN pg_catalog.pg_class c "
+			 "		ON regular_heap.reloid = c.oid "
+			 "		AND c.reltoastrelid != %u), "
+	/* get indexes on regular and TOAST heap */
+			 "	all_index (reloid) AS ( "
+			 "	SELECT indexrelid "
+			 "	FROM pg_index "
+			 "	WHERE indisvalid "
+			 "    AND indrelid IN (SELECT reltoastrelid "
+			 "        FROM (SELECT reloid FROM regular_heap "
+			 "			   UNION ALL "
+			 "			   SELECT reloid FROM toast_heap) all_heap "
+			 "            JOIN pg_catalog.pg_class c "
+			 "            ON all_heap.reloid = c.oid "
+			 "            AND c.reltoastrelid != %u)) "
+	/* get all rels */
+			 "SELECT c.oid, n.nspname, c.relname, "
+			 "	c.relfilenode, c.reltablespace, %s "
+			 "FROM (SELECT reloid FROM regular_heap "
+			 "	   UNION ALL "
+			 "	   SELECT reloid FROM toast_heap  "
+			 "	   UNION ALL "
+			 "	   SELECT reloid FROM all_index) all_rels "
+			 "  JOIN pg_catalog.pg_class c "
+			 "		ON all_rels.reloid = c.oid "
+			 "  JOIN pg_catalog.pg_namespace n "
+			 "	   ON c.relnamespace = n.oid "
+			 "  LEFT OUTER JOIN pg_catalog.pg_tablespace t "
+			 "	   ON c.reltablespace = t.oid "
 	/* we preserve pg_class.oid so we sort by it to match old/new */
-			"ORDER BY 1;",
-			FirstNormalObjectId,
+			 "ORDER BY 1;",
+			 FirstNormalObjectId,
 	/* does pg_largeobject_metadata need to be migrated? */
-			(GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
-	"" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'",
-	InvalidOid, InvalidOid,
+			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
+	 "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'",
+			 InvalidOid, InvalidOid,
 	/* 9.2 removed the spclocation column */
-			(GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
-			"t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
+			 (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
+			 "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
 
 	res = executeQueryOrDie(conn, "%s", query);
 
diff --git a/src/bin/pg_upgrade/option.c b/src/bin/pg_upgrade/option.c
index b8510561350..90f1401549b 100644
--- a/src/bin/pg_upgrade/option.c
+++ b/src/bin/pg_upgrade/option.c
@@ -142,7 +142,7 @@ parseCommandLine(int argc, char *argv[])
 					old_cluster.pgopts = pg_strdup(optarg);
 				else
 				{
-					char *old_pgopts = old_cluster.pgopts;
+					char	   *old_pgopts = old_cluster.pgopts;
 
 					old_cluster.pgopts = psprintf("%s %s", old_pgopts, optarg);
 					free(old_pgopts);
@@ -155,7 +155,7 @@ parseCommandLine(int argc, char *argv[])
 					new_cluster.pgopts = pg_strdup(optarg);
 				else
 				{
-					char *new_pgopts = new_cluster.pgopts;
+					char	   *new_pgopts = new_cluster.pgopts;
 
 					new_cluster.pgopts = psprintf("%s %s", new_pgopts, optarg);
 					free(new_pgopts);
@@ -249,13 +249,15 @@ parseCommandLine(int argc, char *argv[])
 							 "PGDATANEW", "-D", "new cluster data resides");
 
 #ifdef WIN32
+
 	/*
 	 * On Windows, initdb --sync-only will fail with a "Permission denied"
-	 * error on file pg_upgrade_utility.log if pg_upgrade is run inside
-	 * the new cluster directory, so we do a check here.
+	 * error on file pg_upgrade_utility.log if pg_upgrade is run inside the
+	 * new cluster directory, so we do a check here.
 	 */
 	{
-		char	cwd[MAXPGPATH], new_cluster_pgdata[MAXPGPATH];
+		char		cwd[MAXPGPATH],
+					new_cluster_pgdata[MAXPGPATH];
 
 		strlcpy(new_cluster_pgdata, new_cluster.pgdata, MAXPGPATH);
 		canonicalize_path(new_cluster_pgdata);
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c
index 4e6a9f91be6..8cdfaf35eff 100644
--- a/src/bin/pg_upgrade/pg_upgrade.c
+++ b/src/bin/pg_upgrade/pg_upgrade.c
@@ -333,8 +333,8 @@ create_new_objects(void)
 	check_ok();
 
 	/*
-	 * We don't have minmxids for databases or relations in pre-9.3
-	 * clusters, so set those after we have restores the schemas.
+	 * We don't have minmxids for databases or relations in pre-9.3 clusters,
+	 * so set those after we have restores the schemas.
 	 */
 	if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
 		set_frozenxids(true);
@@ -473,7 +473,7 @@ copy_clog_xlog_xid(void)
 	/* now reset the wal archives in the new cluster */
 	prep_status("Resetting WAL archives");
 	exec_prog(UTILITY_LOG_FILE, NULL, true,
-			  /* use timeline 1 to match controldata and no WAL history file */
+	/* use timeline 1 to match controldata and no WAL history file */
 			  "\"%s/pg_resetxlog\" -l 00000001%s \"%s\"", new_cluster.bindir,
 			  old_cluster.controldata.nextxlogfile + 8,
 			  new_cluster.pgdata);
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h
index aecf0df30c2..13aa891d59d 100644
--- a/src/bin/pg_upgrade/pg_upgrade.h
+++ b/src/bin/pg_upgrade/pg_upgrade.h
@@ -329,7 +329,7 @@ extern OSInfo os_info;
 /* check.c */
 
 void		output_check_banner(bool live_check);
-void check_and_dump_old_cluster(bool live_check);
+void		check_and_dump_old_cluster(bool live_check);
 void		check_new_cluster(void);
 void		report_clusters_compatible(void);
 void		issue_warnings(void);
@@ -358,7 +358,7 @@ void		optionally_create_toast_tables(void);
 
 #define EXEC_PSQL_ARGS "--echo-queries --set ON_ERROR_STOP=on --no-psqlrc --dbname=template1"
 
-bool		exec_prog(const char *log_file, const char *opt_log_file,
+bool exec_prog(const char *log_file, const char *opt_log_file,
 		  bool throw_error, const char *fmt,...) pg_attribute_printf(4, 5);
 void		verify_directories(void);
 bool		pid_lock_file_exists(const char *datadir);
@@ -471,7 +471,7 @@ void		pg_putenv(const char *var, const char *val);
 
 void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster,
 										 bool check_mode);
-void old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster);
+void		old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster);
 
 /* parallel.c */
 void parallel_exec_prog(const char *log_file, const char *opt_log_file,
diff --git a/src/bin/pg_upgrade/relfilenode.c b/src/bin/pg_upgrade/relfilenode.c
index 7b3215af566..c22df429492 100644
--- a/src/bin/pg_upgrade/relfilenode.c
+++ b/src/bin/pg_upgrade/relfilenode.c
@@ -35,10 +35,10 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
 	  user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
 
 	/*
-	 * Transferring files by tablespace is tricky because a single database can
-	 * use multiple tablespaces.  For non-parallel mode, we just pass a NULL
-	 * tablespace path, which matches all tablespaces.  In parallel mode, we
-	 * pass the default tablespace and all user-created tablespaces and let
+	 * Transferring files by tablespace is tricky because a single database
+	 * can use multiple tablespaces.  For non-parallel mode, we just pass a
+	 * NULL tablespace path, which matches all tablespaces.  In parallel mode,
+	 * we pass the default tablespace and all user-created tablespaces and let
 	 * those operations happen in parallel.
 	 */
 	if (user_opts.jobs <= 1)
diff --git a/src/bin/pg_upgrade/server.c b/src/bin/pg_upgrade/server.c
index 8d8e7d70734..8c6b6da5153 100644
--- a/src/bin/pg_upgrade/server.c
+++ b/src/bin/pg_upgrade/server.c
@@ -204,11 +204,12 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
 	/*
 	 * Since PG 9.1, we have used -b to disable autovacuum.  For earlier
 	 * releases, setting autovacuum=off disables cleanup vacuum and analyze,
-	 * but freeze vacuums can still happen, so we set autovacuum_freeze_max_age
-	 * to its maximum.  (autovacuum_multixact_freeze_max_age was introduced
-	 * after 9.1, so there is no need to set that.)  We assume all datfrozenxid
-	 * and relfrozenxid values are less than a gap of 2000000000 from the current
-	 * xid counter, so autovacuum will not touch them.
+	 * but freeze vacuums can still happen, so we set
+	 * autovacuum_freeze_max_age to its maximum.
+	 * (autovacuum_multixact_freeze_max_age was introduced after 9.1, so there
+	 * is no need to set that.)  We assume all datfrozenxid and relfrozenxid
+	 * values are less than a gap of 2000000000 from the current xid counter,
+	 * so autovacuum will not touch them.
 	 *
 	 * Turn off durability requirements to improve object creation speed, and
 	 * we only modify the new cluster, so only use it there.  If there is a
diff --git a/src/bin/pg_upgrade/version.c b/src/bin/pg_upgrade/version.c
index e3e7387c92d..9954daea17e 100644
--- a/src/bin/pg_upgrade/version.c
+++ b/src/bin/pg_upgrade/version.c
@@ -167,9 +167,9 @@ old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
 	{
 		pg_log(PG_REPORT, "fatal\n");
 		pg_fatal("Your installation contains the \"line\" data type in user tables.  This\n"
-		"data type changed its internal and input/output format between your old\n"
+				 "data type changed its internal and input/output format between your old\n"
 				 "and new clusters so this cluster cannot currently be upgraded.  You can\n"
-		"remove the problem tables and restart the upgrade.  A list of the problem\n"
+				 "remove the problem tables and restart the upgrade.  A list of the problem\n"
 				 "columns is in the file:\n"
 				 "    %s\n\n", output_path);
 	}
diff --git a/src/bin/pg_xlogdump/pg_xlogdump.c b/src/bin/pg_xlogdump/pg_xlogdump.c
index e9cbbd264de..c0a68167844 100644
--- a/src/bin/pg_xlogdump/pg_xlogdump.c
+++ b/src/bin/pg_xlogdump/pg_xlogdump.c
@@ -494,7 +494,10 @@ XLogDumpStatsRow(const char *name,
 				 uint64 fpi_len, uint64 total_fpi_len,
 				 uint64 tot_len, uint64 total_len)
 {
-	double		n_pct, rec_len_pct, fpi_len_pct, tot_len_pct;
+	double		n_pct,
+				rec_len_pct,
+				fpi_len_pct,
+				tot_len_pct;
 
 	n_pct = 0;
 	if (total_count != 0)
@@ -528,12 +531,14 @@ XLogDumpStatsRow(const char *name,
 static void
 XLogDumpDisplayStats(XLogDumpConfig *config, XLogDumpStats *stats)
 {
-	int			ri, rj;
+	int			ri,
+				rj;
 	uint64		total_count = 0;
 	uint64		total_rec_len = 0;
 	uint64		total_fpi_len = 0;
 	uint64		total_len = 0;
-	double		rec_len_pct, fpi_len_pct;
+	double		rec_len_pct,
+				fpi_len_pct;
 
 	/* ---
 	 * Make a first pass to calculate column totals:
@@ -551,11 +556,11 @@ XLogDumpDisplayStats(XLogDumpConfig *config, XLogDumpStats *stats)
 		total_rec_len += stats->rmgr_stats[ri].rec_len;
 		total_fpi_len += stats->rmgr_stats[ri].fpi_len;
 	}
-	total_len = total_rec_len+total_fpi_len;
+	total_len = total_rec_len + total_fpi_len;
 
 	/*
-	 * 27 is strlen("Transaction/COMMIT_PREPARED"),
-	 * 20 is strlen(2^64), 8 is strlen("(100.00%)")
+	 * 27 is strlen("Transaction/COMMIT_PREPARED"), 20 is strlen(2^64), 8 is
+	 * strlen("(100.00%)")
 	 */
 
 	printf("%-27s %20s %8s %20s %8s %20s %8s %20s %8s\n"
@@ -565,7 +570,10 @@ XLogDumpDisplayStats(XLogDumpConfig *config, XLogDumpStats *stats)
 
 	for (ri = 0; ri < RM_NEXT_ID; ri++)
 	{
-		uint64		count, rec_len, fpi_len, tot_len;
+		uint64		count,
+					rec_len,
+					fpi_len,
+					tot_len;
 		const RmgrDescData *desc = &RmgrDescTable[ri];
 
 		if (!config->stats_per_record)
@@ -610,10 +618,10 @@ XLogDumpDisplayStats(XLogDumpConfig *config, XLogDumpStats *stats)
 		   "", "--------", "", "--------", "", "--------", "", "--------");
 
 	/*
-	 * The percentages in earlier rows were calculated against the
-	 * column total, but the ones that follow are against the row total.
-	 * Note that these are displayed with a % symbol to differentiate
-	 * them from the earlier ones, and are thus up to 9 characters long.
+	 * The percentages in earlier rows were calculated against the column
+	 * total, but the ones that follow are against the row total. Note that
+	 * these are displayed with a % symbol to differentiate them from the
+	 * earlier ones, and are thus up to 9 characters long.
 	 */
 
 	rec_len_pct = 0;
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 8b8b5911d63..6f35db4763b 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -100,7 +100,7 @@ static int	pthread_join(pthread_t th, void **thread_return);
 #define LOG_STEP_SECONDS	5	/* seconds between log messages */
 #define DEFAULT_NXACTS	10		/* default nxacts */
 
-#define MIN_GAUSSIAN_THRESHOLD		2.0	/* minimum threshold for gauss */
+#define MIN_GAUSSIAN_THRESHOLD		2.0 /* minimum threshold for gauss */
 
 int			nxacts = 0;			/* number of transactions per client */
 int			duration = 0;		/* duration in seconds */
@@ -244,7 +244,8 @@ typedef struct
 	int64		throttle_trigger;		/* previous/next throttling (us) */
 	int64		throttle_lag;	/* total transaction lag behind throttling */
 	int64		throttle_lag_max;		/* max transaction lag */
-	int64		throttle_latency_skipped; /* lagging transactions skipped */
+	int64		throttle_latency_skipped;		/* lagging transactions
+												 * skipped */
 	int64		latency_late;	/* late transactions */
 } TState;
 
@@ -296,8 +297,8 @@ typedef struct
 
 	long		start_time;		/* when does the interval start */
 	int			cnt;			/* number of transactions */
-	int			skipped;		/* number of transactions skipped under
-								 * --rate and --latency-limit */
+	int			skipped;		/* number of transactions skipped under --rate
+								 * and --latency-limit */
 
 	double		min_latency;	/* min/max latencies */
 	double		max_latency;
@@ -389,7 +390,7 @@ usage(void)
 		 "  -f, --file=FILENAME      read transaction script from FILENAME\n"
 		   "  -j, --jobs=NUM           number of threads (default: 1)\n"
 		   "  -l, --log                write transaction times to log file\n"
-		   "  -L, --latency-limit=NUM  count transactions lasting more than NUM ms\n"
+	"  -L, --latency-limit=NUM  count transactions lasting more than NUM ms\n"
 		   "                           as late.\n"
 		   "  -M, --protocol=simple|extended|prepared\n"
 		   "                           protocol for submitting queries (default: simple)\n"
@@ -509,19 +510,22 @@ getrand(TState *thread, int64 min, int64 max)
 static int64
 getExponentialRand(TState *thread, int64 min, int64 max, double threshold)
 {
-	double cut, uniform, rand;
+	double		cut,
+				uniform,
+				rand;
+
 	Assert(threshold > 0.0);
 	cut = exp(-threshold);
 	/* erand in [0, 1), uniform in (0, 1] */
 	uniform = 1.0 - pg_erand48(thread->random_state);
+
 	/*
-	 * inner expresion in (cut, 1] (if threshold > 0),
-	 * rand in [0, 1)
+	 * inner expresion in (cut, 1] (if threshold > 0), rand in [0, 1)
 	 */
 	Assert((1.0 - cut) != 0.0);
-	rand = - log(cut + (1.0 - cut) * uniform) / threshold;
+	rand = -log(cut + (1.0 - cut) * uniform) / threshold;
 	/* return int64 random number within between min and max */
-	return min + (int64)((max - min + 1) * rand);
+	return min + (int64) ((max - min + 1) * rand);
 }
 
 /* random number generator: gaussian distribution from min to max inclusive */
@@ -532,34 +536,37 @@ getGaussianRand(TState *thread, int64 min, int64 max, double threshold)
 	double		rand;
 
 	/*
-	 * Get user specified random number from this loop, with
-	 * -threshold < stdev <= threshold
+	 * Get user specified random number from this loop, with -threshold <
+	 * stdev <= threshold
 	 *
 	 * This loop is executed until the number is in the expected range.
 	 *
 	 * As the minimum threshold is 2.0, the probability of looping is low:
-	 * sqrt(-2 ln(r)) <= 2 => r >= e^{-2} ~ 0.135, then when taking the average
-	 * sinus multiplier as 2/pi, we have a 8.6% looping probability in the
-	 * worst case. For a 5.0 threshold value, the looping probability
-	 * is about e^{-5} * 2 / pi ~ 0.43%.
+	 * sqrt(-2 ln(r)) <= 2 => r >= e^{-2} ~ 0.135, then when taking the
+	 * average sinus multiplier as 2/pi, we have a 8.6% looping probability in
+	 * the worst case. For a 5.0 threshold value, the looping probability is
+	 * about e^{-5} * 2 / pi ~ 0.43%.
 	 */
 	do
 	{
 		/*
 		 * pg_erand48 generates [0,1), but for the basic version of the
 		 * Box-Muller transform the two uniformly distributed random numbers
-		 * are expected in (0, 1] (see http://en.wikipedia.org/wiki/Box_muller)
+		 * are expected in (0, 1] (see
+		 * http://en.wikipedia.org/wiki/Box_muller)
 		 */
-		double rand1 = 1.0 - pg_erand48(thread->random_state);
-		double rand2 = 1.0 - pg_erand48(thread->random_state);
+		double		rand1 = 1.0 - pg_erand48(thread->random_state);
+		double		rand2 = 1.0 - pg_erand48(thread->random_state);
 
 		/* Box-Muller basic form transform */
-		double var_sqrt = sqrt(-2.0 * log(rand1));
+		double		var_sqrt = sqrt(-2.0 * log(rand1));
+
 		stdev = var_sqrt * sin(2.0 * M_PI * rand2);
 
 		/*
-		 * we may try with cos, but there may be a bias induced if the previous
-		 * value fails the test. To be on the safe side, let us try over.
+		 * we may try with cos, but there may be a bias induced if the
+		 * previous value fails the test. To be on the safe side, let us try
+		 * over.
 		 */
 	}
 	while (stdev < -threshold || stdev >= threshold);
@@ -568,7 +575,7 @@ getGaussianRand(TState *thread, int64 min, int64 max, double threshold)
 	rand = (stdev + threshold) / (threshold * 2.0);
 
 	/* return int64 random number within between min and max */
-	return min + (int64)((max - min + 1) * rand);
+	return min + (int64) ((max - min + 1) * rand);
 }
 
 /*
@@ -582,7 +589,7 @@ getPoissonRand(TState *thread, int64 center)
 	 * Use inverse transform sampling to generate a value > 0, such that the
 	 * expected (i.e. average) value is the given argument.
 	 */
-	double uniform;
+	double		uniform;
 
 	/* erand in [0, 1), uniform in (0, 1] */
 	uniform = 1.0 - pg_erand48(thread->random_state);
@@ -918,7 +925,7 @@ evaluateExpr(CState *st, PgBenchExpr *expr, int64 *retval)
 				if ((var = getVariable(st, expr->u.variable.varname)) == NULL)
 				{
 					fprintf(stderr, "undefined variable %s\n",
-						expr->u.variable.varname);
+							expr->u.variable.varname);
 					return false;
 				}
 				*retval = strtoint64(var);
@@ -927,8 +934,8 @@ evaluateExpr(CState *st, PgBenchExpr *expr, int64 *retval)
 
 		case ENODE_OPERATOR:
 			{
-				int64	lval;
-				int64	rval;
+				int64		lval;
+				int64		rval;
 
 				if (!evaluateExpr(st, expr->u.operator.lexpr, &lval))
 					return false;
@@ -1115,7 +1122,7 @@ agg_vals_init(AggVals *aggs, instr_time start)
 	aggs->skipped = 0;			/* xacts skipped under --rate --latency-limit */
 
 	aggs->sum_latency = 0;		/* SUM(latency) */
-	aggs->sum2_latency = 0;				/* SUM(latency*latency) */
+	aggs->sum2_latency = 0;		/* SUM(latency*latency) */
 
 	/* min and max transaction duration */
 	aggs->min_latency = 0;
@@ -1535,9 +1542,10 @@ top:
 			/*
 			 * Generate random number functions need to be able to subtract
 			 * max from min and add one to the result without overflowing.
-			 * Since we know max > min, we can detect overflow just by checking
-			 * for a negative result. But we must check both that the subtraction
-			 * doesn't overflow, and that adding one to the result doesn't overflow either.
+			 * Since we know max > min, we can detect overflow just by
+			 * checking for a negative result. But we must check both that the
+			 * subtraction doesn't overflow, and that adding one to the result
+			 * doesn't overflow either.
 			 */
 			if (max - min < 0 || (max - min) + 1 < 0)
 			{
@@ -1546,7 +1554,7 @@ top:
 				return true;
 			}
 
-			if (argc == 4 || /* uniform without or with "uniform" keyword */
+			if (argc == 4 ||	/* uniform without or with "uniform" keyword */
 				(argc == 5 && pg_strcasecmp(argv[4], "uniform") == 0))
 			{
 #ifdef DEBUG
@@ -1598,7 +1606,7 @@ top:
 					snprintf(res, sizeof(res), INT64_FORMAT, getExponentialRand(thread, min, max, threshold));
 				}
 			}
-			else /* this means an error somewhere in the parsing phase... */
+			else	/* this means an error somewhere in the parsing phase... */
 			{
 				fprintf(stderr, "%s: unexpected arguments\n", argv[0]);
 				st->ecnt++;
@@ -1742,7 +1750,10 @@ doLog(TState *thread, CState *st, FILE *logfile, instr_time *now, AggVals *agg,
 			agg->cnt += 1;
 			if (skipped)
 			{
-				/* there is no latency to record if the transaction was skipped */
+				/*
+				 * there is no latency to record if the transaction was
+				 * skipped
+				 */
 				agg->skipped += 1;
 			}
 			else
@@ -1779,9 +1790,9 @@ doLog(TState *thread, CState *st, FILE *logfile, instr_time *now, AggVals *agg,
 			while (agg->start_time + agg_interval < INSTR_TIME_GET_DOUBLE(*now))
 			{
 				/*
-				 * This is a non-Windows branch (thanks to the
-				 * ifdef in usage), so we don't need to handle
-				 * this in a special way (see below).
+				 * This is a non-Windows branch (thanks to the ifdef in
+				 * usage), so we don't need to handle this in a special way
+				 * (see below).
 				 */
 				fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f",
 						agg->start_time,
@@ -2217,7 +2228,7 @@ syntax_error(const char *source, const int lineno,
 		fprintf(stderr, "%s\n", line);
 		if (column != -1)
 		{
-			int i;
+			int			i;
 
 			for (i = 0; i < column - 1; i++)
 				fprintf(stderr, " ");
@@ -2260,7 +2271,8 @@ process_commands(char *buf, const char *source, const int lineno)
 
 	if (*p == '\\')
 	{
-		int		max_args = -1;
+		int			max_args = -1;
+
 		my_commands->type = META_COMMAND;
 
 		j = 0;
@@ -2282,9 +2294,9 @@ process_commands(char *buf, const char *source, const int lineno)
 
 		if (pg_strcasecmp(my_commands->argv[0], "setrandom") == 0)
 		{
-			/* parsing:
-			 * \setrandom variable min max [uniform]
-			 * \setrandom variable min max (gaussian|exponential) threshold
+			/*
+			 * parsing: \setrandom variable min max [uniform] \setrandom
+			 * variable min max (gaussian|exponential) threshold
 			 */
 
 			if (my_commands->argc < 4)
@@ -2295,20 +2307,21 @@ process_commands(char *buf, const char *source, const int lineno)
 
 			/* argc >= 4 */
 
-			if (my_commands->argc == 4 || /* uniform without/with "uniform" keyword */
+			if (my_commands->argc == 4 ||		/* uniform without/with
+												 * "uniform" keyword */
 				(my_commands->argc == 5 &&
 				 pg_strcasecmp(my_commands->argv[4], "uniform") == 0))
 			{
 				/* nothing to do */
 			}
-			else if (/* argc >= 5 */
+			else if (			/* argc >= 5 */
 					 (pg_strcasecmp(my_commands->argv[4], "gaussian") == 0) ||
-					 (pg_strcasecmp(my_commands->argv[4], "exponential") == 0))
+				   (pg_strcasecmp(my_commands->argv[4], "exponential") == 0))
 			{
 				if (my_commands->argc < 6)
 				{
 					syntax_error(source, lineno, my_commands->line, my_commands->argv[0],
-								 "missing threshold argument", my_commands->argv[4], -1);
+					 "missing threshold argument", my_commands->argv[4], -1);
 				}
 				else if (my_commands->argc > 6)
 				{
@@ -2317,7 +2330,7 @@ process_commands(char *buf, const char *source, const int lineno)
 								 my_commands->cols[6]);
 				}
 			}
-			else /* cannot parse, unexpected arguments */
+			else	/* cannot parse, unexpected arguments */
 			{
 				syntax_error(source, lineno, my_commands->line, my_commands->argv[0],
 							 "unexpected argument", my_commands->argv[4],
@@ -2486,7 +2499,8 @@ process_file(char *filename)
 
 	Command   **my_commands;
 	FILE	   *fd;
-	int			lineno, index;
+	int			lineno,
+				index;
 	char	   *buf;
 	int			alloc_num;
 
@@ -2514,6 +2528,7 @@ process_file(char *filename)
 	while ((buf = read_line_from_file(fd)) != NULL)
 	{
 		Command    *command;
+
 		lineno += 1;
 
 		command = process_commands(buf, filename, lineno);
@@ -2547,7 +2562,8 @@ process_builtin(char *tb, const char *source)
 #define COMMANDS_ALLOC_NUM 128
 
 	Command   **my_commands;
-	int			lineno, index;
+	int			lineno,
+				index;
 	char		buf[BUFSIZ];
 	int			alloc_num;
 
@@ -2653,7 +2669,7 @@ printResults(int ttype, int64 normal_xacts, int nclients,
 	if (latency_limit)
 		printf("number of transactions above the %.1f ms latency limit: " INT64_FORMAT " (%.3f %%)\n",
 			   latency_limit / 1000.0, latency_late,
-			   100.0 * latency_late / (throttle_latency_skipped + normal_xacts));
+		   100.0 * latency_late / (throttle_latency_skipped + normal_xacts));
 
 	if (throttle_delay || progress || latency_limit)
 	{
@@ -3045,7 +3061,8 @@ main(int argc, char **argv)
 				break;
 			case 'L':
 				{
-					double limit_ms = atof(optarg);
+					double		limit_ms = atof(optarg);
+
 					if (limit_ms <= 0.0)
 					{
 						fprintf(stderr, "invalid latency limit: %s\n", optarg);
diff --git a/src/bin/pgbench/pgbench.h b/src/bin/pgbench/pgbench.h
index a3db6b97cc9..42e2aae2943 100644
--- a/src/bin/pgbench/pgbench.h
+++ b/src/bin/pgbench/pgbench.h
@@ -22,39 +22,39 @@ typedef struct PgBenchExpr PgBenchExpr;
 
 struct PgBenchExpr
 {
-	PgBenchExprType	etype;
+	PgBenchExprType etype;
 	union
 	{
 		struct
 		{
-			int64 ival;
-		} integer_constant;
+			int64		ival;
+		}			integer_constant;
 		struct
 		{
-			char *varname;
-		} variable;
+			char	   *varname;
+		}			variable;
 		struct
 		{
-			char operator;
-			PgBenchExpr	*lexpr;
+			char		operator;
+			PgBenchExpr *lexpr;
 			PgBenchExpr *rexpr;
-		} operator;
-	} u;
+		}			operator;
+	}			u;
 };
 
 extern PgBenchExpr *expr_parse_result;
 
-extern int      expr_yyparse(void);
-extern int      expr_yylex(void);
+extern int	expr_yyparse(void);
+extern int	expr_yylex(void);
 extern void expr_yyerror(const char *str);
 extern void expr_scanner_init(const char *str, const char *source,
-							  const int lineno, const char *line,
-							  const char *cmd, const int ecol);
-extern void syntax_error(const char* source, const int lineno, const char* line,
-						 const char* cmd, const char* msg, const char* more,
-						 const int col);
+				  const int lineno, const char *line,
+				  const char *cmd, const int ecol);
+extern void syntax_error(const char *source, const int lineno, const char *line,
+			 const char *cmd, const char *msg, const char *more,
+			 const int col);
 extern void expr_scanner_finish(void);
 
 extern int64 strtoint64(const char *str);
 
-#endif	/* PGBENCH_H */
+#endif   /* PGBENCH_H */
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 70b7d3be151..38253fa0988 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -1082,7 +1082,8 @@ exec_command(const char *cmd,
 
 			for (i = 0; my_list[i] != NULL; i++)
 			{
-				char   *val = pset_value_string(my_list[i], &pset.popt);
+				char	   *val = pset_value_string(my_list[i], &pset.popt);
+
 				printf("%-24s %s\n", my_list[i], val);
 				free(val);
 			}
@@ -1515,7 +1516,7 @@ exec_command(const char *cmd,
 	else if (strcmp(cmd, "?") == 0)
 	{
 		char	   *opt0 = psql_scan_slash_option(scan_state,
-													OT_NORMAL, NULL, false);
+												  OT_NORMAL, NULL, false);
 
 		if (!opt0 || strcmp(opt0, "commands") == 0)
 			slashUsage(pset.popt.topt.pager);
@@ -1636,8 +1637,8 @@ do_connect(char *dbname, char *user, char *host, char *port)
 
 	/*
 	 * Any change in the parameters read above makes us discard the password.
-	 * We also discard it if we're to use a conninfo rather than the positional
-	 * syntax.
+	 * We also discard it if we're to use a conninfo rather than the
+	 * positional syntax.
 	 */
 	keep_password =
 		((strcmp(user, PQuser(o_conn)) == 0) &&
@@ -1863,7 +1864,7 @@ printSSLInfo(void)
 		   protocol ? protocol : _("unknown"),
 		   cipher ? cipher : _("unknown"),
 		   bits ? bits : _("unknown"),
-		   (compression && strcmp(compression, "off") != 0) ? _("on") : _("off"));
+	  (compression && strcmp(compression, "off") != 0) ? _("on") : _("off"));
 }
 
 
@@ -2402,7 +2403,7 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
 		if (!value)
 			;
 		else if (!set_unicode_line_style(popt, value, vallen,
-										 &popt->topt.unicode_border_linestyle))
+									   &popt->topt.unicode_border_linestyle))
 		{
 			psql_error("\\pset: allowed unicode border linestyle are single, double\n");
 			return false;
@@ -2415,7 +2416,7 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
 		if (!value)
 			;
 		else if (!set_unicode_line_style(popt, value, vallen,
-										 &popt->topt.unicode_column_linestyle))
+									   &popt->topt.unicode_column_linestyle))
 		{
 			psql_error("\\pset: allowed unicode column linestyle are single, double\n");
 			return false;
@@ -2428,7 +2429,7 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
 		if (!value)
 			;
 		else if (!set_unicode_line_style(popt, value, vallen,
-										 &popt->topt.unicode_header_linestyle))
+									   &popt->topt.unicode_header_linestyle))
 		{
 			psql_error("\\pset: allowed unicode header linestyle are single, double\n");
 			return false;
@@ -2742,19 +2743,19 @@ printPsetInfo(const char *param, struct printQueryOpt *popt)
 	else if (strcmp(param, "unicode_border_linestyle") == 0)
 	{
 		printf(_("Unicode border linestyle is \"%s\".\n"),
-				_unicode_linestyle2string(popt->topt.unicode_border_linestyle));
+			 _unicode_linestyle2string(popt->topt.unicode_border_linestyle));
 	}
 
 	else if (strcmp(param, "unicode_column_linestyle") == 0)
 	{
 		printf(_("Unicode column linestyle is \"%s\".\n"),
-				_unicode_linestyle2string(popt->topt.unicode_column_linestyle));
+			 _unicode_linestyle2string(popt->topt.unicode_column_linestyle));
 	}
 
 	else if (strcmp(param, "unicode_header_linestyle") == 0)
 	{
 		printf(_("Unicode border linestyle is \"%s\".\n"),
-				_unicode_linestyle2string(popt->topt.unicode_header_linestyle));
+			 _unicode_linestyle2string(popt->topt.unicode_header_linestyle));
 	}
 
 	else
@@ -2945,7 +2946,7 @@ do_watch(PQExpBuffer query_buf, long sleep)
 
 	for (;;)
 	{
-		int	res;
+		int			res;
 		time_t		timer;
 		long		i;
 
@@ -2962,8 +2963,8 @@ do_watch(PQExpBuffer query_buf, long sleep)
 		res = PSQLexecWatch(query_buf->data, &myopt);
 
 		/*
-		 * PSQLexecWatch handles the case where we can no longer
-		 * repeat the query, and returns 0 or -1.
+		 * PSQLexecWatch handles the case where we can no longer repeat the
+		 * query, and returns 0 or -1.
 		 */
 		if (res == 0)
 			break;
@@ -3001,7 +3002,7 @@ do_watch(PQExpBuffer query_buf, long sleep)
  * returns true unless we have ECHO_HIDDEN_NOEXEC.
  */
 static bool
-lookup_function_echo_hidden(char * query)
+lookup_function_echo_hidden(char *query)
 {
 	if (pset.echo_hidden != PSQL_ECHO_HIDDEN_OFF)
 	{
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index ff01368531a..0e266a3e188 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -491,7 +491,7 @@ int
 PSQLexecWatch(const char *query, const printQueryOpt *opt)
 {
 	PGresult   *res;
-	double	elapsed_msec = 0;
+	double		elapsed_msec = 0;
 	instr_time	before;
 	instr_time	after;
 
@@ -524,10 +524,9 @@ PSQLexecWatch(const char *query, const printQueryOpt *opt)
 	}
 
 	/*
-	 * If SIGINT is sent while the query is processing, the interrupt
-	 * will be consumed.  The user's intention, though, is to cancel
-	 * the entire watch process, so detect a sent cancellation request and
-	 * exit in this case.
+	 * If SIGINT is sent while the query is processing, the interrupt will be
+	 * consumed.  The user's intention, though, is to cancel the entire watch
+	 * process, so detect a sent cancellation request and exit in this case.
 	 */
 	if (cancel_pressed)
 	{
diff --git a/src/bin/psql/common.h b/src/bin/psql/common.h
index 3c3ffa3f141..caf31d19b89 100644
--- a/src/bin/psql/common.h
+++ b/src/bin/psql/common.h
@@ -36,7 +36,7 @@ extern void SetCancelConn(void);
 extern void ResetCancelConn(void);
 
 extern PGresult *PSQLexec(const char *query);
-extern int PSQLexecWatch(const char *query, const printQueryOpt *opt);
+extern int	PSQLexecWatch(const char *query, const printQueryOpt *opt);
 
 extern bool SendQuery(const char *query);
 
diff --git a/src/bin/psql/copy.c b/src/bin/psql/copy.c
index 965a1dcb266..f1eb518de79 100644
--- a/src/bin/psql/copy.c
+++ b/src/bin/psql/copy.c
@@ -556,6 +556,7 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary, PGresult **res)
 		if (showprompt)
 		{
 			const char *prompt = get_prompt(PROMPT_COPY);
+
 			fputs(prompt, stdout);
 			fflush(stdout);
 		}
@@ -593,6 +594,7 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary, PGresult **res)
 			if (showprompt)
 			{
 				const char *prompt = get_prompt(PROMPT_COPY);
+
 				fputs(prompt, stdout);
 				fflush(stdout);
 			}
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 04d769e3d6b..db568096dce 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -531,7 +531,7 @@ describeTypes(const char *pattern, bool verbose, bool showSystem)
 	if (verbose)
 	{
 		appendPQExpBuffer(&buf,
-						  "  pg_catalog.pg_get_userbyid(t.typowner) AS \"%s\",\n",
+					 "  pg_catalog.pg_get_userbyid(t.typowner) AS \"%s\",\n",
 						  gettext_noop("Owner"));
 	}
 	if (verbose && pset.sversion >= 90200)
@@ -803,7 +803,7 @@ permissionsList(const char *pattern)
 						  "       ELSE E''\n"
 						  "       END"
 						  "    || CASE WHEN polroles <> '{0}' THEN\n"
-						  "           E'\\n  to: ' || pg_catalog.array_to_string(\n"
+				   "           E'\\n  to: ' || pg_catalog.array_to_string(\n"
 						  "               ARRAY(\n"
 						  "                   SELECT rolname\n"
 						  "                   FROM pg_catalog.pg_roles\n"
@@ -2031,19 +2031,19 @@ describeOneTableDetails(const char *schemaname,
 		if (pset.sversion >= 90500)
 		{
 			printfPQExpBuffer(&buf,
-						   "SELECT pol.polname,\n"
-						   "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE array_to_string(array(select rolname from pg_roles where oid = any (pol.polroles) order by 1),',') END,\n"
-						   "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid),\n"
-						   "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid),\n"
-						   "CASE pol.polcmd \n"
-						   "WHEN 'r' THEN 'SELECT'\n"
-						   "WHEN 'a' THEN 'INSERT'\n"
-						   "WHEN 'w' THEN 'UPDATE'\n"
-						   "WHEN 'd' THEN 'DELETE'\n"
-						   "WHEN '*' THEN 'ALL'\n"
-						   "END AS cmd\n"
+							  "SELECT pol.polname,\n"
+							  "CASE WHEN pol.polroles = '{0}' THEN NULL ELSE array_to_string(array(select rolname from pg_roles where oid = any (pol.polroles) order by 1),',') END,\n"
+					   "pg_catalog.pg_get_expr(pol.polqual, pol.polrelid),\n"
+				  "pg_catalog.pg_get_expr(pol.polwithcheck, pol.polrelid),\n"
+							  "CASE pol.polcmd \n"
+							  "WHEN 'r' THEN 'SELECT'\n"
+							  "WHEN 'a' THEN 'INSERT'\n"
+							  "WHEN 'w' THEN 'UPDATE'\n"
+							  "WHEN 'd' THEN 'DELETE'\n"
+							  "WHEN '*' THEN 'ALL'\n"
+							  "END AS cmd\n"
 							  "FROM pg_catalog.pg_policy pol\n"
-				  "WHERE pol.polrelid = '%s' ORDER BY 1;",
+							  "WHERE pol.polrelid = '%s' ORDER BY 1;",
 							  oid);
 
 			result = PSQLexec(buf.data);
@@ -2053,9 +2053,9 @@ describeOneTableDetails(const char *schemaname,
 				tuples = PQntuples(result);
 
 			/*
-			 * Handle cases where RLS is enabled and there are policies,
-			 * or there aren't policies, or RLS isn't enabled but there
-			 * are policies
+			 * Handle cases where RLS is enabled and there are policies, or
+			 * there aren't policies, or RLS isn't enabled but there are
+			 * policies
 			 */
 			if (tableinfo.rowsecurity && tuples > 0)
 				printTableAddFooter(&cont, _("Policies:"));
@@ -2070,7 +2070,7 @@ describeOneTableDetails(const char *schemaname,
 			for (i = 0; i < tuples; i++)
 			{
 				printfPQExpBuffer(&buf, "    POLICY \"%s\"",
-									  PQgetvalue(result, i, 0));
+								  PQgetvalue(result, i, 0));
 
 				if (!PQgetisnull(result, i, 4))
 					appendPQExpBuffer(&buf, " FOR %s",
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index ea05c3e37be..b523054825f 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -81,11 +81,11 @@ usage(unsigned short int pager)
 	fprintf(output, _("  -f, --file=FILENAME      execute commands from file, then exit\n"));
 	fprintf(output, _("  -l, --list               list available databases, then exit\n"));
 	fprintf(output, _("  -v, --set=, --variable=NAME=VALUE\n"
-			 "                           set psql variable NAME to VALUE e.g.: -v ON_ERROR_STOP=1\n"));
+					  "                           set psql variable NAME to VALUE e.g.: -v ON_ERROR_STOP=1\n"));
 	fprintf(output, _("  -V, --version            output version information, then exit\n"));
 	fprintf(output, _("  -X, --no-psqlrc          do not read startup file (~/.psqlrc)\n"));
 	fprintf(output, _("  -1 (\"one\"), --single-transaction\n"
-			 "                           execute as a single transaction (if non-interactive)\n"));
+					  "                           execute as a single transaction (if non-interactive)\n"));
 	fprintf(output, _("  -?, --help[=options]     show this help, then exit\n"));
 	fprintf(output, _("      --help=variables     show a list of all specially treated variables, then exit\n"));
 	fprintf(output, _("      --help=commands      show a list of backslash commands, then exit\n"));
@@ -105,29 +105,29 @@ usage(unsigned short int pager)
 	fprintf(output, _("\nOutput format options:\n"));
 	fprintf(output, _("  -A, --no-align           unaligned table output mode\n"));
 	fprintf(output, _("  -F, --field-separator=STRING\n"
-			 "                           field separator for unaligned output (default: \"%s\")\n"),
-		   DEFAULT_FIELD_SEP);
+					  "                           field separator for unaligned output (default: \"%s\")\n"),
+			DEFAULT_FIELD_SEP);
 	fprintf(output, _("  -H, --html               HTML table output mode\n"));
 	fprintf(output, _("  -P, --pset=VAR[=ARG]     set printing option VAR to ARG (see \\pset command)\n"));
 	fprintf(output, _("  -R, --record-separator=STRING\n"
-			 "                           record separator for unaligned output (default: newline)\n"));
+					  "                           record separator for unaligned output (default: newline)\n"));
 	fprintf(output, _("  -t, --tuples-only        print rows only\n"));
 	fprintf(output, _("  -T, --table-attr=TEXT    set HTML table tag attributes (e.g., width, border)\n"));
 	fprintf(output, _("  -x, --expanded           turn on expanded table output\n"));
 	fprintf(output, _("  -z, --field-separator-zero\n"
-			 "                           set field separator for unaligned output to zero byte\n"));
+					  "                           set field separator for unaligned output to zero byte\n"));
 	fprintf(output, _("  -0, --record-separator-zero\n"
-			 "                           set record separator for unaligned output to zero byte\n"));
+					  "                           set record separator for unaligned output to zero byte\n"));
 
 	fprintf(output, _("\nConnection options:\n"));
 	/* Display default host */
 	env = getenv("PGHOST");
 	fprintf(output, _("  -h, --host=HOSTNAME      database server host or socket directory (default: \"%s\")\n"),
-		   env ? env : _("local socket"));
+			env ? env : _("local socket"));
 	/* Display default port */
 	env = getenv("PGPORT");
 	fprintf(output, _("  -p, --port=PORT          database server port (default: \"%s\")\n"),
-		   env ? env : DEF_PGPORT_STR);
+			env ? env : DEF_PGPORT_STR);
 	/* Display default user */
 	env = getenv("PGUSER");
 	if (!env)
@@ -137,8 +137,8 @@ usage(unsigned short int pager)
 	fprintf(output, _("  -W, --password           force password prompt (should happen automatically)\n"));
 
 	fprintf(output, _("\nFor more information, type \"\\?\" (for internal commands) or \"\\help\" (for SQL\n"
-			 "commands) from within psql, or consult the psql section in the PostgreSQL\n"
-			 "documentation.\n\n"));
+					  "commands) from within psql, or consult the psql section in the PostgreSQL\n"
+					  "documentation.\n\n"));
 	fprintf(output, _("Report bugs to <pgsql-bugs@postgresql.org>.\n"));
 
 	ClosePager(output);
@@ -315,15 +315,15 @@ helpVariables(unsigned short int pager)
 
 	fprintf(output, _("  AUTOCOMMIT         if set, successful SQL commands are automatically committed\n"));
 	fprintf(output, _("  COMP_KEYWORD_CASE  determine the case used to complete SQL keywords\n"
-					 "                     [lower, upper, preserve-lower, preserve-upper]\n"));
+	"                     [lower, upper, preserve-lower, preserve-upper]\n"));
 	fprintf(output, _("  DBNAME             the currently connected database name\n"));
 	fprintf(output, _("  ECHO               control what input is written to standard output\n"
-					 "                     [all, errors, none, queries]\n"));
+					  "                     [all, errors, none, queries]\n"));
 	fprintf(output, _("  ECHO_HIDDEN        display internal queries executed by backslash commands when it is set\n"
-					 "                     or with [noexec] just show without execution\n"));
+	 "                     or with [noexec] just show without execution\n"));
 	fprintf(output, _("  ENCODING           current client character set encoding\n"));
 	fprintf(output, _("  FETCH_COUNT        the number of result rows to fetch and display at a time\n"
-					 "                     (default: 0=unlimited)\n"));
+					  "                     (default: 0=unlimited)\n"));
 	fprintf(output, _("  HISTCONTROL        control history list [ignorespace, ignoredups, ignoreboth]\n"));
 	fprintf(output, _("  HISTFILE           file name used to store the history list\n"));
 	fprintf(output, _("  HISTSIZE           the number of commands to store in the command history\n"));
@@ -356,18 +356,18 @@ helpVariables(unsigned short int pager)
 	fprintf(output, _("  linestyle          set the border line drawing style [ascii, old-ascii, unicode]\n"));
 	fprintf(output, _("  null               set the string to be printed in place of a null value\n"));
 	fprintf(output, _("  numericlocale      enable or disable display of a locale-specific character to separate\n"
-					 "                     groups of digits [on, off]\n"));
+					  "                     groups of digits [on, off]\n"));
 	fprintf(output, _("  pager              control when an external pager is used [yes, no, always]\n"));
 	fprintf(output, _("  recordsep          specify the record (line) separator to use in unaligned output format\n"));
 	fprintf(output, _("  recordsep_zero     set the record separator to use in unaligned output format to a zero byte.\n"));
 	fprintf(output, _("  tableattr (or T)   specify attributes for table tag in html format or proportional\n"
-					 "                     column width of left aligned data type in latex format\n"));
+					  "                     column width of left aligned data type in latex format\n"));
 	fprintf(output, _("  title              set the table title for any subsequently printed tables\n"));
 	fprintf(output, _("  tuples_only        if set, only actual table data is shown\n"));
 	fprintf(output, _("  unicode_border_linestyle\n"));
 	fprintf(output, _("  unicode_column_linestyle\n"));
 	fprintf(output, _("  unicode_header_linestyle\n"
-					 "                     set the style of unicode line drawing [single, double]\n"));
+					  "                     set the style of unicode line drawing [single, double]\n"));
 
 	fprintf(output, _("\nEnvironment variables:\n"));
 	fprintf(output, _("Usage:\n"));
@@ -388,9 +388,9 @@ helpVariables(unsigned short int pager)
 	fprintf(output, _("  PGPASSWORD         connection password (not recommended)\n"));
 	fprintf(output, _("  PGPASSFILE         password file name\n"));
 	fprintf(output, _("  PSQL_EDITOR, EDITOR, VISUAL\n"
-					 "                     editor used by the \\e and \\ef commands\n"));
+		 "                     editor used by the \\e and \\ef commands\n"));
 	fprintf(output, _("  PSQL_EDITOR_LINENUMBER_ARG\n"
-					 "                     how to specify a line number when invoking the editor\n"));
+					  "                     how to specify a line number when invoking the editor\n"));
 	fprintf(output, _("  PSQL_HISTORY       alternative location for the command history file\n"));
 	fprintf(output, _("  PSQLRC             alternative location for the user's .psqlrc file\n"));
 	fprintf(output, _("  SHELL              shell used by the \\! command\n"));
diff --git a/src/bin/psql/print.c b/src/bin/psql/print.c
index 94c69845c79..cab9e6eb44b 100644
--- a/src/bin/psql/print.c
+++ b/src/bin/psql/print.c
@@ -92,20 +92,23 @@ const printTextFormat pg_asciiformat_old =
 /* Default unicode linestyle format */
 const printTextFormat pg_utf8format;
 
-typedef struct unicodeStyleRowFormat {
+typedef struct unicodeStyleRowFormat
+{
 	const char *horizontal;
 	const char *vertical_and_right[2];
 	const char *vertical_and_left[2];
 } unicodeStyleRowFormat;
 
-typedef struct unicodeStyleColumnFormat {
+typedef struct unicodeStyleColumnFormat
+{
 	const char *vertical;
 	const char *vertical_and_horizontal[2];
 	const char *up_and_horizontal[2];
 	const char *down_and_horizontal[2];
 } unicodeStyleColumnFormat;
 
-typedef struct unicodeStyleBorderFormat {
+typedef struct unicodeStyleBorderFormat
+{
 	const char *up_and_right;
 	const char *vertical;
 	const char *down_and_right;
@@ -114,7 +117,8 @@ typedef struct unicodeStyleBorderFormat {
 	const char *left_and_right;
 } unicodeStyleBorderFormat;
 
-typedef struct unicodeStyleFormat {
+typedef struct unicodeStyleFormat
+{
 	unicodeStyleRowFormat row_style[2];
 	unicodeStyleColumnFormat column_style[2];
 	unicodeStyleBorderFormat border_style[2];
@@ -124,7 +128,7 @@ typedef struct unicodeStyleFormat {
 	const char *nl_right;
 	const char *wrap_left;
 	const char *wrap_right;
-	bool wrap_right_border;
+	bool		wrap_right_border;
 } unicodeStyleFormat;
 
 const unicodeStyleFormat unicode_style = {
@@ -175,11 +179,11 @@ const unicodeStyleFormat unicode_style = {
 		{"\342\225\232", "\342\225\221", "\342\225\224", "\342\225\220", "\342\225\227", "\342\225\235"},
 	},
 	" ",
-	"\342\206\265", /* ↵ */
+	"\342\206\265",				/* ↵ */
 	" ",
-	"\342\206\265", /* ↵ */
-	"\342\200\246", /* … */
-	"\342\200\246", /* … */
+	"\342\206\265",				/* ↵ */
+	"\342\200\246",				/* … */
+	"\342\200\246",				/* … */
 	true
 };
 
@@ -984,7 +988,7 @@ print_aligned_text(const printTableContent *cont, FILE *fout)
 				int			bytes_to_output;
 				int			chars_to_output = width_wrap[j];
 				bool		finalspaces = (opt_border == 2 ||
-								(col_count > 0 && j < col_count - 1));
+									   (col_count > 0 && j < col_count - 1));
 
 				/* Print left-hand wrap or newline mark */
 				if (opt_border != 0)
@@ -1356,12 +1360,13 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
 		else if (opt_border == 1)
 		{
 			/*
-			 * For border = 1, one for the pipe (|) in the middle
-			 * between the two spaces.
+			 * For border = 1, one for the pipe (|) in the middle between the
+			 * two spaces.
 			 */
 			swidth = 3;
 		}
 		else
+
 			/*
 			 * For border = 2, two more for the pipes (|) at the beginning and
 			 * at the end of the lines.
@@ -1370,10 +1375,10 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
 
 		if ((opt_border < 2) &&
 			((hmultiline &&
-			(format == &pg_asciiformat_old)) ||
-			(dmultiline &&
-			(format != &pg_asciiformat_old))))
-			iwidth++; /* for newline indicators */
+			  (format == &pg_asciiformat_old)) ||
+			 (dmultiline &&
+			  (format != &pg_asciiformat_old))))
+			iwidth++;			/* for newline indicators */
 
 		min_width = hwidth + iwidth + swidth + 3;
 
@@ -1386,6 +1391,7 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
 			 * Record number
 			 */
 			unsigned int rwidth = 1 + log10(cont->nrows);
+
 			if (opt_border == 0)
 				rwidth += 9;	/* "* RECORD " */
 			else if (opt_border == 1)
@@ -1402,6 +1408,7 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
 		if ((width < min_width) || (output_columns < min_width))
 			width = min_width - hwidth - iwidth - swidth;
 		else if (output_columns > 0)
+
 			/*
 			 * Wrap to maximum width
 			 */
@@ -1412,7 +1419,7 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
 			dmultiline = true;
 			if ((opt_border == 0) &&
 				(format != &pg_asciiformat_old))
-				width--; /* for wrap indicators */
+				width--;		/* for wrap indicators */
 		}
 		dwidth = width;
 	}
@@ -1440,10 +1447,11 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
 		if (i % cont->ncolumns == 0)
 		{
 			unsigned int lhwidth = hwidth;
+
 			if ((opt_border < 2) &&
 				(hmultiline) &&
 				(format == &pg_asciiformat_old))
-				lhwidth++; /* for newline indicators */
+				lhwidth++;		/* for newline indicators */
 
 			if (!opt_tuples_only)
 				print_aligned_vertical_line(format, opt_border, record++,
@@ -1480,12 +1488,14 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
 			{
 				int			swidth = hwidth,
 							target_width = hwidth;
+
 				/*
 				 * Left spacer or new line indicator
 				 */
 				if ((opt_border == 2) ||
 					(hmultiline && (format == &pg_asciiformat_old)))
 					fputs(hline ? format->header_nl_left : " ", fout);
+
 				/*
 				 * Header text
 				 */
@@ -1523,6 +1533,7 @@ print_aligned_vertical(const printTableContent *cont, FILE *fout)
 			else
 			{
 				unsigned int swidth = hwidth + opt_border;
+
 				if ((opt_border < 2) &&
 					(hmultiline) &&
 					(format == &pg_asciiformat_old))
@@ -1886,9 +1897,10 @@ static void
 asciidoc_escaped_print(const char *in, FILE *fout)
 {
 	const char *p;
+
 	for (p = in; *p; p++)
 	{
-		switch(*p)
+		switch (*p)
 		{
 			case '|':
 				fputs("\\|", fout);
@@ -1925,7 +1937,7 @@ print_asciidoc_text(const printTableContent *cont, FILE *fout)
 
 		/* print table [] header definition */
 		fprintf(fout, "[%scols=\"", !opt_tuples_only ? "options=\"header\"," : "");
-		for(i = 0; i < cont->ncolumns; i++)
+		for (i = 0; i < cont->ncolumns; i++)
 		{
 			if (i != 0)
 				fputs(",", fout);
@@ -2046,7 +2058,7 @@ print_asciidoc_vertical(const printTableContent *cont, FILE *fout)
 				break;
 			case 2:
 				fputs(",frame=\"all\",grid=\"all\"", fout);
-			break;
+				break;
 		}
 		fputs("]\n", fout);
 		fputs("|====\n", fout);
@@ -2729,8 +2741,8 @@ PageOutput(int lines, const printTableOpt *topt)
 	{
 		const char *pagerprog;
 		FILE	   *pagerpipe;
-		unsigned short int  pager =  topt->pager;
-		int         min_lines = topt->pager_min_lines;
+		unsigned short int pager = topt->pager;
+		int			min_lines = topt->pager_min_lines;
 
 #ifdef TIOCGWINSZ
 		int			result;
@@ -3262,7 +3274,7 @@ get_line_style(const printTableOpt *opt)
 void
 refresh_utf8format(const printTableOpt *opt)
 {
-	printTextFormat *popt =  (printTextFormat *) &pg_utf8format;
+	printTextFormat *popt = (printTextFormat *) &pg_utf8format;
 
 	const unicodeStyleBorderFormat *border;
 	const unicodeStyleRowFormat *header;
diff --git a/src/bin/psql/print.h b/src/bin/psql/print.h
index 322db4d6eff..b0b6bf52517 100644
--- a/src/bin/psql/print.h
+++ b/src/bin/psql/print.h
@@ -90,7 +90,7 @@ typedef struct printTableOpt
 								 * 1=dividing lines, 2=full */
 	unsigned short int pager;	/* use pager for output (if to stdout and
 								 * stdout is a tty) 0=off 1=on 2=always */
-	int         pager_min_lines;/* don't use pager unless there are at least
+	int			pager_min_lines;/* don't use pager unless there are at least
 								 * this many lines */
 	bool		tuples_only;	/* don't output headers, row counts, etc. */
 	bool		start_table;	/* print start decoration, eg <table> */
@@ -106,9 +106,9 @@ typedef struct printTableOpt
 	int			encoding;		/* character encoding */
 	int			env_columns;	/* $COLUMNS on psql start, 0 is unset */
 	int			columns;		/* target width for wrapped format */
-	unicode_linestyle	unicode_border_linestyle;
-	unicode_linestyle	unicode_column_linestyle;
-	unicode_linestyle	unicode_header_linestyle;
+	unicode_linestyle unicode_border_linestyle;
+	unicode_linestyle unicode_column_linestyle;
+	unicode_linestyle unicode_header_linestyle;
 } printTableOpt;
 
 /*
diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c
index d57901f7780..28ba75a72e5 100644
--- a/src/bin/psql/startup.c
+++ b/src/bin/psql/startup.c
@@ -592,7 +592,7 @@ parse_psql_options(int argc, char *argv[], struct adhoc_opts * options)
 				}
 				break;
 			default:
-			unknown_option:
+		unknown_option:
 				fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
 						pset.progname);
 				exit(EXIT_FAILURE);
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 750e29ddf3c..b9f5acc65ee 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -816,10 +816,10 @@ static char *_complete_from_query(int is_schema_query,
 static char *complete_from_list(const char *text, int state);
 static char *complete_from_const(const char *text, int state);
 static void append_variable_names(char ***varnames, int *nvars,
-								  int *maxvars, const char *varname,
-								  const char *prefix, const char *suffix);
+					  int *maxvars, const char *varname,
+					  const char *prefix, const char *suffix);
 static char **complete_from_variables(const char *text,
-					  const char *prefix, const char *suffix, bool need_value);
+					const char *prefix, const char *suffix, bool need_value);
 static char *complete_from_files(const char *text, int state);
 
 static char *pg_strdup_keyword_case(const char *s, const char *ref);
@@ -961,6 +961,7 @@ psql_completion(const char *text, int start, int end)
 		COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables,
 								   "UNION SELECT 'ALL IN TABLESPACE'");
 	}
+
 	/*
 	 * complete with what you can alter (TABLE, GROUP, USER, ...) unless we're
 	 * in ALTER TABLE sth ALTER
@@ -984,7 +985,7 @@ psql_completion(const char *text, int start, int end)
 			 pg_strcasecmp(prev2_wd, "TABLESPACE") == 0)
 	{
 		static const char *const list_ALTERALLINTSPC[] =
-			{"SET TABLESPACE", "OWNED BY", NULL};
+		{"SET TABLESPACE", "OWNED BY", NULL};
 
 		COMPLETE_WITH_LIST(list_ALTERALLINTSPC);
 	}
@@ -1129,7 +1130,7 @@ psql_completion(const char *text, int start, int end)
 	{
 		static const char *const list_ALTER_FOREIGN_TABLE[] =
 		{"ADD", "ALTER", "DISABLE TRIGGER", "DROP", "ENABLE", "INHERIT",
-		"NO INHERIT", "OPTIONS", "OWNER TO", "RENAME", "SET",
+			"NO INHERIT", "OPTIONS", "OWNER TO", "RENAME", "SET",
 		"VALIDATE CONSTRAINT", NULL};
 
 		COMPLETE_WITH_LIST(list_ALTER_FOREIGN_TABLE);
@@ -1381,7 +1382,7 @@ psql_completion(const char *text, int start, int end)
 	else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
 			 pg_strcasecmp(prev2_wd, "SYSTEM") == 0 &&
 			 (pg_strcasecmp(prev_wd, "SET") == 0 ||
-			 pg_strcasecmp(prev_wd, "RESET") == 0))
+			  pg_strcasecmp(prev_wd, "RESET") == 0))
 		COMPLETE_WITH_QUERY(Query_for_list_of_alter_system_set_vars);
 	/* ALTER VIEW <name> */
 	else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
@@ -1572,7 +1573,7 @@ psql_completion(const char *text, int start, int end)
 			 pg_strcasecmp(prev_wd, "DISABLE") == 0)
 	{
 		static const char *const list_ALTERDISABLE[] =
-		{ "ROW LEVEL SECURITY", "RULE", "TRIGGER", NULL};
+		{"ROW LEVEL SECURITY", "RULE", "TRIGGER", NULL};
 
 		COMPLETE_WITH_LIST(list_ALTERDISABLE);
 	}
@@ -1598,7 +1599,7 @@ psql_completion(const char *text, int start, int end)
 			 pg_strcasecmp(prev_wd, "SECURITY") == 0)
 	{
 		static const char *const list_DISABLERLS[] =
-		{ "CASCADE", NULL};
+		{"CASCADE", NULL};
 
 		COMPLETE_WITH_LIST(list_DISABLERLS);
 	}
@@ -2140,7 +2141,7 @@ psql_completion(const char *text, int start, int end)
 			   pg_strcasecmp(prev4_wd, "ON") == 0) ||
 			  (pg_strcasecmp(prev6_wd, "COMMENT") == 0 &&
 			   pg_strcasecmp(prev5_wd, "ON") == 0)) &&
-			   pg_strcasecmp(prev_wd, "IS") != 0)
+			 pg_strcasecmp(prev_wd, "IS") != 0)
 		COMPLETE_WITH_CONST("IS");
 
 /* COPY */
@@ -2205,7 +2206,7 @@ psql_completion(const char *text, int start, int end)
 	{
 		static const char *const list_DATABASE[] =
 		{"OWNER", "TEMPLATE", "ENCODING", "TABLESPACE", "IS_TEMPLATE",
-		"ALLOW_CONNECTIONS", "CONNECTION LIMIT", "LC_COLLATE", "LC_CTYPE",
+			"ALLOW_CONNECTIONS", "CONNECTION LIMIT", "LC_COLLATE", "LC_CTYPE",
 		NULL};
 
 		COMPLETE_WITH_LIST(list_DATABASE);
@@ -2309,8 +2310,8 @@ psql_completion(const char *text, int start, int end)
 		COMPLETE_WITH_ATTR(prev4_wd, "");
 	/* Complete USING with an index method */
 	else if ((pg_strcasecmp(prev6_wd, "INDEX") == 0 ||
-			 pg_strcasecmp(prev5_wd, "INDEX") == 0 ||
-			 pg_strcasecmp(prev4_wd, "INDEX") == 0) &&
+			  pg_strcasecmp(prev5_wd, "INDEX") == 0 ||
+			  pg_strcasecmp(prev4_wd, "INDEX") == 0) &&
 			 pg_strcasecmp(prev3_wd, "ON") == 0 &&
 			 pg_strcasecmp(prev_wd, "USING") == 0)
 		COMPLETE_WITH_QUERY(Query_for_list_of_access_methods);
@@ -2340,7 +2341,11 @@ psql_completion(const char *text, int start, int end)
 
 		COMPLETE_WITH_LIST(list_POLICYOPTIONS);
 	}
-	/* Complete "CREATE POLICY <name> ON <table> FOR ALL|SELECT|INSERT|UPDATE|DELETE" */
+
+	/*
+	 * Complete "CREATE POLICY <name> ON <table> FOR
+	 * ALL|SELECT|INSERT|UPDATE|DELETE"
+	 */
 	else if (pg_strcasecmp(prev6_wd, "CREATE") == 0 &&
 			 pg_strcasecmp(prev5_wd, "POLICY") == 0 &&
 			 pg_strcasecmp(prev3_wd, "ON") == 0 &&
@@ -2362,30 +2367,33 @@ psql_completion(const char *text, int start, int end)
 
 		COMPLETE_WITH_LIST(list_POLICYOPTIONS);
 	}
+
 	/*
-	 * Complete "CREATE POLICY <name> ON <table> FOR SELECT TO|USING"
-	 * Complete "CREATE POLICY <name> ON <table> FOR DELETE TO|USING"
+	 * Complete "CREATE POLICY <name> ON <table> FOR SELECT TO|USING" Complete
+	 * "CREATE POLICY <name> ON <table> FOR DELETE TO|USING"
 	 */
 	else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
 			 pg_strcasecmp(prev4_wd, "ON") == 0 &&
 			 pg_strcasecmp(prev2_wd, "FOR") == 0 &&
 			 (pg_strcasecmp(prev_wd, "SELECT") == 0 ||
-			 pg_strcasecmp(prev_wd, "DELETE") == 0))
+			  pg_strcasecmp(prev_wd, "DELETE") == 0))
 	{
 		static const char *const list_POLICYOPTIONS[] =
 		{"TO", "USING", NULL};
 
 		COMPLETE_WITH_LIST(list_POLICYOPTIONS);
 	}
+
 	/*
 	 * Complete "CREATE POLICY <name> ON <table> FOR ALL TO|USING|WITH CHECK"
-	 * Complete "CREATE POLICY <name> ON <table> FOR UPDATE TO|USING|WITH CHECK"
+	 * Complete "CREATE POLICY <name> ON <table> FOR UPDATE TO|USING|WITH
+	 * CHECK"
 	 */
 	else if (pg_strcasecmp(prev6_wd, "POLICY") == 0 &&
 			 pg_strcasecmp(prev4_wd, "ON") == 0 &&
 			 pg_strcasecmp(prev2_wd, "FOR") == 0 &&
 			 (pg_strcasecmp(prev_wd, "ALL") == 0 ||
-			 pg_strcasecmp(prev_wd, "UPDATE") == 0))
+			  pg_strcasecmp(prev_wd, "UPDATE") == 0))
 	{
 		static const char *const list_POLICYOPTIONS[] =
 		{"TO", "USING", "WITH CHECK", NULL};
@@ -3336,7 +3344,7 @@ psql_completion(const char *text, int start, int end)
 	else if (pg_strcasecmp(prev_wd, "REINDEX") == 0)
 	{
 		static const char *const list_REINDEX[] =
-			{"TABLE", "INDEX", "SYSTEM", "SCHEMA", "DATABASE", NULL};
+		{"TABLE", "INDEX", "SYSTEM", "SCHEMA", "DATABASE", NULL};
 
 		COMPLETE_WITH_LIST(list_REINDEX);
 	}
@@ -3346,7 +3354,7 @@ psql_completion(const char *text, int start, int end)
 			COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL);
 		else if (pg_strcasecmp(prev_wd, "INDEX") == 0)
 			COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL);
-		else if (pg_strcasecmp(prev_wd, "SCHEMA") == 0 )
+		else if (pg_strcasecmp(prev_wd, "SCHEMA") == 0)
 			COMPLETE_WITH_QUERY(Query_for_list_of_schemas);
 		else if (pg_strcasecmp(prev_wd, "SYSTEM") == 0 ||
 				 pg_strcasecmp(prev_wd, "DATABASE") == 0)
@@ -4374,7 +4382,7 @@ complete_from_variables(const char *text, const char *prefix, const char *suffix
 		"ENCODING", "FETCH_COUNT", "HISTCONTROL", "HISTFILE", "HISTSIZE",
 		"HOST", "IGNOREEOF", "LASTOID", "ON_ERROR_ROLLBACK", "ON_ERROR_STOP",
 		"PORT", "PROMPT1", "PROMPT2", "PROMPT3", "QUIET", "SINGLELINE",
-		"SINGLESTEP", "USER", "VERBOSITY",	NULL
+		"SINGLESTEP", "USER", "VERBOSITY", NULL
 	};
 
 	varnames = (char **) pg_malloc((maxvars + 1) * sizeof(char *));
@@ -4390,7 +4398,7 @@ complete_from_variables(const char *text, const char *prefix, const char *suffix
 	{
 		if (need_value && !(ptr->value))
 			continue;
-		for (i = 0; known_varnames[i]; i++)	/* remove duplicate entry */
+		for (i = 0; known_varnames[i]; i++)		/* remove duplicate entry */
 		{
 			if (strcmp(ptr->name, known_varnames[i]) == 0)
 				continue;
@@ -4475,7 +4483,7 @@ pg_strdup_keyword_case(const char *s, const char *ref)
 
 	if (pset.comp_case == PSQL_COMP_CASE_LOWER ||
 		((pset.comp_case == PSQL_COMP_CASE_PRESERVE_LOWER ||
-		  pset.comp_case == PSQL_COMP_CASE_PRESERVE_UPPER) && islower(first)) ||
+	   pset.comp_case == PSQL_COMP_CASE_PRESERVE_UPPER) && islower(first)) ||
 		(pset.comp_case == PSQL_COMP_CASE_PRESERVE_LOWER && !isalpha(first)))
 	{
 		for (p = ret; *p; p++)
diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c
index da142aaa643..0deadec0975 100644
--- a/src/bin/scripts/common.c
+++ b/src/bin/scripts/common.c
@@ -21,7 +21,7 @@
 
 
 static PGcancel *volatile cancelConn = NULL;
-bool CancelRequested = false;
+bool		CancelRequested = false;
 
 #ifdef WIN32
 static CRITICAL_SECTION cancelConnLock;
diff --git a/src/bin/scripts/reindexdb.c b/src/bin/scripts/reindexdb.c
index 32d3409e057..941729da2e7 100644
--- a/src/bin/scripts/reindexdb.c
+++ b/src/bin/scripts/reindexdb.c
@@ -181,7 +181,7 @@ main(int argc, char *argv[])
 		}
 
 		reindex_all_databases(maintenance_db, host, port, username,
-							  prompt_password, progname, echo, quiet, verbose);
+							prompt_password, progname, echo, quiet, verbose);
 	}
 	else if (syscatalog)
 	{
@@ -233,7 +233,7 @@ main(int argc, char *argv[])
 			for (cell = schemas.head; cell; cell = cell->next)
 			{
 				reindex_one_database(cell->val, dbname, "SCHEMA", host, port,
-								   username, prompt_password, progname, echo, verbose);
+						 username, prompt_password, progname, echo, verbose);
 			}
 		}
 
@@ -244,7 +244,7 @@ main(int argc, char *argv[])
 			for (cell = indexes.head; cell; cell = cell->next)
 			{
 				reindex_one_database(cell->val, dbname, "INDEX", host, port,
-								  username, prompt_password, progname, echo, verbose);
+						 username, prompt_password, progname, echo, verbose);
 			}
 		}
 		if (tables.head != NULL)
@@ -254,13 +254,17 @@ main(int argc, char *argv[])
 			for (cell = tables.head; cell; cell = cell->next)
 			{
 				reindex_one_database(cell->val, dbname, "TABLE", host, port,
-								  username, prompt_password, progname, echo, verbose);
+						 username, prompt_password, progname, echo, verbose);
 			}
 		}
-		/* reindex database only if neither index nor table nor schema is specified */
+
+		/*
+		 * reindex database only if neither index nor table nor schema is
+		 * specified
+		 */
 		if (indexes.head == NULL && tables.head == NULL && schemas.head == NULL)
 			reindex_one_database(dbname, dbname, "DATABASE", host, port,
-								 username, prompt_password, progname, echo, verbose);
+						 username, prompt_password, progname, echo, verbose);
 	}
 
 	exit(0);
@@ -269,7 +273,7 @@ main(int argc, char *argv[])
 static void
 reindex_one_database(const char *name, const char *dbname, const char *type,
 					 const char *host, const char *port, const char *username,
-					 enum trivalue prompt_password, const char *progname, bool echo,
+			  enum trivalue prompt_password, const char *progname, bool echo,
 					 bool verbose)
 {
 	PQExpBufferData sql;
@@ -322,7 +326,7 @@ static void
 reindex_all_databases(const char *maintenance_db,
 					  const char *host, const char *port,
 					  const char *username, enum trivalue prompt_password,
-					  const char *progname, bool echo, bool quiet, bool verbose)
+				   const char *progname, bool echo, bool quiet, bool verbose)
 {
 	PGconn	   *conn;
 	PGresult   *result;
diff --git a/src/bin/scripts/t/102_vacuumdb_stages.pl b/src/bin/scripts/t/102_vacuumdb_stages.pl
index 1ff05e3c27c..57b980ec6a5 100644
--- a/src/bin/scripts/t/102_vacuumdb_stages.pl
+++ b/src/bin/scripts/t/102_vacuumdb_stages.pl
@@ -19,7 +19,7 @@ qr/.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0;
 
 issues_sql_like(
 	[ 'vacuumdb', '--analyze-in-stages', '--all' ],
-                qr/.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0;
+qr/.*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0;
                    .*statement:\ ANALYZE.*
                    .*statement:\ SET\ default_statistics_target=1;\ SET\ vacuum_cost_delay=0;
                    .*statement:\ ANALYZE.*
diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c
index 2cd4aa65442..f600b0514a8 100644
--- a/src/bin/scripts/vacuumdb.c
+++ b/src/bin/scripts/vacuumdb.c
@@ -674,7 +674,7 @@ run_vacuum_command(PGconn *conn, const char *sql, bool echo,
 				   const char *dbname, const char *table,
 				   const char *progname, bool async)
 {
-	bool	status;
+	bool		status;
 
 	if (async)
 	{
@@ -943,7 +943,7 @@ help(const char *progname)
 	printf(_("  -Z, --analyze-only              only update optimizer statistics;  no vacuum\n"));
 	printf(_("  -j, --jobs=NUM                  use this many concurrent connections to vacuum\n"));
 	printf(_("      --analyze-in-stages         only update optimizer statistics, in multiple\n"
-		   "                                  stages for faster results;  no vacuum\n"));
+			 "                                  stages for faster results;  no vacuum\n"));
 	printf(_("  -?, --help                      show this help, then exit\n"));
 	printf(_("\nConnection options:\n"));
 	printf(_("  -h, --host=HOSTNAME       database server host or socket directory\n"));
diff --git a/src/common/restricted_token.c b/src/common/restricted_token.c
index a8213c0bafe..93da03570da 100644
--- a/src/common/restricted_token.c
+++ b/src/common/restricted_token.c
@@ -25,7 +25,7 @@
 #ifdef WIN32
 
 /* internal vars */
-char	*restrict_env;
+char	   *restrict_env;
 
 typedef BOOL (WINAPI * __CreateRestrictedToken) (HANDLE, DWORD, DWORD, PSID_AND_ATTRIBUTES, DWORD, PLUID_AND_ATTRIBUTES, DWORD, PSID_AND_ATTRIBUTES, PHANDLE);
 
diff --git a/src/include/access/brin_page.h b/src/include/access/brin_page.h
index 6c645b34b24..ecbd13a9a39 100644
--- a/src/include/access/brin_page.h
+++ b/src/include/access/brin_page.h
@@ -40,11 +40,11 @@ typedef struct BrinSpecialSpace
  * See comments above GinPageOpaqueData.
  */
 #define BrinPageType(page)		\
-	(((BrinSpecialSpace *)  	\
+	(((BrinSpecialSpace *)		\
 	  PageGetSpecialPointer(page))->vector[MAXALIGN(1) / sizeof(uint16) - 1])
 
 #define BrinPageFlags(page)		\
-	(((BrinSpecialSpace *)  	\
+	(((BrinSpecialSpace *)		\
 	  PageGetSpecialPointer(page))->vector[MAXALIGN(1) / sizeof(uint16) - 2])
 
 /* special space on all BRIN pages stores a "type" identifier */
diff --git a/src/include/access/commit_ts.h b/src/include/access/commit_ts.h
index ad44db357aa..bd05ab4d5ce 100644
--- a/src/include/access/commit_ts.h
+++ b/src/include/access/commit_ts.h
@@ -17,7 +17,7 @@
 #include "utils/guc.h"
 
 
-extern PGDLLIMPORT bool	track_commit_timestamp;
+extern PGDLLIMPORT bool track_commit_timestamp;
 
 extern bool check_track_commit_timestamp(bool *newval, void **extra,
 							 GucSource source);
@@ -53,9 +53,9 @@ extern void AdvanceOldestCommitTs(TransactionId oldestXact);
 
 typedef struct xl_commit_ts_set
 {
-	TimestampTz		timestamp;
-	RepOriginId		nodeid;
-	TransactionId	mainxid;
+	TimestampTz timestamp;
+	RepOriginId nodeid;
+	TransactionId mainxid;
 	/* subxact Xids follow */
 } xl_commit_ts_set;
 
diff --git a/src/include/access/gin.h b/src/include/access/gin.h
index 27b497133e4..8f1abaa2f71 100644
--- a/src/include/access/gin.h
+++ b/src/include/access/gin.h
@@ -67,7 +67,7 @@ typedef char GinTernaryValue;
 
 /* GUC parameters */
 extern PGDLLIMPORT int GinFuzzySearchLimit;
-extern int gin_pending_list_limit;
+extern int	gin_pending_list_limit;
 
 /* ginutil.c */
 extern void ginGetStats(Relation index, GinStatsData *stats);
diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h
index 0e819d7b511..4f1a5c33eae 100644
--- a/src/include/access/gist_private.h
+++ b/src/include/access/gist_private.h
@@ -121,7 +121,7 @@ typedef struct GISTSearchHeapItem
 {
 	ItemPointerData heapPtr;
 	bool		recheck;		/* T if quals must be rechecked */
-	bool		recheckDistances;	/* T if distances must be rechecked */
+	bool		recheckDistances;		/* T if distances must be rechecked */
 	IndexTuple	ftup;			/* data fetched back from the index, used in
 								 * index-only scans */
 } GISTSearchHeapItem;
@@ -166,7 +166,7 @@ typedef struct GISTScanOpaqueData
 	OffsetNumber nPageData;		/* number of valid items in array */
 	OffsetNumber curPageData;	/* next item to return */
 	MemoryContext pageDataCxt;	/* context holding the fetched tuples, for
-								   index-only scans */
+								 * index-only scans */
 } GISTScanOpaqueData;
 
 typedef GISTScanOpaqueData *GISTScanOpaque;
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index fc3c7f4097e..93cc8afcebc 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -338,7 +338,7 @@ typedef struct HSpool HSpool;	/* opaque struct in hashsort.c */
 extern HSpool *_h_spoolinit(Relation heap, Relation index, uint32 num_buckets);
 extern void _h_spooldestroy(HSpool *hspool);
 extern void _h_spool(HSpool *hspool, ItemPointer self,
-					 Datum *values, bool *isnull);
+		 Datum *values, bool *isnull);
 extern void _h_indexbuild(HSpool *hspool);
 
 /* hashutil.c */
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index eec7c95b218..31139cbd0cc 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -118,7 +118,7 @@ extern HeapScanDesc heap_beginscan_sampling(Relation relation,
 						Snapshot snapshot, int nkeys, ScanKey key,
 						bool allow_strat, bool allow_pagemode);
 extern void heap_setscanlimits(HeapScanDesc scan, BlockNumber startBlk,
-		   BlockNumber endBlk);
+				   BlockNumber endBlk);
 extern void heapgetpage(HeapScanDesc scan, BlockNumber page);
 extern void heap_rescan(HeapScanDesc scan, ScanKey key);
 extern void heap_endscan(HeapScanDesc scan);
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index 80285acc3b6..55d483dfaf0 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -402,7 +402,7 @@ do { \
 
 #define HeapTupleHeaderGetSpeculativeToken(tup) \
 ( \
-	AssertMacro(HeapTupleHeaderIsSpeculative(tup)),	\
+	AssertMacro(HeapTupleHeaderIsSpeculative(tup)), \
 	ItemPointerGetBlockNumber(&(tup)->t_ctid) \
 )
 
diff --git a/src/include/access/multixact.h b/src/include/access/multixact.h
index 935328983e8..f1448fe0635 100644
--- a/src/include/access/multixact.h
+++ b/src/include/access/multixact.h
@@ -126,7 +126,7 @@ extern void MultiXactAdvanceNextMXact(MultiXactId minMulti,
 						  MultiXactOffset minMultiOffset);
 extern void MultiXactAdvanceOldest(MultiXactId oldestMulti, Oid oldestMultiDB);
 extern void MultiXactSetSafeTruncate(MultiXactId safeTruncateMulti);
-extern int MultiXactMemberFreezeThreshold(void);
+extern int	MultiXactMemberFreezeThreshold(void);
 
 extern void multixact_twophase_recover(TransactionId xid, uint16 info,
 						   void *recdata, uint32 len);
diff --git a/src/include/access/parallel.h b/src/include/access/parallel.h
index 5f23f18f43b..b029c1e8831 100644
--- a/src/include/access/parallel.h
+++ b/src/include/access/parallel.h
@@ -21,33 +21,33 @@
 #include "storage/shm_toc.h"
 #include "utils/elog.h"
 
-typedef void (*parallel_worker_main_type)(dsm_segment *seg, shm_toc *toc);
+typedef void (*parallel_worker_main_type) (dsm_segment *seg, shm_toc *toc);
 
 typedef struct ParallelWorkerInfo
 {
 	BackgroundWorkerHandle *bgwhandle;
 	shm_mq_handle *error_mqh;
-	int32 pid;
+	int32		pid;
 } ParallelWorkerInfo;
 
 typedef struct ParallelContext
 {
-	dlist_node node;
+	dlist_node	node;
 	SubTransactionId subid;
-	int nworkers;
+	int			nworkers;
 	parallel_worker_main_type entrypoint;
-	char *library_name;
-	char *function_name;
+	char	   *library_name;
+	char	   *function_name;
 	ErrorContextCallback *error_context_stack;
 	shm_toc_estimator estimator;
 	dsm_segment *seg;
-	void *private_memory;
-	shm_toc *toc;
+	void	   *private_memory;
+	shm_toc    *toc;
 	ParallelWorkerInfo *worker;
 } ParallelContext;
 
 extern bool ParallelMessagePending;
-extern int ParallelWorkerNumber;
+extern int	ParallelWorkerNumber;
 
 #define		IsParallelWorker()		(ParallelWorkerNumber >= 0)
 
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 29f5b35b326..f2482e99d6c 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -38,8 +38,8 @@ typedef struct HeapScanDescData
 	/* state set up at initscan time */
 	BlockNumber rs_nblocks;		/* total number of blocks in rel */
 	BlockNumber rs_startblock;	/* block # to start at */
-	BlockNumber	rs_initblock;	/* block # to consider initial of rel */
-	BlockNumber	rs_numblocks;	/* number of blocks to scan */
+	BlockNumber rs_initblock;	/* block # to consider initial of rel */
+	BlockNumber rs_numblocks;	/* number of blocks to scan */
 	BufferAccessStrategy rs_strategy;	/* access strategy for reads */
 	bool		rs_syncscan;	/* report location to syncscan logic? */
 
diff --git a/src/include/access/stratnum.h b/src/include/access/stratnum.h
index a372be81e21..102f1fb94f6 100644
--- a/src/include/access/stratnum.h
+++ b/src/include/access/stratnum.h
@@ -72,4 +72,4 @@ typedef uint16 StrategyNumber;
 #define RTMaxStrategyNumber				27
 
 
-#endif		/* STRATNUM_H */
+#endif   /* STRATNUM_H */
diff --git a/src/include/access/tablesample.h b/src/include/access/tablesample.h
index 222fa8d5561..a02e93d3222 100644
--- a/src/include/access/tablesample.h
+++ b/src/include/access/tablesample.h
@@ -1,7 +1,7 @@
 /*-------------------------------------------------------------------------
  *
  * tablesample.h
- *        Public header file for TABLESAMPLE clause interface
+ *		  Public header file for TABLESAMPLE clause interface
  *
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
@@ -17,30 +17,31 @@
 #include "access/relscan.h"
 #include "executor/executor.h"
 
-typedef struct TableSampleDesc {
-	HeapScanDesc	heapScan;
-	TupleDesc		tupDesc;	/* Mostly useful for tsmexaminetuple */
+typedef struct TableSampleDesc
+{
+	HeapScanDesc heapScan;
+	TupleDesc	tupDesc;		/* Mostly useful for tsmexaminetuple */
 
-	void		   *tsmdata;	/* private method data */
+	void	   *tsmdata;		/* private method data */
 
 	/* These point to he function of the TABLESAMPLE Method. */
-	FmgrInfo		tsminit;
-	FmgrInfo		tsmnextblock;
-	FmgrInfo		tsmnexttuple;
-	FmgrInfo		tsmexaminetuple;
-	FmgrInfo		tsmreset;
-	FmgrInfo		tsmend;
+	FmgrInfo	tsminit;
+	FmgrInfo	tsmnextblock;
+	FmgrInfo	tsmnexttuple;
+	FmgrInfo	tsmexaminetuple;
+	FmgrInfo	tsmreset;
+	FmgrInfo	tsmend;
 } TableSampleDesc;
 
 
 extern TableSampleDesc *tablesample_init(SampleScanState *scanstate,
-										 TableSampleClause *tablesample);
+				 TableSampleClause *tablesample);
 extern HeapTuple tablesample_getnext(TableSampleDesc *desc);
 extern void tablesample_reset(TableSampleDesc *desc);
 extern void tablesample_end(TableSampleDesc *desc);
 extern HeapTuple tablesample_source_getnext(TableSampleDesc *desc);
 extern HeapTuple tablesample_source_gettup(TableSampleDesc *desc, ItemPointer tid,
-										   bool *visible);
+						  bool *visible);
 
 extern Datum tsm_system_init(PG_FUNCTION_ARGS);
 extern Datum tsm_system_nextblock(PG_FUNCTION_ARGS);
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
index a518a8613b3..cb1c2db4cf8 100644
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -204,7 +204,7 @@ typedef struct xl_xact_subxacts
 
 typedef struct xl_xact_relfilenodes
 {
-	int			nrels;		/* number of subtransaction XIDs */
+	int			nrels;			/* number of subtransaction XIDs */
 	RelFileNode xnodes[FLEXIBLE_ARRAY_MEMBER];
 } xl_xact_relfilenodes;
 #define MinSizeOfXactRelfilenodes offsetof(xl_xact_relfilenodes, xnodes)
@@ -262,23 +262,23 @@ typedef struct xl_xact_abort
  */
 typedef struct xl_xact_parsed_commit
 {
-	TimestampTz		xact_time;
+	TimestampTz xact_time;
 
-	uint32			xinfo;
+	uint32		xinfo;
 
-	Oid				dbId;		/* MyDatabaseId */
-	Oid				tsId;		/* MyDatabaseTableSpace */
+	Oid			dbId;			/* MyDatabaseId */
+	Oid			tsId;			/* MyDatabaseTableSpace */
 
-	int				nsubxacts;
-	TransactionId  *subxacts;
+	int			nsubxacts;
+	TransactionId *subxacts;
 
-	int				nrels;
-	RelFileNode	   *xnodes;
+	int			nrels;
+	RelFileNode *xnodes;
 
-	int				nmsgs;
+	int			nmsgs;
 	SharedInvalidationMessage *msgs;
 
-	TransactionId	twophase_xid;	/* only for 2PC */
+	TransactionId twophase_xid; /* only for 2PC */
 
 	XLogRecPtr	origin_lsn;
 	TimestampTz origin_timestamp;
@@ -286,16 +286,16 @@ typedef struct xl_xact_parsed_commit
 
 typedef struct xl_xact_parsed_abort
 {
-	TimestampTz		xact_time;
-	uint32			xinfo;
+	TimestampTz xact_time;
+	uint32		xinfo;
 
-	int				nsubxacts;
-	TransactionId  *subxacts;
+	int			nsubxacts;
+	TransactionId *subxacts;
 
-	int				nrels;
-	RelFileNode	   *xnodes;
+	int			nrels;
+	RelFileNode *xnodes;
 
-	TransactionId	twophase_xid;	/* only for 2PC */
+	TransactionId twophase_xid; /* only for 2PC */
 } xl_xact_parsed_abort;
 
 
@@ -356,16 +356,16 @@ extern void UnregisterSubXactCallback(SubXactCallback callback, void *arg);
 extern int	xactGetCommittedChildren(TransactionId **ptr);
 
 extern XLogRecPtr XactLogCommitRecord(TimestampTz commit_time,
-									  int nsubxacts, TransactionId *subxacts,
-									  int nrels, RelFileNode *rels,
-									  int nmsgs, SharedInvalidationMessage *msgs,
-									  bool relcacheInval, bool forceSync,
-									  TransactionId twophase_xid);
+					int nsubxacts, TransactionId *subxacts,
+					int nrels, RelFileNode *rels,
+					int nmsgs, SharedInvalidationMessage *msgs,
+					bool relcacheInval, bool forceSync,
+					TransactionId twophase_xid);
 
 extern XLogRecPtr XactLogAbortRecord(TimestampTz abort_time,
-									 int nsubxacts, TransactionId *subxacts,
-									 int nrels, RelFileNode *rels,
-									 TransactionId twophase_xid);
+				   int nsubxacts, TransactionId *subxacts,
+				   int nrels, RelFileNode *rels,
+				   TransactionId twophase_xid);
 extern void xact_redo(XLogReaderState *record);
 
 /* xactdesc.c */
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 9567379f49d..33348083ebc 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -110,9 +110,9 @@ extern int	CheckPointSegments;
 /* Archive modes */
 typedef enum ArchiveMode
 {
-	ARCHIVE_MODE_OFF = 0,	/* disabled */
-	ARCHIVE_MODE_ON,		/* enabled while server is running normally */
-	ARCHIVE_MODE_ALWAYS		/* enabled always (even during recovery) */
+	ARCHIVE_MODE_OFF = 0,		/* disabled */
+	ARCHIVE_MODE_ON,			/* enabled while server is running normally */
+	ARCHIVE_MODE_ALWAYS			/* enabled always (even during recovery) */
 } ArchiveMode;
 extern int	XLogArchiveMode;
 
diff --git a/src/include/access/xloginsert.h b/src/include/access/xloginsert.h
index ac609298cc2..31b45ba1396 100644
--- a/src/include/access/xloginsert.h
+++ b/src/include/access/xloginsert.h
@@ -29,13 +29,14 @@
 /* flags for XLogRegisterBuffer */
 #define REGBUF_FORCE_IMAGE	0x01	/* force a full-page image */
 #define REGBUF_NO_IMAGE		0x02	/* don't take a full-page image */
-#define REGBUF_WILL_INIT	(0x04 | 0x02)	/* page will be re-initialized at
-									 * replay (implies NO_IMAGE) */
-#define REGBUF_STANDARD		0x08	/* page follows "standard" page layout,
-									 * (data between pd_lower and pd_upper
-									 * will be skipped) */
-#define REGBUF_KEEP_DATA	0x10	/* include data even if a full-page image
-									 * is taken */
+#define REGBUF_WILL_INIT	(0x04 | 0x02)		/* page will be re-initialized
+												 * at replay (implies
+												 * NO_IMAGE) */
+#define REGBUF_STANDARD		0x08/* page follows "standard" page layout, (data
+								 * between pd_lower and pd_upper will be
+								 * skipped) */
+#define REGBUF_KEEP_DATA	0x10/* include data even if a full-page image is
+								 * taken */
 
 /* prototypes for public functions in xloginsert.c: */
 extern void XLogBeginInsert(void);
diff --git a/src/include/access/xlogreader.h b/src/include/access/xlogreader.h
index 5164abec758..640f7e14b15 100644
--- a/src/include/access/xlogreader.h
+++ b/src/include/access/xlogreader.h
@@ -127,7 +127,7 @@ struct XLogReaderState
 	uint32		main_data_len;	/* main data portion's length */
 	uint32		main_data_bufsz;	/* allocated size of the buffer */
 
-	RepOriginId	record_origin;
+	RepOriginId record_origin;
 
 	/* information about blocks referenced by the record. */
 	DecodedBkpBlock blocks[XLR_MAX_BLOCK_ID + 1];
diff --git a/src/include/access/xlogrecord.h b/src/include/access/xlogrecord.h
index 7a049f0e979..4ef6c206d28 100644
--- a/src/include/access/xlogrecord.h
+++ b/src/include/access/xlogrecord.h
@@ -121,13 +121,13 @@ typedef struct XLogRecordBlockHeader
  */
 typedef struct XLogRecordBlockImageHeader
 {
-	uint16	length;	/* number of page image bytes */
-	uint16	hole_offset;	/* number of bytes before "hole" */
-	uint8		bimg_info;	/* flag bits, see below */
+	uint16		length;			/* number of page image bytes */
+	uint16		hole_offset;	/* number of bytes before "hole" */
+	uint8		bimg_info;		/* flag bits, see below */
 
 	/*
-	 * If BKPIMAGE_HAS_HOLE and BKPIMAGE_IS_COMPRESSED,
-	 * an XLogRecordBlockCompressHeader struct follows.
+	 * If BKPIMAGE_HAS_HOLE and BKPIMAGE_IS_COMPRESSED, an
+	 * XLogRecordBlockCompressHeader struct follows.
 	 */
 } XLogRecordBlockImageHeader;
 
@@ -136,7 +136,7 @@ typedef struct XLogRecordBlockImageHeader
 
 /* Information stored in bimg_info */
 #define BKPIMAGE_HAS_HOLE		0x01	/* page image has "hole" */
-#define BKPIMAGE_IS_COMPRESSED		0x02	/* page image is compressed */
+#define BKPIMAGE_IS_COMPRESSED		0x02		/* page image is compressed */
 
 /*
  * Extra header information used when page image has "hole" and
@@ -144,7 +144,7 @@ typedef struct XLogRecordBlockImageHeader
  */
 typedef struct XLogRecordBlockCompressHeader
 {
-	uint16	hole_length;	/* number of bytes in "hole" */
+	uint16		hole_length;	/* number of bytes in "hole" */
 } XLogRecordBlockCompressHeader;
 
 #define SizeOfXLogRecordBlockCompressHeader \
@@ -185,7 +185,7 @@ typedef struct XLogRecordDataHeaderShort
 {
 	uint8		id;				/* XLR_BLOCK_ID_DATA_SHORT */
 	uint8		data_length;	/* number of payload bytes */
-} XLogRecordDataHeaderShort;
+}	XLogRecordDataHeaderShort;
 
 #define SizeOfXLogRecordDataHeaderShort (sizeof(uint8) * 2)
 
@@ -193,7 +193,7 @@ typedef struct XLogRecordDataHeaderLong
 {
 	uint8		id;				/* XLR_BLOCK_ID_DATA_LONG */
 	/* followed by uint32 data_length, unaligned */
-} XLogRecordDataHeaderLong;
+}	XLogRecordDataHeaderLong;
 
 #define SizeOfXLogRecordDataHeaderLong (sizeof(uint8) + sizeof(uint32))
 
diff --git a/src/include/access/xlogutils.h b/src/include/access/xlogutils.h
index 3015ff9bccd..8cf51c7fd6d 100644
--- a/src/include/access/xlogutils.h
+++ b/src/include/access/xlogutils.h
@@ -26,11 +26,11 @@ extern void XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum,
 /* Result codes for XLogReadBufferForRedo[Extended] */
 typedef enum
 {
-	BLK_NEEDS_REDO,		/* changes from WAL record need to be applied */
-	BLK_DONE,			/* block is already up-to-date */
-	BLK_RESTORED,		/* block was restored from a full-page image */
-	BLK_NOTFOUND		/* block was not found (and hence does not need to be
-						 * replayed) */
+	BLK_NEEDS_REDO,				/* changes from WAL record need to be applied */
+	BLK_DONE,					/* block is already up-to-date */
+	BLK_RESTORED,				/* block was restored from a full-page image */
+	BLK_NOTFOUND				/* block was not found (and hence does not
+								 * need to be replayed) */
 } XLogRedoAction;
 
 extern XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record,
diff --git a/src/include/bootstrap/bootstrap.h b/src/include/bootstrap/bootstrap.h
index af9fc75a743..b88bb3e4d65 100644
--- a/src/include/bootstrap/bootstrap.h
+++ b/src/include/bootstrap/bootstrap.h
@@ -25,7 +25,7 @@
 
 #define BOOTCOL_NULL_AUTO			1
 #define BOOTCOL_NULL_FORCE_NULL		2
-#define BOOTCOL_NULL_FORCE_NOT_NULL	3
+#define BOOTCOL_NULL_FORCE_NOT_NULL 3
 
 extern Relation boot_reldesc;
 extern Form_pg_attribute attrtypes[MAXATTR];
diff --git a/src/include/catalog/binary_upgrade.h b/src/include/catalog/binary_upgrade.h
index 22388c3b703..efca09fa2df 100644
--- a/src/include/catalog/binary_upgrade.h
+++ b/src/include/catalog/binary_upgrade.h
@@ -17,7 +17,7 @@
 #include "catalog/pg_authid.h"
 
 /* pick a OID that will never be used for TOAST tables */
-#define OPTIONALLY_CREATE_TOAST_OID	BOOTSTRAP_SUPERUSERID
+#define OPTIONALLY_CREATE_TOAST_OID BOOTSTRAP_SUPERUSERID
 
 extern PGDLLIMPORT Oid binary_upgrade_next_pg_type_oid;
 extern PGDLLIMPORT Oid binary_upgrade_next_array_pg_type_oid;
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index e961d37172c..8b3b28d954c 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -115,14 +115,14 @@ extern void validate_index(Oid heapId, Oid indexId, Snapshot snapshot);
 extern void index_set_state_flags(Oid indexId, IndexStateFlagsAction action);
 
 extern void reindex_index(Oid indexId, bool skip_constraint_checks,
-						  char relpersistence, int options);
+			  char relpersistence, int options);
 
 /* Flag bits for reindex_relation(): */
 #define REINDEX_REL_PROCESS_TOAST			0x01
 #define REINDEX_REL_SUPPRESS_INDEX_USE		0x02
 #define REINDEX_REL_CHECK_CONSTRAINTS		0x04
 #define REINDEX_REL_FORCE_INDEXES_UNLOGGED	0x08
-#define REINDEX_REL_FORCE_INDEXES_PERMANENT	0x10
+#define REINDEX_REL_FORCE_INDEXES_PERMANENT 0x10
 
 extern bool reindex_relation(Oid relid, int flags, int options);
 
diff --git a/src/include/catalog/indexing.h b/src/include/catalog/indexing.h
index 1c486c4b9cb..748aadde945 100644
--- a/src/include/catalog/indexing.h
+++ b/src/include/catalog/indexing.h
@@ -220,7 +220,7 @@ DECLARE_UNIQUE_INDEX(pg_tablespace_spcname_index, 2698, on pg_tablespace using b
 #define TablespaceNameIndexId  2698
 
 DECLARE_UNIQUE_INDEX(pg_transform_oid_index, 3574, on pg_transform using btree(oid oid_ops));
-#define TransformOidIndexId	3574
+#define TransformOidIndexId 3574
 DECLARE_UNIQUE_INDEX(pg_transform_type_lang_index, 3575, on pg_transform using btree(trftype oid_ops, trflang oid_ops));
 #define TransformTypeLangIndexId  3575
 
diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h
index 619b2f58bca..37808c03c6e 100644
--- a/src/include/catalog/objectaddress.h
+++ b/src/include/catalog/objectaddress.h
@@ -67,7 +67,7 @@ extern HeapTuple get_catalog_object_by_oid(Relation catalog,
 extern char *getObjectDescription(const ObjectAddress *object);
 extern char *getObjectDescriptionOids(Oid classid, Oid objid);
 
-extern int read_objtype_from_string(const char *objtype);
+extern int	read_objtype_from_string(const char *objtype);
 extern char *getObjectTypeDescription(const ObjectAddress *object);
 extern char *getObjectIdentity(const ObjectAddress *address);
 extern char *getObjectIdentityParts(const ObjectAddress *address,
diff --git a/src/include/catalog/opfam_internal.h b/src/include/catalog/opfam_internal.h
index f01dcbe3e31..32195a71310 100644
--- a/src/include/catalog/opfam_internal.h
+++ b/src/include/catalog/opfam_internal.h
@@ -25,4 +25,4 @@ typedef struct
 	Oid			sortfamily;		/* ordering operator's sort opfamily, or 0 */
 } OpFamilyMember;
 
-#endif		/* OPFAM_INTERNAL_H */
+#endif   /* OPFAM_INTERNAL_H */
diff --git a/src/include/catalog/pg_aggregate.h b/src/include/catalog/pg_aggregate.h
index b6b698841ce..dd6079fbe35 100644
--- a/src/include/catalog/pg_aggregate.h
+++ b/src/include/catalog/pg_aggregate.h
@@ -129,15 +129,15 @@ typedef FormData_pg_aggregate *Form_pg_aggregate;
 DATA(insert ( 2100	n 0 int8_avg_accum	numeric_poly_avg		int8_avg_accum	int8_avg_accum_inv	numeric_poly_avg	f f 0	2281	48	2281	48	_null_ _null_ ));
 DATA(insert ( 2101	n 0 int4_avg_accum	int8_avg		int4_avg_accum	int4_avg_accum_inv	int8_avg					f f 0	1016	0	1016	0	"{0,0}" "{0,0}" ));
 DATA(insert ( 2102	n 0 int2_avg_accum	int8_avg		int2_avg_accum	int2_avg_accum_inv	int8_avg					f f 0	1016	0	1016	0	"{0,0}" "{0,0}" ));
-DATA(insert ( 2103	n 0 numeric_avg_accum numeric_avg	numeric_avg_accum numeric_accum_inv numeric_avg 				f f 0	2281	128 2281	128 _null_ _null_ ));
+DATA(insert ( 2103	n 0 numeric_avg_accum numeric_avg	numeric_avg_accum numeric_accum_inv numeric_avg					f f 0	2281	128 2281	128 _null_ _null_ ));
 DATA(insert ( 2104	n 0 float4_accum	float8_avg		-				-				-								f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
 DATA(insert ( 2105	n 0 float8_accum	float8_avg		-				-				-								f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
 DATA(insert ( 2106	n 0 interval_accum	interval_avg	interval_accum	interval_accum_inv interval_avg					f f 0	1187	0	1187	0	"{0 second,0 second}" "{0 second,0 second}" ));
 
 /* sum */
-DATA(insert ( 2107	n 0 int8_avg_accum	numeric_poly_sum		int8_avg_accum	int8_avg_accum_inv numeric_poly_sum	f f 0	2281	48	2281	48	_null_ _null_ ));
-DATA(insert ( 2108	n 0 int4_sum		-				int4_avg_accum	int4_avg_accum_inv int2int4_sum 				f f 0	20		0	1016	0	_null_ "{0,0}" ));
-DATA(insert ( 2109	n 0 int2_sum		-				int2_avg_accum	int2_avg_accum_inv int2int4_sum 				f f 0	20		0	1016	0	_null_ "{0,0}" ));
+DATA(insert ( 2107	n 0 int8_avg_accum	numeric_poly_sum		int8_avg_accum	int8_avg_accum_inv numeric_poly_sum f f 0	2281	48	2281	48	_null_ _null_ ));
+DATA(insert ( 2108	n 0 int4_sum		-				int4_avg_accum	int4_avg_accum_inv int2int4_sum					f f 0	20		0	1016	0	_null_ "{0,0}" ));
+DATA(insert ( 2109	n 0 int2_sum		-				int2_avg_accum	int2_avg_accum_inv int2int4_sum					f f 0	20		0	1016	0	_null_ "{0,0}" ));
 DATA(insert ( 2110	n 0 float4pl		-				-				-				-								f f 0	700		0	0		0	_null_ _null_ ));
 DATA(insert ( 2111	n 0 float8pl		-				-				-				-								f f 0	701		0	0		0	_null_ _null_ ));
 DATA(insert ( 2112	n 0 cash_pl			-				cash_pl			cash_mi			-								f f 0	790		0	790		0	_null_ _null_ ));
@@ -195,7 +195,7 @@ DATA(insert ( 2147	n 0 int8inc_any		-				int8inc_any		int8dec_any		-				f f 0		2
 DATA(insert ( 2803	n 0 int8inc			-				int8inc			int8dec			-				f f 0		20		0	20		0	"0" "0" ));
 
 /* var_pop */
-DATA(insert ( 2718	n 0 int8_accum	numeric_var_pop		int8_accum		int8_accum_inv	numeric_var_pop 				f f 0	2281	128 2281	128 _null_ _null_ ));
+DATA(insert ( 2718	n 0 int8_accum	numeric_var_pop		int8_accum		int8_accum_inv	numeric_var_pop					f f 0	2281	128 2281	128 _null_ _null_ ));
 DATA(insert ( 2719	n 0 int4_accum	numeric_poly_var_pop		int4_accum		int4_accum_inv	numeric_poly_var_pop	f f 0	2281	48	2281	48	_null_ _null_ ));
 DATA(insert ( 2720	n 0 int2_accum	numeric_poly_var_pop		int2_accum		int2_accum_inv	numeric_poly_var_pop	f f 0	2281	48	2281	48	_null_ _null_ ));
 DATA(insert ( 2721	n 0 float4_accum	float8_var_pop	-				-				-								f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
@@ -204,38 +204,38 @@ DATA(insert ( 2723	n 0 numeric_accum	numeric_var_pop numeric_accum numeric_accum
 
 /* var_samp */
 DATA(insert ( 2641	n 0 int8_accum	numeric_var_samp	int8_accum		int8_accum_inv	numeric_var_samp				f f 0	2281	128 2281	128 _null_ _null_ ));
-DATA(insert ( 2642	n 0 int4_accum	numeric_poly_var_samp		int4_accum		int4_accum_inv	numeric_poly_var_samp	f f 0	2281	48 	2281	48	_null_ _null_ ));
-DATA(insert ( 2643	n 0 int2_accum	numeric_poly_var_samp		int2_accum		int2_accum_inv	numeric_poly_var_samp	f f 0	2281	48 	2281	48	_null_ _null_ ));
+DATA(insert ( 2642	n 0 int4_accum	numeric_poly_var_samp		int4_accum		int4_accum_inv	numeric_poly_var_samp	f f 0	2281	48	2281	48	_null_ _null_ ));
+DATA(insert ( 2643	n 0 int2_accum	numeric_poly_var_samp		int2_accum		int2_accum_inv	numeric_poly_var_samp	f f 0	2281	48	2281	48	_null_ _null_ ));
 DATA(insert ( 2644	n 0 float4_accum	float8_var_samp -				-				-								f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
 DATA(insert ( 2645	n 0 float8_accum	float8_var_samp -				-				-								f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
-DATA(insert ( 2646	n 0 numeric_accum	numeric_var_samp numeric_accum numeric_accum_inv numeric_var_samp 				f f 0	2281	128 2281	128 _null_ _null_ ));
+DATA(insert ( 2646	n 0 numeric_accum	numeric_var_samp numeric_accum numeric_accum_inv numeric_var_samp				f f 0	2281	128 2281	128 _null_ _null_ ));
 
 /* variance: historical Postgres syntax for var_samp */
 DATA(insert ( 2148	n 0 int8_accum	numeric_var_samp	int8_accum		int8_accum_inv	numeric_var_samp				f f 0	2281	128 2281	128 _null_ _null_ ));
-DATA(insert ( 2149	n 0 int4_accum	numeric_poly_var_samp		int4_accum		int4_accum_inv	numeric_poly_var_samp	f f 0	2281	48 	2281	48 	_null_ _null_ ));
-DATA(insert ( 2150	n 0 int2_accum	numeric_poly_var_samp		int2_accum		int2_accum_inv	numeric_poly_var_samp	f f 0	2281	48 	2281	48 	_null_ _null_ ));
+DATA(insert ( 2149	n 0 int4_accum	numeric_poly_var_samp		int4_accum		int4_accum_inv	numeric_poly_var_samp	f f 0	2281	48	2281	48	_null_ _null_ ));
+DATA(insert ( 2150	n 0 int2_accum	numeric_poly_var_samp		int2_accum		int2_accum_inv	numeric_poly_var_samp	f f 0	2281	48	2281	48	_null_ _null_ ));
 DATA(insert ( 2151	n 0 float4_accum	float8_var_samp -				-				-								f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
 DATA(insert ( 2152	n 0 float8_accum	float8_var_samp -				-				-								f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
 DATA(insert ( 2153	n 0 numeric_accum	numeric_var_samp numeric_accum numeric_accum_inv numeric_var_samp				f f 0	2281	128 2281	128 _null_ _null_ ));
 
 /* stddev_pop */
 DATA(insert ( 2724	n 0 int8_accum	numeric_stddev_pop	int8_accum	int8_accum_inv	numeric_stddev_pop					f f 0	2281	128 2281	128 _null_ _null_ ));
-DATA(insert ( 2725	n 0 int4_accum	numeric_poly_stddev_pop	int4_accum	int4_accum_inv	numeric_poly_stddev_pop	f f 0	2281	48	2281	48	_null_ _null_ ));
-DATA(insert ( 2726	n 0 int2_accum	numeric_poly_stddev_pop	int2_accum	int2_accum_inv	numeric_poly_stddev_pop	f f 0	2281	48	2281	48	_null_ _null_ ));
+DATA(insert ( 2725	n 0 int4_accum	numeric_poly_stddev_pop int4_accum	int4_accum_inv	numeric_poly_stddev_pop f f 0	2281	48	2281	48	_null_ _null_ ));
+DATA(insert ( 2726	n 0 int2_accum	numeric_poly_stddev_pop int2_accum	int2_accum_inv	numeric_poly_stddev_pop f f 0	2281	48	2281	48	_null_ _null_ ));
 DATA(insert ( 2727	n 0 float4_accum	float8_stddev_pop	-				-				-							f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
 DATA(insert ( 2728	n 0 float8_accum	float8_stddev_pop	-				-				-							f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
 DATA(insert ( 2729	n 0 numeric_accum	numeric_stddev_pop numeric_accum numeric_accum_inv numeric_stddev_pop			f f 0	2281	128 2281	128 _null_ _null_ ));
 
 /* stddev_samp */
 DATA(insert ( 2712	n 0 int8_accum	numeric_stddev_samp		int8_accum	int8_accum_inv	numeric_stddev_samp				f f 0	2281	128 2281	128 _null_ _null_ ));
-DATA(insert ( 2713	n 0 int4_accum	numeric_poly_stddev_samp	int4_accum	int4_accum_inv	numeric_poly_stddev_samp	f f 0	2281	48 	2281	48	_null_ _null_ ));
-DATA(insert ( 2714	n 0 int2_accum	numeric_poly_stddev_samp	int2_accum	int2_accum_inv	numeric_poly_stddev_samp	f f 0	2281	48 	2281	48	_null_ _null_ ));
+DATA(insert ( 2713	n 0 int4_accum	numeric_poly_stddev_samp	int4_accum	int4_accum_inv	numeric_poly_stddev_samp	f f 0	2281	48	2281	48	_null_ _null_ ));
+DATA(insert ( 2714	n 0 int2_accum	numeric_poly_stddev_samp	int2_accum	int2_accum_inv	numeric_poly_stddev_samp	f f 0	2281	48	2281	48	_null_ _null_ ));
 DATA(insert ( 2715	n 0 float4_accum	float8_stddev_samp	-				-				-							f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
 DATA(insert ( 2716	n 0 float8_accum	float8_stddev_samp	-				-				-							f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
 DATA(insert ( 2717	n 0 numeric_accum	numeric_stddev_samp numeric_accum numeric_accum_inv numeric_stddev_samp			f f 0	2281	128 2281	128 _null_ _null_ ));
 
 /* stddev: historical Postgres syntax for stddev_samp */
-DATA(insert ( 2154	n 0 int8_accum	numeric_stddev_samp		int8_accum	int8_accum_inv	numeric_stddev_samp 			f f 0	2281	128 2281	128 _null_ _null_ ));
+DATA(insert ( 2154	n 0 int8_accum	numeric_stddev_samp		int8_accum	int8_accum_inv	numeric_stddev_samp				f f 0	2281	128 2281	128 _null_ _null_ ));
 DATA(insert ( 2155	n 0 int4_accum	numeric_poly_stddev_samp	int4_accum	int4_accum_inv	numeric_poly_stddev_samp	f f 0	2281	48	2281	48	_null_ _null_ ));
 DATA(insert ( 2156	n 0 int2_accum	numeric_poly_stddev_samp	int2_accum	int2_accum_inv	numeric_poly_stddev_samp	f f 0	2281	48	2281	48	_null_ _null_ ));
 DATA(insert ( 2157	n 0 float4_accum	float8_stddev_samp	-				-				-							f f 0	1022	0	0		0	"{0,0,0}" _null_ ));
diff --git a/src/include/catalog/pg_amop.h b/src/include/catalog/pg_amop.h
index 657ec07059c..da5fe9d9474 100644
--- a/src/include/catalog/pg_amop.h
+++ b/src/include/catalog/pg_amop.h
@@ -849,271 +849,271 @@ DATA(insert (	3550	869 869 27 s	934 783 0 ));
 
 /* BRIN opclasses */
 /* minmax bytea */
-DATA(insert (   4064     17   17 1 s      1957    3580 0 ));
-DATA(insert (   4064     17   17 2 s      1958    3580 0 ));
-DATA(insert (   4064     17   17 3 s      1955    3580 0 ));
-DATA(insert (   4064     17   17 4 s      1960    3580 0 ));
-DATA(insert (   4064     17   17 5 s      1959    3580 0 ));
+DATA(insert (	4064	 17   17 1 s	  1957	  3580 0 ));
+DATA(insert (	4064	 17   17 2 s	  1958	  3580 0 ));
+DATA(insert (	4064	 17   17 3 s	  1955	  3580 0 ));
+DATA(insert (	4064	 17   17 4 s	  1960	  3580 0 ));
+DATA(insert (	4064	 17   17 5 s	  1959	  3580 0 ));
 /* minmax "char" */
-DATA(insert (   4062     18   18 1 s       631    3580 0 ));
-DATA(insert (   4062     18   18 2 s       632    3580 0 ));
-DATA(insert (   4062     18   18 3 s        92    3580 0 ));
-DATA(insert (   4062     18   18 4 s       634    3580 0 ));
-DATA(insert (   4062     18   18 5 s       633    3580 0 ));
+DATA(insert (	4062	 18   18 1 s	   631	  3580 0 ));
+DATA(insert (	4062	 18   18 2 s	   632	  3580 0 ));
+DATA(insert (	4062	 18   18 3 s		92	  3580 0 ));
+DATA(insert (	4062	 18   18 4 s	   634	  3580 0 ));
+DATA(insert (	4062	 18   18 5 s	   633	  3580 0 ));
 /* minmax name */
-DATA(insert (   4065     19   19 1 s       660    3580 0 ));
-DATA(insert (   4065     19   19 2 s       661    3580 0 ));
-DATA(insert (   4065     19   19 3 s        93    3580 0 ));
-DATA(insert (   4065     19   19 4 s       663    3580 0 ));
-DATA(insert (   4065     19   19 5 s       662    3580 0 ));
+DATA(insert (	4065	 19   19 1 s	   660	  3580 0 ));
+DATA(insert (	4065	 19   19 2 s	   661	  3580 0 ));
+DATA(insert (	4065	 19   19 3 s		93	  3580 0 ));
+DATA(insert (	4065	 19   19 4 s	   663	  3580 0 ));
+DATA(insert (	4065	 19   19 5 s	   662	  3580 0 ));
 /* minmax integer */
-DATA(insert (   4054     20   20 1 s       412    3580 0 ));
-DATA(insert (   4054     20   20 2 s       414    3580 0 ));
-DATA(insert (   4054     20   20 3 s       410    3580 0 ));
-DATA(insert (   4054     20   20 4 s       415    3580 0 ));
-DATA(insert (   4054     20   20 5 s       413    3580 0 ));
-DATA(insert (   4054     20   21 1 s      1870    3580 0 ));
-DATA(insert (   4054     20   21 2 s      1872    3580 0 ));
-DATA(insert (   4054     20   21 3 s      1868    3580 0 ));
-DATA(insert (   4054     20   21 4 s      1873    3580 0 ));
-DATA(insert (   4054     20   21 5 s      1871    3580 0 ));
-DATA(insert (   4054     20   23 1 s       418    3580 0 ));
-DATA(insert (   4054     20   23 2 s       420    3580 0 ));
-DATA(insert (   4054     20   23 3 s       416    3580 0 ));
-DATA(insert (   4054     20   23 4 s       430    3580 0 ));
-DATA(insert (   4054     20   23 5 s       419    3580 0 ));
-DATA(insert (   4054     21   21 1 s        95    3580 0 ));
-DATA(insert (   4054     21   21 2 s       522    3580 0 ));
-DATA(insert (   4054     21   21 3 s        94    3580 0 ));
-DATA(insert (   4054     21   21 4 s       524    3580 0 ));
-DATA(insert (   4054     21   21 5 s       520    3580 0 ));
-DATA(insert (   4054     21   20 1 s      1864    3580 0 ));
-DATA(insert (   4054     21   20 2 s      1866    3580 0 ));
-DATA(insert (   4054     21   20 3 s      1862    3580 0 ));
-DATA(insert (   4054     21   20 4 s      1867    3580 0 ));
-DATA(insert (   4054     21   20 5 s      1865    3580 0 ));
-DATA(insert (   4054     21   23 1 s       534    3580 0 ));
-DATA(insert (   4054     21   23 2 s       540    3580 0 ));
-DATA(insert (   4054     21   23 3 s       532    3580 0 ));
-DATA(insert (   4054     21   23 4 s       542    3580 0 ));
-DATA(insert (   4054     21   23 5 s       536    3580 0 ));
-DATA(insert (   4054     23   23 1 s        97    3580 0 ));
-DATA(insert (   4054     23   23 2 s       523    3580 0 ));
-DATA(insert (   4054     23   23 3 s        96    3580 0 ));
-DATA(insert (   4054     23   23 4 s       525    3580 0 ));
-DATA(insert (   4054     23   23 5 s       521    3580 0 ));
-DATA(insert (   4054     23   21 1 s       535    3580 0 ));
-DATA(insert (   4054     23   21 2 s       541    3580 0 ));
-DATA(insert (   4054     23   21 3 s       533    3580 0 ));
-DATA(insert (   4054     23   21 4 s       543    3580 0 ));
-DATA(insert (   4054     23   21 5 s       537    3580 0 ));
-DATA(insert (   4054     23   20 1 s        37    3580 0 ));
-DATA(insert (   4054     23   20 2 s        80    3580 0 ));
-DATA(insert (   4054     23   20 3 s        15    3580 0 ));
-DATA(insert (   4054     23   20 4 s        82    3580 0 ));
-DATA(insert (   4054     23   20 5 s        76    3580 0 ));
+DATA(insert (	4054	 20   20 1 s	   412	  3580 0 ));
+DATA(insert (	4054	 20   20 2 s	   414	  3580 0 ));
+DATA(insert (	4054	 20   20 3 s	   410	  3580 0 ));
+DATA(insert (	4054	 20   20 4 s	   415	  3580 0 ));
+DATA(insert (	4054	 20   20 5 s	   413	  3580 0 ));
+DATA(insert (	4054	 20   21 1 s	  1870	  3580 0 ));
+DATA(insert (	4054	 20   21 2 s	  1872	  3580 0 ));
+DATA(insert (	4054	 20   21 3 s	  1868	  3580 0 ));
+DATA(insert (	4054	 20   21 4 s	  1873	  3580 0 ));
+DATA(insert (	4054	 20   21 5 s	  1871	  3580 0 ));
+DATA(insert (	4054	 20   23 1 s	   418	  3580 0 ));
+DATA(insert (	4054	 20   23 2 s	   420	  3580 0 ));
+DATA(insert (	4054	 20   23 3 s	   416	  3580 0 ));
+DATA(insert (	4054	 20   23 4 s	   430	  3580 0 ));
+DATA(insert (	4054	 20   23 5 s	   419	  3580 0 ));
+DATA(insert (	4054	 21   21 1 s		95	  3580 0 ));
+DATA(insert (	4054	 21   21 2 s	   522	  3580 0 ));
+DATA(insert (	4054	 21   21 3 s		94	  3580 0 ));
+DATA(insert (	4054	 21   21 4 s	   524	  3580 0 ));
+DATA(insert (	4054	 21   21 5 s	   520	  3580 0 ));
+DATA(insert (	4054	 21   20 1 s	  1864	  3580 0 ));
+DATA(insert (	4054	 21   20 2 s	  1866	  3580 0 ));
+DATA(insert (	4054	 21   20 3 s	  1862	  3580 0 ));
+DATA(insert (	4054	 21   20 4 s	  1867	  3580 0 ));
+DATA(insert (	4054	 21   20 5 s	  1865	  3580 0 ));
+DATA(insert (	4054	 21   23 1 s	   534	  3580 0 ));
+DATA(insert (	4054	 21   23 2 s	   540	  3580 0 ));
+DATA(insert (	4054	 21   23 3 s	   532	  3580 0 ));
+DATA(insert (	4054	 21   23 4 s	   542	  3580 0 ));
+DATA(insert (	4054	 21   23 5 s	   536	  3580 0 ));
+DATA(insert (	4054	 23   23 1 s		97	  3580 0 ));
+DATA(insert (	4054	 23   23 2 s	   523	  3580 0 ));
+DATA(insert (	4054	 23   23 3 s		96	  3580 0 ));
+DATA(insert (	4054	 23   23 4 s	   525	  3580 0 ));
+DATA(insert (	4054	 23   23 5 s	   521	  3580 0 ));
+DATA(insert (	4054	 23   21 1 s	   535	  3580 0 ));
+DATA(insert (	4054	 23   21 2 s	   541	  3580 0 ));
+DATA(insert (	4054	 23   21 3 s	   533	  3580 0 ));
+DATA(insert (	4054	 23   21 4 s	   543	  3580 0 ));
+DATA(insert (	4054	 23   21 5 s	   537	  3580 0 ));
+DATA(insert (	4054	 23   20 1 s		37	  3580 0 ));
+DATA(insert (	4054	 23   20 2 s		80	  3580 0 ));
+DATA(insert (	4054	 23   20 3 s		15	  3580 0 ));
+DATA(insert (	4054	 23   20 4 s		82	  3580 0 ));
+DATA(insert (	4054	 23   20 5 s		76	  3580 0 ));
 
 /* minmax text */
-DATA(insert (   4056     25   25 1 s       664    3580 0 ));
-DATA(insert (   4056     25   25 2 s       665    3580 0 ));
-DATA(insert (   4056     25   25 3 s        98    3580 0 ));
-DATA(insert (   4056     25   25 4 s       667    3580 0 ));
-DATA(insert (   4056     25   25 5 s       666    3580 0 ));
+DATA(insert (	4056	 25   25 1 s	   664	  3580 0 ));
+DATA(insert (	4056	 25   25 2 s	   665	  3580 0 ));
+DATA(insert (	4056	 25   25 3 s		98	  3580 0 ));
+DATA(insert (	4056	 25   25 4 s	   667	  3580 0 ));
+DATA(insert (	4056	 25   25 5 s	   666	  3580 0 ));
 /* minmax oid */
-DATA(insert (   4068     26   26 1 s       609    3580 0 ));
-DATA(insert (   4068     26   26 2 s       611    3580 0 ));
-DATA(insert (   4068     26   26 3 s       607    3580 0 ));
-DATA(insert (   4068     26   26 4 s       612    3580 0 ));
-DATA(insert (   4068     26   26 5 s       610    3580 0 ));
+DATA(insert (	4068	 26   26 1 s	   609	  3580 0 ));
+DATA(insert (	4068	 26   26 2 s	   611	  3580 0 ));
+DATA(insert (	4068	 26   26 3 s	   607	  3580 0 ));
+DATA(insert (	4068	 26   26 4 s	   612	  3580 0 ));
+DATA(insert (	4068	 26   26 5 s	   610	  3580 0 ));
 /* minmax tid */
-DATA(insert (   4069     27   27 1 s      2799    3580 0 ));
-DATA(insert (   4069     27   27 2 s      2801    3580 0 ));
-DATA(insert (   4069     27   27 3 s       387    3580 0 ));
-DATA(insert (   4069     27   27 4 s      2802    3580 0 ));
-DATA(insert (   4069     27   27 5 s      2800    3580 0 ));
+DATA(insert (	4069	 27   27 1 s	  2799	  3580 0 ));
+DATA(insert (	4069	 27   27 2 s	  2801	  3580 0 ));
+DATA(insert (	4069	 27   27 3 s	   387	  3580 0 ));
+DATA(insert (	4069	 27   27 4 s	  2802	  3580 0 ));
+DATA(insert (	4069	 27   27 5 s	  2800	  3580 0 ));
 /* minmax float (float4, float8) */
-DATA(insert (   4070    700  700 1 s       622    3580 0 ));
-DATA(insert (   4070    700  700 2 s       624    3580 0 ));
-DATA(insert (   4070    700  700 3 s       620    3580 0 ));
-DATA(insert (   4070    700  700 4 s       625    3580 0 ));
-DATA(insert (   4070    700  700 5 s       623    3580 0 ));
-DATA(insert (   4070    700  701 1 s      1122    3580 0 ));
-DATA(insert (   4070    700  701 2 s      1124    3580 0 ));
-DATA(insert (   4070    700  701 3 s      1120    3580 0 ));
-DATA(insert (   4070    700  701 4 s      1125    3580 0 ));
-DATA(insert (   4070    700  701 5 s      1123    3580 0 ));
-DATA(insert (   4070    701  700 1 s      1132    3580 0 ));
-DATA(insert (   4070    701  700 2 s      1134    3580 0 ));
-DATA(insert (   4070    701  700 3 s      1130    3580 0 ));
-DATA(insert (   4070    701  700 4 s      1135    3580 0 ));
-DATA(insert (   4070    701  700 5 s      1133    3580 0 ));
-DATA(insert (   4070    701  701 1 s       672    3580 0 ));
-DATA(insert (   4070    701  701 2 s       673    3580 0 ));
-DATA(insert (   4070    701  701 3 s       670    3580 0 ));
-DATA(insert (   4070    701  701 4 s       675    3580 0 ));
-DATA(insert (   4070    701  701 5 s       674    3580 0 ));
+DATA(insert (	4070	700  700 1 s	   622	  3580 0 ));
+DATA(insert (	4070	700  700 2 s	   624	  3580 0 ));
+DATA(insert (	4070	700  700 3 s	   620	  3580 0 ));
+DATA(insert (	4070	700  700 4 s	   625	  3580 0 ));
+DATA(insert (	4070	700  700 5 s	   623	  3580 0 ));
+DATA(insert (	4070	700  701 1 s	  1122	  3580 0 ));
+DATA(insert (	4070	700  701 2 s	  1124	  3580 0 ));
+DATA(insert (	4070	700  701 3 s	  1120	  3580 0 ));
+DATA(insert (	4070	700  701 4 s	  1125	  3580 0 ));
+DATA(insert (	4070	700  701 5 s	  1123	  3580 0 ));
+DATA(insert (	4070	701  700 1 s	  1132	  3580 0 ));
+DATA(insert (	4070	701  700 2 s	  1134	  3580 0 ));
+DATA(insert (	4070	701  700 3 s	  1130	  3580 0 ));
+DATA(insert (	4070	701  700 4 s	  1135	  3580 0 ));
+DATA(insert (	4070	701  700 5 s	  1133	  3580 0 ));
+DATA(insert (	4070	701  701 1 s	   672	  3580 0 ));
+DATA(insert (	4070	701  701 2 s	   673	  3580 0 ));
+DATA(insert (	4070	701  701 3 s	   670	  3580 0 ));
+DATA(insert (	4070	701  701 4 s	   675	  3580 0 ));
+DATA(insert (	4070	701  701 5 s	   674	  3580 0 ));
 
 /* minmax abstime */
-DATA(insert (   4072    702  702 1 s       562    3580 0 ));
-DATA(insert (   4072    702  702 2 s       564    3580 0 ));
-DATA(insert (   4072    702  702 3 s       560    3580 0 ));
-DATA(insert (   4072    702  702 4 s       565    3580 0 ));
-DATA(insert (   4072    702  702 5 s       563    3580 0 ));
+DATA(insert (	4072	702  702 1 s	   562	  3580 0 ));
+DATA(insert (	4072	702  702 2 s	   564	  3580 0 ));
+DATA(insert (	4072	702  702 3 s	   560	  3580 0 ));
+DATA(insert (	4072	702  702 4 s	   565	  3580 0 ));
+DATA(insert (	4072	702  702 5 s	   563	  3580 0 ));
 /* minmax reltime */
-DATA(insert (   4073    703  703 1 s       568    3580 0 ));
-DATA(insert (   4073    703  703 2 s       570    3580 0 ));
-DATA(insert (   4073    703  703 3 s       566    3580 0 ));
-DATA(insert (   4073    703  703 4 s       571    3580 0 ));
-DATA(insert (   4073    703  703 5 s       569    3580 0 ));
+DATA(insert (	4073	703  703 1 s	   568	  3580 0 ));
+DATA(insert (	4073	703  703 2 s	   570	  3580 0 ));
+DATA(insert (	4073	703  703 3 s	   566	  3580 0 ));
+DATA(insert (	4073	703  703 4 s	   571	  3580 0 ));
+DATA(insert (	4073	703  703 5 s	   569	  3580 0 ));
 /* minmax macaddr */
-DATA(insert (   4074    829  829 1 s      1222    3580 0 ));
-DATA(insert (   4074    829  829 2 s      1223    3580 0 ));
-DATA(insert (   4074    829  829 3 s      1220    3580 0 ));
-DATA(insert (   4074    829  829 4 s      1225    3580 0 ));
-DATA(insert (   4074    829  829 5 s      1224    3580 0 ));
+DATA(insert (	4074	829  829 1 s	  1222	  3580 0 ));
+DATA(insert (	4074	829  829 2 s	  1223	  3580 0 ));
+DATA(insert (	4074	829  829 3 s	  1220	  3580 0 ));
+DATA(insert (	4074	829  829 4 s	  1225	  3580 0 ));
+DATA(insert (	4074	829  829 5 s	  1224	  3580 0 ));
 /* minmax inet */
-DATA(insert (   4075    869  869 1 s      1203    3580 0 ));
-DATA(insert (   4075    869  869 2 s      1204    3580 0 ));
-DATA(insert (   4075    869  869 3 s      1201    3580 0 ));
-DATA(insert (   4075    869  869 4 s      1206    3580 0 ));
-DATA(insert (   4075    869  869 5 s      1205    3580 0 ));
+DATA(insert (	4075	869  869 1 s	  1203	  3580 0 ));
+DATA(insert (	4075	869  869 2 s	  1204	  3580 0 ));
+DATA(insert (	4075	869  869 3 s	  1201	  3580 0 ));
+DATA(insert (	4075	869  869 4 s	  1206	  3580 0 ));
+DATA(insert (	4075	869  869 5 s	  1205	  3580 0 ));
 /* inclusion inet */
-DATA(insert (   4102    869  869 3 s      3552    3580 0 ));
-DATA(insert (   4102    869  869 7 s       934    3580 0 ));
-DATA(insert (   4102    869  869 8 s       932    3580 0 ));
-DATA(insert (   4102    869  869 18 s     1201    3580 0 ));
-DATA(insert (   4102    869  869 24 s      933    3580 0 ));
-DATA(insert (   4102    869  869 26 s      931    3580 0 ));
+DATA(insert (	4102	869  869 3 s	  3552	  3580 0 ));
+DATA(insert (	4102	869  869 7 s	   934	  3580 0 ));
+DATA(insert (	4102	869  869 8 s	   932	  3580 0 ));
+DATA(insert (	4102	869  869 18 s	  1201	  3580 0 ));
+DATA(insert (	4102	869  869 24 s	   933	  3580 0 ));
+DATA(insert (	4102	869  869 26 s	   931	  3580 0 ));
 /* minmax character */
-DATA(insert (   4076   1042 1042 1 s      1058    3580 0 ));
-DATA(insert (   4076   1042 1042 2 s      1059    3580 0 ));
-DATA(insert (   4076   1042 1042 3 s      1054    3580 0 ));
-DATA(insert (   4076   1042 1042 4 s      1061    3580 0 ));
-DATA(insert (   4076   1042 1042 5 s      1060    3580 0 ));
+DATA(insert (	4076   1042 1042 1 s	  1058	  3580 0 ));
+DATA(insert (	4076   1042 1042 2 s	  1059	  3580 0 ));
+DATA(insert (	4076   1042 1042 3 s	  1054	  3580 0 ));
+DATA(insert (	4076   1042 1042 4 s	  1061	  3580 0 ));
+DATA(insert (	4076   1042 1042 5 s	  1060	  3580 0 ));
 /* minmax time without time zone */
-DATA(insert (   4077   1083 1083 1 s      1110    3580 0 ));
-DATA(insert (   4077   1083 1083 2 s      1111    3580 0 ));
-DATA(insert (   4077   1083 1083 3 s      1108    3580 0 ));
-DATA(insert (   4077   1083 1083 4 s      1113    3580 0 ));
-DATA(insert (   4077   1083 1083 5 s      1112    3580 0 ));
+DATA(insert (	4077   1083 1083 1 s	  1110	  3580 0 ));
+DATA(insert (	4077   1083 1083 2 s	  1111	  3580 0 ));
+DATA(insert (	4077   1083 1083 3 s	  1108	  3580 0 ));
+DATA(insert (	4077   1083 1083 4 s	  1113	  3580 0 ));
+DATA(insert (	4077   1083 1083 5 s	  1112	  3580 0 ));
 /* minmax datetime (date, timestamp, timestamptz) */
-DATA(insert (   4059   1114 1114 1 s      2062    3580 0 ));
-DATA(insert (   4059   1114 1114 2 s      2063    3580 0 ));
-DATA(insert (   4059   1114 1114 3 s      2060    3580 0 ));
-DATA(insert (   4059   1114 1114 4 s      2065    3580 0 ));
-DATA(insert (   4059   1114 1114 5 s      2064    3580 0 ));
-DATA(insert (   4059   1114 1082 1 s      2371    3580 0 ));
-DATA(insert (   4059   1114 1082 2 s      2372    3580 0 ));
-DATA(insert (   4059   1114 1082 3 s      2373    3580 0 ));
-DATA(insert (   4059   1114 1082 4 s      2374    3580 0 ));
-DATA(insert (   4059   1114 1082 5 s      2375    3580 0 ));
-DATA(insert (   4059   1114 1184 1 s      2534    3580 0 ));
-DATA(insert (   4059   1114 1184 2 s      2535    3580 0 ));
-DATA(insert (   4059   1114 1184 3 s      2536    3580 0 ));
-DATA(insert (   4059   1114 1184 4 s      2537    3580 0 ));
-DATA(insert (   4059   1114 1184 5 s      2538    3580 0 ));
-DATA(insert (   4059   1082 1082 1 s      1095    3580 0 ));
-DATA(insert (   4059   1082 1082 2 s      1096    3580 0 ));
-DATA(insert (   4059   1082 1082 3 s      1093    3580 0 ));
-DATA(insert (   4059   1082 1082 4 s      1098    3580 0 ));
-DATA(insert (   4059   1082 1082 5 s      1097    3580 0 ));
-DATA(insert (   4059   1082 1114 1 s      2345    3580 0 ));
-DATA(insert (   4059   1082 1114 2 s      2346    3580 0 ));
-DATA(insert (   4059   1082 1114 3 s      2347    3580 0 ));
-DATA(insert (   4059   1082 1114 4 s      2348    3580 0 ));
-DATA(insert (   4059   1082 1114 5 s      2349    3580 0 ));
-DATA(insert (   4059   1082 1184 1 s      2358    3580 0 ));
-DATA(insert (   4059   1082 1184 2 s      2359    3580 0 ));
-DATA(insert (   4059   1082 1184 3 s      2360    3580 0 ));
-DATA(insert (   4059   1082 1184 4 s      2361    3580 0 ));
-DATA(insert (   4059   1082 1184 5 s      2362    3580 0 ));
-DATA(insert (   4059   1184 1082 1 s      2384    3580 0 ));
-DATA(insert (   4059   1184 1082 2 s      2385    3580 0 ));
-DATA(insert (   4059   1184 1082 3 s      2386    3580 0 ));
-DATA(insert (   4059   1184 1082 4 s      2387    3580 0 ));
-DATA(insert (   4059   1184 1082 5 s      2388    3580 0 ));
-DATA(insert (   4059   1184 1114 1 s      2540    3580 0 ));
-DATA(insert (   4059   1184 1114 2 s      2541    3580 0 ));
-DATA(insert (   4059   1184 1114 3 s      2542    3580 0 ));
-DATA(insert (   4059   1184 1114 4 s      2543    3580 0 ));
-DATA(insert (   4059   1184 1114 5 s      2544    3580 0 ));
-DATA(insert (   4059   1184 1184 1 s      1322    3580 0 ));
-DATA(insert (   4059   1184 1184 2 s      1323    3580 0 ));
-DATA(insert (   4059   1184 1184 3 s      1320    3580 0 ));
-DATA(insert (   4059   1184 1184 4 s      1325    3580 0 ));
-DATA(insert (   4059   1184 1184 5 s      1324    3580 0 ));
+DATA(insert (	4059   1114 1114 1 s	  2062	  3580 0 ));
+DATA(insert (	4059   1114 1114 2 s	  2063	  3580 0 ));
+DATA(insert (	4059   1114 1114 3 s	  2060	  3580 0 ));
+DATA(insert (	4059   1114 1114 4 s	  2065	  3580 0 ));
+DATA(insert (	4059   1114 1114 5 s	  2064	  3580 0 ));
+DATA(insert (	4059   1114 1082 1 s	  2371	  3580 0 ));
+DATA(insert (	4059   1114 1082 2 s	  2372	  3580 0 ));
+DATA(insert (	4059   1114 1082 3 s	  2373	  3580 0 ));
+DATA(insert (	4059   1114 1082 4 s	  2374	  3580 0 ));
+DATA(insert (	4059   1114 1082 5 s	  2375	  3580 0 ));
+DATA(insert (	4059   1114 1184 1 s	  2534	  3580 0 ));
+DATA(insert (	4059   1114 1184 2 s	  2535	  3580 0 ));
+DATA(insert (	4059   1114 1184 3 s	  2536	  3580 0 ));
+DATA(insert (	4059   1114 1184 4 s	  2537	  3580 0 ));
+DATA(insert (	4059   1114 1184 5 s	  2538	  3580 0 ));
+DATA(insert (	4059   1082 1082 1 s	  1095	  3580 0 ));
+DATA(insert (	4059   1082 1082 2 s	  1096	  3580 0 ));
+DATA(insert (	4059   1082 1082 3 s	  1093	  3580 0 ));
+DATA(insert (	4059   1082 1082 4 s	  1098	  3580 0 ));
+DATA(insert (	4059   1082 1082 5 s	  1097	  3580 0 ));
+DATA(insert (	4059   1082 1114 1 s	  2345	  3580 0 ));
+DATA(insert (	4059   1082 1114 2 s	  2346	  3580 0 ));
+DATA(insert (	4059   1082 1114 3 s	  2347	  3580 0 ));
+DATA(insert (	4059   1082 1114 4 s	  2348	  3580 0 ));
+DATA(insert (	4059   1082 1114 5 s	  2349	  3580 0 ));
+DATA(insert (	4059   1082 1184 1 s	  2358	  3580 0 ));
+DATA(insert (	4059   1082 1184 2 s	  2359	  3580 0 ));
+DATA(insert (	4059   1082 1184 3 s	  2360	  3580 0 ));
+DATA(insert (	4059   1082 1184 4 s	  2361	  3580 0 ));
+DATA(insert (	4059   1082 1184 5 s	  2362	  3580 0 ));
+DATA(insert (	4059   1184 1082 1 s	  2384	  3580 0 ));
+DATA(insert (	4059   1184 1082 2 s	  2385	  3580 0 ));
+DATA(insert (	4059   1184 1082 3 s	  2386	  3580 0 ));
+DATA(insert (	4059   1184 1082 4 s	  2387	  3580 0 ));
+DATA(insert (	4059   1184 1082 5 s	  2388	  3580 0 ));
+DATA(insert (	4059   1184 1114 1 s	  2540	  3580 0 ));
+DATA(insert (	4059   1184 1114 2 s	  2541	  3580 0 ));
+DATA(insert (	4059   1184 1114 3 s	  2542	  3580 0 ));
+DATA(insert (	4059   1184 1114 4 s	  2543	  3580 0 ));
+DATA(insert (	4059   1184 1114 5 s	  2544	  3580 0 ));
+DATA(insert (	4059   1184 1184 1 s	  1322	  3580 0 ));
+DATA(insert (	4059   1184 1184 2 s	  1323	  3580 0 ));
+DATA(insert (	4059   1184 1184 3 s	  1320	  3580 0 ));
+DATA(insert (	4059   1184 1184 4 s	  1325	  3580 0 ));
+DATA(insert (	4059   1184 1184 5 s	  1324	  3580 0 ));
 
 /* minmax interval */
-DATA(insert (   4078   1186 1186 1 s      1332    3580 0 ));
-DATA(insert (   4078   1186 1186 2 s      1333    3580 0 ));
-DATA(insert (   4078   1186 1186 3 s      1330    3580 0 ));
-DATA(insert (   4078   1186 1186 4 s      1335    3580 0 ));
-DATA(insert (   4078   1186 1186 5 s      1334    3580 0 ));
+DATA(insert (	4078   1186 1186 1 s	  1332	  3580 0 ));
+DATA(insert (	4078   1186 1186 2 s	  1333	  3580 0 ));
+DATA(insert (	4078   1186 1186 3 s	  1330	  3580 0 ));
+DATA(insert (	4078   1186 1186 4 s	  1335	  3580 0 ));
+DATA(insert (	4078   1186 1186 5 s	  1334	  3580 0 ));
 /* minmax time with time zone */
-DATA(insert (   4058   1266 1266 1 s      1552    3580 0 ));
-DATA(insert (   4058   1266 1266 2 s      1553    3580 0 ));
-DATA(insert (   4058   1266 1266 3 s      1550    3580 0 ));
-DATA(insert (   4058   1266 1266 4 s      1555    3580 0 ));
-DATA(insert (   4058   1266 1266 5 s      1554    3580 0 ));
+DATA(insert (	4058   1266 1266 1 s	  1552	  3580 0 ));
+DATA(insert (	4058   1266 1266 2 s	  1553	  3580 0 ));
+DATA(insert (	4058   1266 1266 3 s	  1550	  3580 0 ));
+DATA(insert (	4058   1266 1266 4 s	  1555	  3580 0 ));
+DATA(insert (	4058   1266 1266 5 s	  1554	  3580 0 ));
 /* minmax bit */
-DATA(insert (   4079   1560 1560 1 s      1786    3580 0 ));
-DATA(insert (   4079   1560 1560 2 s      1788    3580 0 ));
-DATA(insert (   4079   1560 1560 3 s      1784    3580 0 ));
-DATA(insert (   4079   1560 1560 4 s      1789    3580 0 ));
-DATA(insert (   4079   1560 1560 5 s      1787    3580 0 ));
+DATA(insert (	4079   1560 1560 1 s	  1786	  3580 0 ));
+DATA(insert (	4079   1560 1560 2 s	  1788	  3580 0 ));
+DATA(insert (	4079   1560 1560 3 s	  1784	  3580 0 ));
+DATA(insert (	4079   1560 1560 4 s	  1789	  3580 0 ));
+DATA(insert (	4079   1560 1560 5 s	  1787	  3580 0 ));
 /* minmax bit varying */
-DATA(insert (   4080   1562 1562 1 s      1806    3580 0 ));
-DATA(insert (   4080   1562 1562 2 s      1808    3580 0 ));
-DATA(insert (   4080   1562 1562 3 s      1804    3580 0 ));
-DATA(insert (   4080   1562 1562 4 s      1809    3580 0 ));
-DATA(insert (   4080   1562 1562 5 s      1807    3580 0 ));
+DATA(insert (	4080   1562 1562 1 s	  1806	  3580 0 ));
+DATA(insert (	4080   1562 1562 2 s	  1808	  3580 0 ));
+DATA(insert (	4080   1562 1562 3 s	  1804	  3580 0 ));
+DATA(insert (	4080   1562 1562 4 s	  1809	  3580 0 ));
+DATA(insert (	4080   1562 1562 5 s	  1807	  3580 0 ));
 /* minmax numeric */
-DATA(insert (   4055   1700 1700 1 s      1754    3580 0 ));
-DATA(insert (   4055   1700 1700 2 s      1755    3580 0 ));
-DATA(insert (   4055   1700 1700 3 s      1752    3580 0 ));
-DATA(insert (   4055   1700 1700 4 s      1757    3580 0 ));
-DATA(insert (   4055   1700 1700 5 s      1756    3580 0 ));
+DATA(insert (	4055   1700 1700 1 s	  1754	  3580 0 ));
+DATA(insert (	4055   1700 1700 2 s	  1755	  3580 0 ));
+DATA(insert (	4055   1700 1700 3 s	  1752	  3580 0 ));
+DATA(insert (	4055   1700 1700 4 s	  1757	  3580 0 ));
+DATA(insert (	4055   1700 1700 5 s	  1756	  3580 0 ));
 /* minmax uuid */
-DATA(insert (   4081   2950 2950 1 s      2974    3580 0 ));
-DATA(insert (   4081   2950 2950 2 s      2976    3580 0 ));
-DATA(insert (   4081   2950 2950 3 s      2972    3580 0 ));
-DATA(insert (   4081   2950 2950 4 s      2977    3580 0 ));
-DATA(insert (   4081   2950 2950 5 s      2975    3580 0 ));
+DATA(insert (	4081   2950 2950 1 s	  2974	  3580 0 ));
+DATA(insert (	4081   2950 2950 2 s	  2976	  3580 0 ));
+DATA(insert (	4081   2950 2950 3 s	  2972	  3580 0 ));
+DATA(insert (	4081   2950 2950 4 s	  2977	  3580 0 ));
+DATA(insert (	4081   2950 2950 5 s	  2975	  3580 0 ));
 /* inclusion range types */
-DATA(insert (   4103   3831 3831  1 s     3893    3580 0 ));
-DATA(insert (   4103   3831 3831  2 s     3895    3580 0 ));
-DATA(insert (   4103   3831 3831  3 s     3888    3580 0 ));
-DATA(insert (   4103   3831 3831  4 s     3896    3580 0 ));
-DATA(insert (   4103   3831 3831  5 s     3894    3580 0 ));
-DATA(insert (   4103   3831 3831  7 s     3890    3580 0 ));
-DATA(insert (   4103   3831 3831  8 s     3892    3580 0 ));
-DATA(insert (   4103   3831 2283 16 s     3889    3580 0 ));
-DATA(insert (   4103   3831 3831 17 s     3897    3580 0 ));
-DATA(insert (   4103   3831 3831 18 s     3882    3580 0 ));
-DATA(insert (   4103   3831 3831 20 s     3884    3580 0 ));
-DATA(insert (   4103   3831 3831 21 s     3885    3580 0 ));
-DATA(insert (   4103   3831 3831 22 s     3887    3580 0 ));
-DATA(insert (   4103   3831 3831 23 s     3886    3580 0 ));
+DATA(insert (	4103   3831 3831  1 s	  3893	  3580 0 ));
+DATA(insert (	4103   3831 3831  2 s	  3895	  3580 0 ));
+DATA(insert (	4103   3831 3831  3 s	  3888	  3580 0 ));
+DATA(insert (	4103   3831 3831  4 s	  3896	  3580 0 ));
+DATA(insert (	4103   3831 3831  5 s	  3894	  3580 0 ));
+DATA(insert (	4103   3831 3831  7 s	  3890	  3580 0 ));
+DATA(insert (	4103   3831 3831  8 s	  3892	  3580 0 ));
+DATA(insert (	4103   3831 2283 16 s	  3889	  3580 0 ));
+DATA(insert (	4103   3831 3831 17 s	  3897	  3580 0 ));
+DATA(insert (	4103   3831 3831 18 s	  3882	  3580 0 ));
+DATA(insert (	4103   3831 3831 20 s	  3884	  3580 0 ));
+DATA(insert (	4103   3831 3831 21 s	  3885	  3580 0 ));
+DATA(insert (	4103   3831 3831 22 s	  3887	  3580 0 ));
+DATA(insert (	4103   3831 3831 23 s	  3886	  3580 0 ));
 /* minmax pg_lsn */
-DATA(insert (   4082   3220 3220 1 s      3224    3580 0 ));
-DATA(insert (   4082   3220 3220 2 s      3226    3580 0 ));
-DATA(insert (   4082   3220 3220 3 s      3222    3580 0 ));
-DATA(insert (   4082   3220 3220 4 s      3227    3580 0 ));
-DATA(insert (   4082   3220 3220 5 s      3225    3580 0 ));
+DATA(insert (	4082   3220 3220 1 s	  3224	  3580 0 ));
+DATA(insert (	4082   3220 3220 2 s	  3226	  3580 0 ));
+DATA(insert (	4082   3220 3220 3 s	  3222	  3580 0 ));
+DATA(insert (	4082   3220 3220 4 s	  3227	  3580 0 ));
+DATA(insert (	4082   3220 3220 5 s	  3225	  3580 0 ));
 /* inclusion box */
-DATA(insert (	4104    603	 603  1 s	   493	  3580 0 ));
-DATA(insert (	4104    603  603  2 s	   494	  3580 0 ));
-DATA(insert (	4104    603  603  3 s	   500	  3580 0 ));
-DATA(insert (	4104    603  603  4 s	   495	  3580 0 ));
-DATA(insert (	4104    603  603  5 s	   496	  3580 0 ));
-DATA(insert (	4104    603  603  6 s	   499	  3580 0 ));
-DATA(insert (	4104    603  603  7 s	   498	  3580 0 ));
-DATA(insert (	4104    603  603  8 s	   497	  3580 0 ));
-DATA(insert (	4104    603  603  9 s	  2571	  3580 0 ));
-DATA(insert (	4104    603  603 10 s 	  2570	  3580 0 ));
-DATA(insert (	4104    603  603 11 s 	  2573	  3580 0 ));
-DATA(insert (	4104    603  603 12 s 	  2572	  3580 0 ));
+DATA(insert (	4104	603  603  1 s	   493	  3580 0 ));
+DATA(insert (	4104	603  603  2 s	   494	  3580 0 ));
+DATA(insert (	4104	603  603  3 s	   500	  3580 0 ));
+DATA(insert (	4104	603  603  4 s	   495	  3580 0 ));
+DATA(insert (	4104	603  603  5 s	   496	  3580 0 ));
+DATA(insert (	4104	603  603  6 s	   499	  3580 0 ));
+DATA(insert (	4104	603  603  7 s	   498	  3580 0 ));
+DATA(insert (	4104	603  603  8 s	   497	  3580 0 ));
+DATA(insert (	4104	603  603  9 s	  2571	  3580 0 ));
+DATA(insert (	4104	603  603 10 s	  2570	  3580 0 ));
+DATA(insert (	4104	603  603 11 s	  2573	  3580 0 ));
+DATA(insert (	4104	603  603 12 s	  2572	  3580 0 ));
 /* we could, but choose not to, supply entries for strategies 13 and 14 */
-DATA(insert (	4104    603  600  7 s	   433	  3580 0 ));
+DATA(insert (	4104	603  600  7 s	   433	  3580 0 ));
 
 #endif   /* PG_AMOP_H */
diff --git a/src/include/catalog/pg_amproc.h b/src/include/catalog/pg_amproc.h
index f22e9a61ef6..b57d6e65ca2 100644
--- a/src/include/catalog/pg_amproc.h
+++ b/src/include/catalog/pg_amproc.h
@@ -441,223 +441,223 @@ DATA(insert (	4017   25 25 5 4031 ));
 
 /* BRIN opclasses */
 /* minmax bytea */
-DATA(insert (   4064    17    17  1  3383 ));
-DATA(insert (   4064    17    17  2  3384 ));
-DATA(insert (   4064    17    17  3  3385 ));
-DATA(insert (   4064    17    17  4  3386 ));
+DATA(insert (	4064	17	  17  1  3383 ));
+DATA(insert (	4064	17	  17  2  3384 ));
+DATA(insert (	4064	17	  17  3  3385 ));
+DATA(insert (	4064	17	  17  4  3386 ));
 /* minmax "char" */
-DATA(insert (   4062    18    18  1  3383 ));
-DATA(insert (   4062    18    18  2  3384 ));
-DATA(insert (   4062    18    18  3  3385 ));
-DATA(insert (   4062    18    18  4  3386 ));
+DATA(insert (	4062	18	  18  1  3383 ));
+DATA(insert (	4062	18	  18  2  3384 ));
+DATA(insert (	4062	18	  18  3  3385 ));
+DATA(insert (	4062	18	  18  4  3386 ));
 /* minmax name */
-DATA(insert (   4065    19    19  1  3383 ));
-DATA(insert (   4065    19    19  2  3384 ));
-DATA(insert (   4065    19    19  3  3385 ));
-DATA(insert (   4065    19    19  4  3386 ));
+DATA(insert (	4065	19	  19  1  3383 ));
+DATA(insert (	4065	19	  19  2  3384 ));
+DATA(insert (	4065	19	  19  3  3385 ));
+DATA(insert (	4065	19	  19  4  3386 ));
 /* minmax integer: int2, int4, int8 */
-DATA(insert (   4054    20    20  1  3383 ));
-DATA(insert (   4054    20    20  2  3384 ));
-DATA(insert (   4054    20    20  3  3385 ));
-DATA(insert (   4054    20    20  4  3386 ));
-DATA(insert (   4054    20    21  1  3383 ));
-DATA(insert (   4054    20    21  2  3384 ));
-DATA(insert (   4054    20    21  3  3385 ));
-DATA(insert (   4054    20    21  4  3386 ));
-DATA(insert (   4054    20    23  1  3383 ));
-DATA(insert (   4054    20    23  2  3384 ));
-DATA(insert (   4054    20    23  3  3385 ));
-DATA(insert (   4054    20    23  4  3386 ));
+DATA(insert (	4054	20	  20  1  3383 ));
+DATA(insert (	4054	20	  20  2  3384 ));
+DATA(insert (	4054	20	  20  3  3385 ));
+DATA(insert (	4054	20	  20  4  3386 ));
+DATA(insert (	4054	20	  21  1  3383 ));
+DATA(insert (	4054	20	  21  2  3384 ));
+DATA(insert (	4054	20	  21  3  3385 ));
+DATA(insert (	4054	20	  21  4  3386 ));
+DATA(insert (	4054	20	  23  1  3383 ));
+DATA(insert (	4054	20	  23  2  3384 ));
+DATA(insert (	4054	20	  23  3  3385 ));
+DATA(insert (	4054	20	  23  4  3386 ));
 
-DATA(insert (   4054    21    21  1  3383 ));
-DATA(insert (   4054    21    21  2  3384 ));
-DATA(insert (   4054    21    21  3  3385 ));
-DATA(insert (   4054    21    21  4  3386 ));
-DATA(insert (   4054    21    20  1  3383 ));
-DATA(insert (   4054    21    20  2  3384 ));
-DATA(insert (   4054    21    20  3  3385 ));
-DATA(insert (   4054    21    20  4  3386 ));
-DATA(insert (   4054    21    23  1  3383 ));
-DATA(insert (   4054    21    23  2  3384 ));
-DATA(insert (   4054    21    23  3  3385 ));
-DATA(insert (   4054    21    23  4  3386 ));
+DATA(insert (	4054	21	  21  1  3383 ));
+DATA(insert (	4054	21	  21  2  3384 ));
+DATA(insert (	4054	21	  21  3  3385 ));
+DATA(insert (	4054	21	  21  4  3386 ));
+DATA(insert (	4054	21	  20  1  3383 ));
+DATA(insert (	4054	21	  20  2  3384 ));
+DATA(insert (	4054	21	  20  3  3385 ));
+DATA(insert (	4054	21	  20  4  3386 ));
+DATA(insert (	4054	21	  23  1  3383 ));
+DATA(insert (	4054	21	  23  2  3384 ));
+DATA(insert (	4054	21	  23  3  3385 ));
+DATA(insert (	4054	21	  23  4  3386 ));
 
-DATA(insert (   4054    23    23  1  3383 ));
-DATA(insert (   4054    23    23  2  3384 ));
-DATA(insert (   4054    23    23  3  3385 ));
-DATA(insert (   4054    23    23  4  3386 ));
-DATA(insert (   4054    23    20  1  3383 ));
-DATA(insert (   4054    23    20  2  3384 ));
-DATA(insert (   4054    23    20  3  3385 ));
-DATA(insert (   4054    23    20  4  3386 ));
-DATA(insert (   4054    23    21  1  3383 ));
-DATA(insert (   4054    23    21  2  3384 ));
-DATA(insert (   4054    23    21  3  3385 ));
-DATA(insert (   4054    23    21  4  3386 ));
+DATA(insert (	4054	23	  23  1  3383 ));
+DATA(insert (	4054	23	  23  2  3384 ));
+DATA(insert (	4054	23	  23  3  3385 ));
+DATA(insert (	4054	23	  23  4  3386 ));
+DATA(insert (	4054	23	  20  1  3383 ));
+DATA(insert (	4054	23	  20  2  3384 ));
+DATA(insert (	4054	23	  20  3  3385 ));
+DATA(insert (	4054	23	  20  4  3386 ));
+DATA(insert (	4054	23	  21  1  3383 ));
+DATA(insert (	4054	23	  21  2  3384 ));
+DATA(insert (	4054	23	  21  3  3385 ));
+DATA(insert (	4054	23	  21  4  3386 ));
 
 /* minmax text */
-DATA(insert (   4056    25    25  1  3383 ));
-DATA(insert (   4056    25    25  2  3384 ));
-DATA(insert (   4056    25    25  3  3385 ));
-DATA(insert (   4056    25    25  4  3386 ));
+DATA(insert (	4056	25	  25  1  3383 ));
+DATA(insert (	4056	25	  25  2  3384 ));
+DATA(insert (	4056	25	  25  3  3385 ));
+DATA(insert (	4056	25	  25  4  3386 ));
 /* minmax oid */
-DATA(insert (   4068    26    26  1  3383 ));
-DATA(insert (   4068    26    26  2  3384 ));
-DATA(insert (   4068    26    26  3  3385 ));
-DATA(insert (   4068    26    26  4  3386 ));
+DATA(insert (	4068	26	  26  1  3383 ));
+DATA(insert (	4068	26	  26  2  3384 ));
+DATA(insert (	4068	26	  26  3  3385 ));
+DATA(insert (	4068	26	  26  4  3386 ));
 /* minmax tid */
-DATA(insert (   4069    27    27  1  3383 ));
-DATA(insert (   4069    27    27  2  3384 ));
-DATA(insert (   4069    27    27  3  3385 ));
-DATA(insert (   4069    27    27  4  3386 ));
+DATA(insert (	4069	27	  27  1  3383 ));
+DATA(insert (	4069	27	  27  2  3384 ));
+DATA(insert (	4069	27	  27  3  3385 ));
+DATA(insert (	4069	27	  27  4  3386 ));
 /* minmax float */
-DATA(insert (   4070   700   700  1  3383 ));
-DATA(insert (   4070   700   700  2  3384 ));
-DATA(insert (   4070   700   700  3  3385 ));
-DATA(insert (   4070   700   700  4  3386 ));
+DATA(insert (	4070   700	 700  1  3383 ));
+DATA(insert (	4070   700	 700  2  3384 ));
+DATA(insert (	4070   700	 700  3  3385 ));
+DATA(insert (	4070   700	 700  4  3386 ));
 
-DATA(insert (   4070   700   701  1  3383 ));
-DATA(insert (   4070   700   701  2  3384 ));
-DATA(insert (   4070   700   701  3  3385 ));
-DATA(insert (   4070   700   701  4  3386 ));
+DATA(insert (	4070   700	 701  1  3383 ));
+DATA(insert (	4070   700	 701  2  3384 ));
+DATA(insert (	4070   700	 701  3  3385 ));
+DATA(insert (	4070   700	 701  4  3386 ));
 
-DATA(insert (   4070   701   701  1  3383 ));
-DATA(insert (   4070   701   701  2  3384 ));
-DATA(insert (   4070   701   701  3  3385 ));
-DATA(insert (   4070   701   701  4  3386 ));
+DATA(insert (	4070   701	 701  1  3383 ));
+DATA(insert (	4070   701	 701  2  3384 ));
+DATA(insert (	4070   701	 701  3  3385 ));
+DATA(insert (	4070   701	 701  4  3386 ));
 
-DATA(insert (   4070   701   700  1  3383 ));
-DATA(insert (   4070   701   700  2  3384 ));
-DATA(insert (   4070   701   700  3  3385 ));
-DATA(insert (   4070   701   700  4  3386 ));
+DATA(insert (	4070   701	 700  1  3383 ));
+DATA(insert (	4070   701	 700  2  3384 ));
+DATA(insert (	4070   701	 700  3  3385 ));
+DATA(insert (	4070   701	 700  4  3386 ));
 
 /* minmax abstime */
-DATA(insert (   4072   702   702  1  3383 ));
-DATA(insert (   4072   702   702  2  3384 ));
-DATA(insert (   4072   702   702  3  3385 ));
-DATA(insert (   4072   702   702  4  3386 ));
+DATA(insert (	4072   702	 702  1  3383 ));
+DATA(insert (	4072   702	 702  2  3384 ));
+DATA(insert (	4072   702	 702  3  3385 ));
+DATA(insert (	4072   702	 702  4  3386 ));
 /* minmax reltime */
-DATA(insert (   4073   703   703  1  3383 ));
-DATA(insert (   4073   703   703  2  3384 ));
-DATA(insert (   4073   703   703  3  3385 ));
-DATA(insert (   4073   703   703  4  3386 ));
+DATA(insert (	4073   703	 703  1  3383 ));
+DATA(insert (	4073   703	 703  2  3384 ));
+DATA(insert (	4073   703	 703  3  3385 ));
+DATA(insert (	4073   703	 703  4  3386 ));
 /* minmax macaddr */
-DATA(insert (   4074   829   829  1  3383 ));
-DATA(insert (   4074   829   829  2  3384 ));
-DATA(insert (   4074   829   829  3  3385 ));
-DATA(insert (   4074   829   829  4  3386 ));
+DATA(insert (	4074   829	 829  1  3383 ));
+DATA(insert (	4074   829	 829  2  3384 ));
+DATA(insert (	4074   829	 829  3  3385 ));
+DATA(insert (	4074   829	 829  4  3386 ));
 /* minmax inet */
-DATA(insert (   4075   869   869  1  3383 ));
-DATA(insert (   4075   869   869  2  3384 ));
-DATA(insert (   4075   869   869  3  3385 ));
-DATA(insert (   4075   869   869  4  3386 ));
+DATA(insert (	4075   869	 869  1  3383 ));
+DATA(insert (	4075   869	 869  2  3384 ));
+DATA(insert (	4075   869	 869  3  3385 ));
+DATA(insert (	4075   869	 869  4  3386 ));
 /* inclusion inet */
-DATA(insert (   4102   869   869  1  4105 ));
-DATA(insert (   4102   869   869  2  4106 ));
-DATA(insert (   4102   869   869  3  4107 ));
-DATA(insert (   4102   869   869  4  4108 ));
-DATA(insert (   4102   869   869 11  4063 ));
-DATA(insert (   4102   869   869 12  4071 ));
-DATA(insert (   4102   869   869 13   930 ));
+DATA(insert (	4102   869	 869  1  4105 ));
+DATA(insert (	4102   869	 869  2  4106 ));
+DATA(insert (	4102   869	 869  3  4107 ));
+DATA(insert (	4102   869	 869  4  4108 ));
+DATA(insert (	4102   869	 869 11  4063 ));
+DATA(insert (	4102   869	 869 12  4071 ));
+DATA(insert (	4102   869	 869 13   930 ));
 /* minmax character */
-DATA(insert (   4076  1042  1042  1  3383 ));
-DATA(insert (   4076  1042  1042  2  3384 ));
-DATA(insert (   4076  1042  1042  3  3385 ));
-DATA(insert (   4076  1042  1042  4  3386 ));
+DATA(insert (	4076  1042	1042  1  3383 ));
+DATA(insert (	4076  1042	1042  2  3384 ));
+DATA(insert (	4076  1042	1042  3  3385 ));
+DATA(insert (	4076  1042	1042  4  3386 ));
 /* minmax time without time zone */
-DATA(insert (   4077  1083  1083  1  3383 ));
-DATA(insert (   4077  1083  1083  2  3384 ));
-DATA(insert (   4077  1083  1083  3  3385 ));
-DATA(insert (   4077  1083  1083  4  3386 ));
+DATA(insert (	4077  1083	1083  1  3383 ));
+DATA(insert (	4077  1083	1083  2  3384 ));
+DATA(insert (	4077  1083	1083  3  3385 ));
+DATA(insert (	4077  1083	1083  4  3386 ));
 /* minmax datetime (date, timestamp, timestamptz) */
-DATA(insert (   4059  1114  1114  1  3383 ));
-DATA(insert (   4059  1114  1114  2  3384 ));
-DATA(insert (   4059  1114  1114  3  3385 ));
-DATA(insert (   4059  1114  1114  4  3386 ));
-DATA(insert (   4059  1114  1184  1  3383 ));
-DATA(insert (   4059  1114  1184  2  3384 ));
-DATA(insert (   4059  1114  1184  3  3385 ));
-DATA(insert (   4059  1114  1184  4  3386 ));
-DATA(insert (   4059  1114  1082  1  3383 ));
-DATA(insert (   4059  1114  1082  2  3384 ));
-DATA(insert (   4059  1114  1082  3  3385 ));
-DATA(insert (   4059  1114  1082  4  3386 ));
+DATA(insert (	4059  1114	1114  1  3383 ));
+DATA(insert (	4059  1114	1114  2  3384 ));
+DATA(insert (	4059  1114	1114  3  3385 ));
+DATA(insert (	4059  1114	1114  4  3386 ));
+DATA(insert (	4059  1114	1184  1  3383 ));
+DATA(insert (	4059  1114	1184  2  3384 ));
+DATA(insert (	4059  1114	1184  3  3385 ));
+DATA(insert (	4059  1114	1184  4  3386 ));
+DATA(insert (	4059  1114	1082  1  3383 ));
+DATA(insert (	4059  1114	1082  2  3384 ));
+DATA(insert (	4059  1114	1082  3  3385 ));
+DATA(insert (	4059  1114	1082  4  3386 ));
 
-DATA(insert (   4059  1184  1184  1  3383 ));
-DATA(insert (   4059  1184  1184  2  3384 ));
-DATA(insert (   4059  1184  1184  3  3385 ));
-DATA(insert (   4059  1184  1184  4  3386 ));
-DATA(insert (   4059  1184  1114  1  3383 ));
-DATA(insert (   4059  1184  1114  2  3384 ));
-DATA(insert (   4059  1184  1114  3  3385 ));
-DATA(insert (   4059  1184  1114  4  3386 ));
-DATA(insert (   4059  1184  1082  1  3383 ));
-DATA(insert (   4059  1184  1082  2  3384 ));
-DATA(insert (   4059  1184  1082  3  3385 ));
-DATA(insert (   4059  1184  1082  4  3386 ));
+DATA(insert (	4059  1184	1184  1  3383 ));
+DATA(insert (	4059  1184	1184  2  3384 ));
+DATA(insert (	4059  1184	1184  3  3385 ));
+DATA(insert (	4059  1184	1184  4  3386 ));
+DATA(insert (	4059  1184	1114  1  3383 ));
+DATA(insert (	4059  1184	1114  2  3384 ));
+DATA(insert (	4059  1184	1114  3  3385 ));
+DATA(insert (	4059  1184	1114  4  3386 ));
+DATA(insert (	4059  1184	1082  1  3383 ));
+DATA(insert (	4059  1184	1082  2  3384 ));
+DATA(insert (	4059  1184	1082  3  3385 ));
+DATA(insert (	4059  1184	1082  4  3386 ));
 
-DATA(insert (   4059  1082  1082  1  3383 ));
-DATA(insert (   4059  1082  1082  2  3384 ));
-DATA(insert (   4059  1082  1082  3  3385 ));
-DATA(insert (   4059  1082  1082  4  3386 ));
-DATA(insert (   4059  1082  1114  1  3383 ));
-DATA(insert (   4059  1082  1114  2  3384 ));
-DATA(insert (   4059  1082  1114  3  3385 ));
-DATA(insert (   4059  1082  1114  4  3386 ));
-DATA(insert (   4059  1082  1184  1  3383 ));
-DATA(insert (   4059  1082  1184  2  3384 ));
-DATA(insert (   4059  1082  1184  3  3385 ));
-DATA(insert (   4059  1082  1184  4  3386 ));
+DATA(insert (	4059  1082	1082  1  3383 ));
+DATA(insert (	4059  1082	1082  2  3384 ));
+DATA(insert (	4059  1082	1082  3  3385 ));
+DATA(insert (	4059  1082	1082  4  3386 ));
+DATA(insert (	4059  1082	1114  1  3383 ));
+DATA(insert (	4059  1082	1114  2  3384 ));
+DATA(insert (	4059  1082	1114  3  3385 ));
+DATA(insert (	4059  1082	1114  4  3386 ));
+DATA(insert (	4059  1082	1184  1  3383 ));
+DATA(insert (	4059  1082	1184  2  3384 ));
+DATA(insert (	4059  1082	1184  3  3385 ));
+DATA(insert (	4059  1082	1184  4  3386 ));
 
 /* minmax interval */
-DATA(insert (   4078  1186  1186  1  3383 ));
-DATA(insert (   4078  1186  1186  2  3384 ));
-DATA(insert (   4078  1186  1186  3  3385 ));
-DATA(insert (   4078  1186  1186  4  3386 ));
+DATA(insert (	4078  1186	1186  1  3383 ));
+DATA(insert (	4078  1186	1186  2  3384 ));
+DATA(insert (	4078  1186	1186  3  3385 ));
+DATA(insert (	4078  1186	1186  4  3386 ));
 /* minmax time with time zone */
-DATA(insert (   4058  1266  1266  1  3383 ));
-DATA(insert (   4058  1266  1266  2  3384 ));
-DATA(insert (   4058  1266  1266  3  3385 ));
-DATA(insert (   4058  1266  1266  4  3386 ));
+DATA(insert (	4058  1266	1266  1  3383 ));
+DATA(insert (	4058  1266	1266  2  3384 ));
+DATA(insert (	4058  1266	1266  3  3385 ));
+DATA(insert (	4058  1266	1266  4  3386 ));
 /* minmax bit */
-DATA(insert (   4079  1560  1560  1  3383 ));
-DATA(insert (   4079  1560  1560  2  3384 ));
-DATA(insert (   4079  1560  1560  3  3385 ));
-DATA(insert (   4079  1560  1560  4  3386 ));
+DATA(insert (	4079  1560	1560  1  3383 ));
+DATA(insert (	4079  1560	1560  2  3384 ));
+DATA(insert (	4079  1560	1560  3  3385 ));
+DATA(insert (	4079  1560	1560  4  3386 ));
 /* minmax bit varying */
-DATA(insert (   4080  1562  1562  1  3383 ));
-DATA(insert (   4080  1562  1562  2  3384 ));
-DATA(insert (   4080  1562  1562  3  3385 ));
-DATA(insert (   4080  1562  1562  4  3386 ));
+DATA(insert (	4080  1562	1562  1  3383 ));
+DATA(insert (	4080  1562	1562  2  3384 ));
+DATA(insert (	4080  1562	1562  3  3385 ));
+DATA(insert (	4080  1562	1562  4  3386 ));
 /* minmax numeric */
-DATA(insert (   4055  1700  1700  1  3383 ));
-DATA(insert (   4055  1700  1700  2  3384 ));
-DATA(insert (   4055  1700  1700  3  3385 ));
-DATA(insert (   4055  1700  1700  4  3386 ));
+DATA(insert (	4055  1700	1700  1  3383 ));
+DATA(insert (	4055  1700	1700  2  3384 ));
+DATA(insert (	4055  1700	1700  3  3385 ));
+DATA(insert (	4055  1700	1700  4  3386 ));
 /* minmax uuid */
-DATA(insert (   4081  2950  2950  1  3383 ));
-DATA(insert (   4081  2950  2950  2  3384 ));
-DATA(insert (   4081  2950  2950  3  3385 ));
-DATA(insert (   4081  2950  2950  4  3386 ));
+DATA(insert (	4081  2950	2950  1  3383 ));
+DATA(insert (	4081  2950	2950  2  3384 ));
+DATA(insert (	4081  2950	2950  3  3385 ));
+DATA(insert (	4081  2950	2950  4  3386 ));
 /* inclusion range types */
-DATA(insert (   4103  3831  3831  1  4105 ));
-DATA(insert (   4103  3831  3831  2  4106 ));
-DATA(insert (   4103  3831  3831  3  4107 ));
-DATA(insert (   4103  3831  3831  4  4108 ));
-DATA(insert (   4103  3831  3831  11 4057 ));
-DATA(insert (   4103  3831  3831  13 3859 ));
-DATA(insert (   4103  3831  3831  14 3850 ));
+DATA(insert (	4103  3831	3831  1  4105 ));
+DATA(insert (	4103  3831	3831  2  4106 ));
+DATA(insert (	4103  3831	3831  3  4107 ));
+DATA(insert (	4103  3831	3831  4  4108 ));
+DATA(insert (	4103  3831	3831  11 4057 ));
+DATA(insert (	4103  3831	3831  13 3859 ));
+DATA(insert (	4103  3831	3831  14 3850 ));
 /* minmax pg_lsn */
-DATA(insert (   4082  3220  3220  1  3383 ));
-DATA(insert (   4082  3220  3220  2  3384 ));
-DATA(insert (   4082  3220  3220  3  3385 ));
-DATA(insert (   4082  3220  3220  4  3386 ));
+DATA(insert (	4082  3220	3220  1  3383 ));
+DATA(insert (	4082  3220	3220  2  3384 ));
+DATA(insert (	4082  3220	3220  3  3385 ));
+DATA(insert (	4082  3220	3220  4  3386 ));
 /* inclusion box */
-DATA(insert (   4104   603   603  1  4105 ));
-DATA(insert (   4104   603   603  2  4106 ));
-DATA(insert (   4104   603   603  3  4107 ));
-DATA(insert (   4104   603   603  4  4108 ));
-DATA(insert (   4104   603   603  11 4067 ));
-DATA(insert (   4104   603   603  13  187 ));
+DATA(insert (	4104   603	 603  1  4105 ));
+DATA(insert (	4104   603	 603  2  4106 ));
+DATA(insert (	4104   603	 603  3  4107 ));
+DATA(insert (	4104   603	 603  4  4108 ));
+DATA(insert (	4104   603	 603  11 4067 ));
+DATA(insert (	4104   603	 603  13  187 ));
 
 #endif   /* PG_AMPROC_H */
diff --git a/src/include/catalog/pg_attribute.h b/src/include/catalog/pg_attribute.h
index 87a34623532..f0b28b01eb2 100644
--- a/src/include/catalog/pg_attribute.h
+++ b/src/include/catalog/pg_attribute.h
@@ -138,12 +138,12 @@ CATALOG(pg_attribute,1249) BKI_BOOTSTRAP BKI_WITHOUT_OIDS BKI_ROWTYPE_OID(75) BK
 
 	/*
 	 * This flag specifies whether this column has ever had a local
-	 * definition.  It is set for normal non-inherited columns, but also
-	 * for columns that are inherited from parents if also explicitly listed
-	 * in CREATE TABLE INHERITS.  It is also set when inheritance is removed
-	 * from a table with ALTER TABLE NO INHERIT.  If the flag is set, the
-	 * column is not dropped by a parent's DROP COLUMN even if this causes
-	 * the column's attinhcount to become zero.
+	 * definition.  It is set for normal non-inherited columns, but also for
+	 * columns that are inherited from parents if also explicitly listed in
+	 * CREATE TABLE INHERITS.  It is also set when inheritance is removed from
+	 * a table with ALTER TABLE NO INHERIT.  If the flag is set, the column is
+	 * not dropped by a parent's DROP COLUMN even if this causes the column's
+	 * attinhcount to become zero.
 	 */
 	bool		attislocal;
 
diff --git a/src/include/catalog/pg_cast.h b/src/include/catalog/pg_cast.h
index bf6ef108211..9f7733f584f 100644
--- a/src/include/catalog/pg_cast.h
+++ b/src/include/catalog/pg_cast.h
@@ -376,6 +376,6 @@ DATA(insert ( 1700 1700 1703 i f ));
 
 /* json to/from jsonb */
 DATA(insert (  114 3802    0 a i ));
-DATA(insert ( 3802  114    0 a i ));
+DATA(insert ( 3802	114    0 a i ));
 
 #endif   /* PG_CAST_H */
diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h
index 48a72628957..fea99c700ff 100644
--- a/src/include/catalog/pg_class.h
+++ b/src/include/catalog/pg_class.h
@@ -65,7 +65,7 @@ CATALOG(pg_class,1259) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83) BKI_SCHEMA_MACRO
 	bool		relhasrules;	/* has (or has had) any rules */
 	bool		relhastriggers; /* has (or has had) any TRIGGERs */
 	bool		relhassubclass; /* has (or has had) derived classes */
-	bool		relrowsecurity;	/* row security is enabled or not */
+	bool		relrowsecurity; /* row security is enabled or not */
 	bool		relispopulated; /* matview currently holds query results */
 	char		relreplident;	/* see REPLICA_IDENTITY_xxx constants  */
 	TransactionId relfrozenxid; /* all Xids < this are frozen in this rel */
diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h
index 2e4c3813612..ad1eb4b9ccd 100644
--- a/src/include/catalog/pg_control.h
+++ b/src/include/catalog/pg_control.h
@@ -46,8 +46,10 @@ typedef struct CheckPoint
 	MultiXactId oldestMulti;	/* cluster-wide minimum datminmxid */
 	Oid			oldestMultiDB;	/* database with minimum datminmxid */
 	pg_time_t	time;			/* time stamp of checkpoint */
-	TransactionId oldestCommitTs; /* oldest Xid with valid commit timestamp */
-	TransactionId newestCommitTs; /* newest Xid with valid commit timestamp */
+	TransactionId oldestCommitTs;		/* oldest Xid with valid commit
+										 * timestamp */
+	TransactionId newestCommitTs;		/* newest Xid with valid commit
+										 * timestamp */
 
 	/*
 	 * Oldest XID still running. This is only needed to initialize hot standby
diff --git a/src/include/catalog/pg_description.h b/src/include/catalog/pg_description.h
index 692455f3610..8985aed64ef 100644
--- a/src/include/catalog/pg_description.h
+++ b/src/include/catalog/pg_description.h
@@ -52,7 +52,7 @@ CATALOG(pg_description,2609) BKI_WITHOUT_OIDS
 	int32		objsubid;		/* column number, or 0 if not used */
 
 #ifdef CATALOG_VARLEN			/* variable-length fields start here */
-	text		description BKI_FORCE_NOT_NULL;	/* description of object */
+	text description BKI_FORCE_NOT_NULL;		/* description of object */
 #endif
 } FormData_pg_description;
 
diff --git a/src/include/catalog/pg_extension.h b/src/include/catalog/pg_extension.h
index 99ab35bb052..de95e481fd0 100644
--- a/src/include/catalog/pg_extension.h
+++ b/src/include/catalog/pg_extension.h
@@ -37,7 +37,7 @@ CATALOG(pg_extension,3079)
 
 #ifdef CATALOG_VARLEN			/* variable-length fields start here */
 	/* extversion may never be null, but the others can be. */
-	text		extversion BKI_FORCE_NOT_NULL;		/* extension version name */
+	text extversion BKI_FORCE_NOT_NULL; /* extension version name */
 	Oid			extconfig[1];	/* dumpable configuration tables */
 	text		extcondition[1];	/* WHERE clauses for config tables */
 #endif
diff --git a/src/include/catalog/pg_largeobject.h b/src/include/catalog/pg_largeobject.h
index 4a33752040d..d7b55faf976 100644
--- a/src/include/catalog/pg_largeobject.h
+++ b/src/include/catalog/pg_largeobject.h
@@ -34,7 +34,7 @@ CATALOG(pg_largeobject,2613) BKI_WITHOUT_OIDS
 	int32		pageno;			/* Page number (starting from 0) */
 
 	/* data has variable length, but we allow direct access; see inv_api.c */
-	bytea		data BKI_FORCE_NOT_NULL; /* Data for page (may be zero-length) */
+	bytea data	BKI_FORCE_NOT_NULL;		/* Data for page (may be zero-length) */
 } FormData_pg_largeobject;
 
 /* ----------------
diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h
index a13e0828005..e7b3148980c 100644
--- a/src/include/catalog/pg_opclass.h
+++ b/src/include/catalog/pg_opclass.h
@@ -238,15 +238,15 @@ DATA(insert (	2742	jsonb_path_ops		PGNSP PGUID 4037  3802 f 23 ));
 
 /* BRIN operator classes */
 /* no brin opclass for bool */
-DATA(insert (	3580	bytea_minmax_ops		PGNSP PGUID 4064    17 t 17 ));
-DATA(insert (	3580	char_minmax_ops			PGNSP PGUID 4062    18 t 18 ));
-DATA(insert (	3580	name_minmax_ops			PGNSP PGUID 4065    19 t 19 ));
-DATA(insert (	3580	int8_minmax_ops			PGNSP PGUID 4054    20 t 20 ));
-DATA(insert (	3580	int2_minmax_ops			PGNSP PGUID 4054    21 t 21 ));
-DATA(insert (	3580	int4_minmax_ops			PGNSP PGUID 4054    23 t 23 ));
-DATA(insert (	3580	text_minmax_ops			PGNSP PGUID 4056    25 t 25 ));
-DATA(insert (	3580	oid_minmax_ops			PGNSP PGUID 4068    26 t 26 ));
-DATA(insert (	3580	tid_minmax_ops			PGNSP PGUID 4069    27 t 27 ));
+DATA(insert (	3580	bytea_minmax_ops		PGNSP PGUID 4064	17 t 17 ));
+DATA(insert (	3580	char_minmax_ops			PGNSP PGUID 4062	18 t 18 ));
+DATA(insert (	3580	name_minmax_ops			PGNSP PGUID 4065	19 t 19 ));
+DATA(insert (	3580	int8_minmax_ops			PGNSP PGUID 4054	20 t 20 ));
+DATA(insert (	3580	int2_minmax_ops			PGNSP PGUID 4054	21 t 21 ));
+DATA(insert (	3580	int4_minmax_ops			PGNSP PGUID 4054	23 t 23 ));
+DATA(insert (	3580	text_minmax_ops			PGNSP PGUID 4056	25 t 25 ));
+DATA(insert (	3580	oid_minmax_ops			PGNSP PGUID 4068	26 t 26 ));
+DATA(insert (	3580	tid_minmax_ops			PGNSP PGUID 4069	27 t 27 ));
 DATA(insert (	3580	float4_minmax_ops		PGNSP PGUID 4070   700 t 700 ));
 DATA(insert (	3580	float8_minmax_ops		PGNSP PGUID 4070   701 t 701 ));
 DATA(insert (	3580	abstime_minmax_ops		PGNSP PGUID 4072   702 t 702 ));
diff --git a/src/include/catalog/pg_operator.h b/src/include/catalog/pg_operator.h
index 6e260cb3047..773f4fd731b 100644
--- a/src/include/catalog/pg_operator.h
+++ b/src/include/catalog/pg_operator.h
@@ -1019,9 +1019,9 @@ DATA(insert OID = 1522 (  "<->"   PGNSP PGUID b f f  600	718  701   3291    0 di
 DESCR("distance between");
 DATA(insert OID = 3291 (  "<->"   PGNSP PGUID b f f  718	600  701   1522    0 dist_cpoint - - ));
 DESCR("distance between");
-DATA(insert OID = 3276 (  "<->"	  PGNSP PGUID b f f	 600	604	 701   3289	   0 dist_ppoly - - ));
+DATA(insert OID = 3276 (  "<->"   PGNSP PGUID b f f  600	604  701   3289    0 dist_ppoly - - ));
 DESCR("distance between");
-DATA(insert OID = 3289 (  "<->"	  PGNSP PGUID b f f  604 	600  701   3276	   0 dist_polyp - - ));
+DATA(insert OID = 3289 (  "<->"   PGNSP PGUID b f f  604	600  701   3276    0 dist_polyp - - ));
 DESCR("distance between");
 DATA(insert OID = 1523 (  "<->"   PGNSP PGUID b f f  718	604  701	  0    0 dist_cpoly - - ));
 DESCR("distance between");
diff --git a/src/include/catalog/pg_pltemplate.h b/src/include/catalog/pg_pltemplate.h
index 569d724036c..754965a9a8d 100644
--- a/src/include/catalog/pg_pltemplate.h
+++ b/src/include/catalog/pg_pltemplate.h
@@ -35,10 +35,11 @@ CATALOG(pg_pltemplate,1136) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
 	bool		tmpldbacreate;	/* PL is installable by db owner? */
 
 #ifdef CATALOG_VARLEN			/* variable-length fields start here */
-	text		tmplhandler BKI_FORCE_NOT_NULL;	/* name of call handler function */
+	text tmplhandler BKI_FORCE_NOT_NULL;		/* name of call handler
+												 * function */
 	text		tmplinline;		/* name of anonymous-block handler, or NULL */
 	text		tmplvalidator;	/* name of validator function, or NULL */
-	text		tmpllibrary BKI_FORCE_NOT_NULL;	/* path of shared library */
+	text tmpllibrary BKI_FORCE_NOT_NULL;		/* path of shared library */
 	aclitem		tmplacl[1];		/* access privileges for template */
 #endif
 } FormData_pg_pltemplate;
diff --git a/src/include/catalog/pg_policy.h b/src/include/catalog/pg_policy.h
index ae71f3f3a2f..da404c61e94 100644
--- a/src/include/catalog/pg_policy.h
+++ b/src/include/catalog/pg_policy.h
@@ -1,6 +1,6 @@
 /*
  * pg_policy.h
- *   definition of the system "policy" relation (pg_policy)
+ *	 definition of the system "policy" relation (pg_policy)
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
@@ -20,14 +20,14 @@
 
 CATALOG(pg_policy,3256)
 {
-	NameData		polname;		/* Policy name. */
-	Oid				polrelid;		/* Oid of the relation with policy. */
-	char			polcmd;			/* One of ACL_*_CHR, or '*' for all */
+	NameData	polname;		/* Policy name. */
+	Oid			polrelid;		/* Oid of the relation with policy. */
+	char		polcmd;			/* One of ACL_*_CHR, or '*' for all */
 
 #ifdef CATALOG_VARLEN
-	Oid				polroles[1];	/* Roles associated with policy, not-NULL */
-	pg_node_tree	polqual;		/* Policy quals. */
-	pg_node_tree	polwithcheck;	/* WITH CHECK quals. */
+	Oid			polroles[1];	/* Roles associated with policy, not-NULL */
+	pg_node_tree polqual;		/* Policy quals. */
+	pg_node_tree polwithcheck;	/* WITH CHECK quals. */
 #endif
 } FormData_pg_policy;
 
@@ -39,7 +39,7 @@ CATALOG(pg_policy,3256)
 typedef FormData_pg_policy *Form_pg_policy;
 
 /* ----------------
- * 		compiler constants for pg_policy
+ *		compiler constants for pg_policy
  * ----------------
  */
 #define Natts_pg_policy				6
@@ -48,6 +48,6 @@ typedef FormData_pg_policy *Form_pg_policy;
 #define Anum_pg_policy_polcmd		3
 #define Anum_pg_policy_polroles		4
 #define Anum_pg_policy_polqual		5
-#define Anum_pg_policy_polwithcheck	6
+#define Anum_pg_policy_polwithcheck 6
 
-#endif  /* PG_POLICY_H */
+#endif   /* PG_POLICY_H */
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 0405027e01c..c0aab38292c 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -66,8 +66,8 @@ CATALOG(pg_proc,1255) BKI_BOOTSTRAP BKI_ROWTYPE_OID(81) BKI_SCHEMA_MACRO
 	text		proargnames[1]; /* parameter names (NULL if no names) */
 	pg_node_tree proargdefaults;/* list of expression trees for argument
 								 * defaults (NULL if none) */
-	Oid			protrftypes[1];	/* types for which to apply transforms */
-	text		prosrc BKI_FORCE_NOT_NULL; /* procedure source text */
+	Oid			protrftypes[1]; /* types for which to apply transforms */
+	text prosrc BKI_FORCE_NOT_NULL;		/* procedure source text */
 	text		probin;			/* secondary procedure info (can be NULL) */
 	text		proconfig[1];	/* procedure-local GUC settings */
 	aclitem		proacl[1];		/* access permissions */
@@ -216,9 +216,9 @@ DATA(insert OID = 1246 (  charlt		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16
 DATA(insert OID =  72 (  charle			   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ charle _null_ _null_ _null_ ));
 DATA(insert OID =  73 (  chargt			   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ chargt _null_ _null_ _null_ ));
 DATA(insert OID =  74 (  charge			   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "18 18" _null_ _null_ _null_ _null_ _null_ charge _null_ _null_ _null_ ));
-DATA(insert OID =  77 (  int4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23	"18" _null_ _null_ _null_ _null_ _null_	chartoi4 _null_ _null_ _null_ ));
+DATA(insert OID =  77 (  int4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23	"18" _null_ _null_ _null_ _null_ _null_ chartoi4 _null_ _null_ _null_ ));
 DESCR("convert char to int4");
-DATA(insert OID =  78 (  char			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 18	"23" _null_ _null_ _null_ _null_ _null_	i4tochar _null_ _null_ _null_ ));
+DATA(insert OID =  78 (  char			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 18	"23" _null_ _null_ _null_ _null_ _null_ i4tochar _null_ _null_ _null_ ));
 DESCR("convert int4 to char");
 
 DATA(insert OID =  79 (  nameregexeq	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ nameregexeq _null_ _null_ _null_ ));
@@ -267,8 +267,8 @@ DATA(insert OID =  110 (  unknownout	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0
 DESCR("I/O");
 DATA(insert OID = 111 (  numeric_fac	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_	numeric_fac _null_ _null_ _null_ ));
 
-DATA(insert OID = 115 (  box_above_eq	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_	box_above_eq _null_ _null_ _null_ ));
-DATA(insert OID = 116 (  box_below_eq	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_	box_below_eq _null_ _null_ _null_ ));
+DATA(insert OID = 115 (  box_above_eq	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_above_eq _null_ _null_ _null_ ));
+DATA(insert OID = 116 (  box_below_eq	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_below_eq _null_ _null_ _null_ ));
 
 DATA(insert OID = 117 (  point_in		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 600 "2275" _null_ _null_ _null_ _null_ _null_	point_in _null_ _null_ _null_ ));
 DESCR("I/O");
@@ -425,13 +425,13 @@ DATA(insert OID = 233 (  dexp			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701
 DESCR("natural exponential (e^x)");
 DATA(insert OID = 234 (  dlog1			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "701" _null_ _null_ _null_ _null_ _null_	dlog1 _null_ _null_ _null_ ));
 DESCR("natural logarithm");
-DATA(insert OID = 235 (  float8			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "21" _null_ _null_ _null_ _null_ _null_	i2tod _null_ _null_ _null_ ));
+DATA(insert OID = 235 (  float8			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "21" _null_ _null_ _null_ _null_ _null_ i2tod _null_ _null_ _null_ ));
 DESCR("convert int2 to float8");
-DATA(insert OID = 236 (  float4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 700 "21" _null_ _null_ _null_ _null_ _null_	i2tof _null_ _null_ _null_ ));
+DATA(insert OID = 236 (  float4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 700 "21" _null_ _null_ _null_ _null_ _null_ i2tof _null_ _null_ _null_ ));
 DESCR("convert int2 to float4");
-DATA(insert OID = 237 (  int2			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "701" _null_ _null_ _null_ _null_ _null_	dtoi2 _null_ _null_ _null_ ));
+DATA(insert OID = 237 (  int2			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "701" _null_ _null_ _null_ _null_ _null_ dtoi2 _null_ _null_ _null_ ));
 DESCR("convert float8 to int2");
-DATA(insert OID = 238 (  int2			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "700" _null_ _null_ _null_ _null_ _null_	ftoi2 _null_ _null_ _null_ ));
+DATA(insert OID = 238 (  int2			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "700" _null_ _null_ _null_ _null_ _null_ ftoi2 _null_ _null_ _null_ ));
 DESCR("convert float4 to int2");
 DATA(insert OID = 239 (  line_distance	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "628 628" _null_ _null_ _null_ _null_ _null_	line_distance _null_ _null_ _null_ ));
 
@@ -531,14 +531,14 @@ DATA(insert OID = 311 (  float8			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 70
 DESCR("convert float4 to float8");
 DATA(insert OID = 312 (  float4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 700 "701" _null_ _null_ _null_ _null_ _null_	dtof _null_ _null_ _null_ ));
 DESCR("convert float8 to float4");
-DATA(insert OID = 313 (  int4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23	"21" _null_ _null_ _null_ _null_ _null_	i2toi4 _null_ _null_ _null_ ));
+DATA(insert OID = 313 (  int4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23	"21" _null_ _null_ _null_ _null_ _null_ i2toi4 _null_ _null_ _null_ ));
 DESCR("convert int2 to int4");
-DATA(insert OID = 314 (  int2			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21	"23" _null_ _null_ _null_ _null_ _null_	i4toi2 _null_ _null_ _null_ ));
+DATA(insert OID = 314 (  int2			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21	"23" _null_ _null_ _null_ _null_ _null_ i4toi2 _null_ _null_ _null_ ));
 DESCR("convert int4 to int2");
 DATA(insert OID = 315 (  int2vectoreq	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "22 22" _null_ _null_ _null_ _null_ _null_ int2vectoreq _null_ _null_ _null_ ));
 DATA(insert OID = 316 (  float8			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701  "23" _null_ _null_ _null_ _null_ _null_	i4tod _null_ _null_ _null_ ));
 DESCR("convert int4 to float8");
-DATA(insert OID = 317 (  int4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "701" _null_ _null_ _null_ _null_ _null_	dtoi4 _null_ _null_ _null_ ));
+DATA(insert OID = 317 (  int4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "701" _null_ _null_ _null_ _null_ _null_ dtoi4 _null_ _null_ _null_ ));
 DESCR("convert float8 to int4");
 DATA(insert OID = 318 (  float4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 700  "23" _null_ _null_ _null_ _null_ _null_	i4tof _null_ _null_ _null_ ));
 DESCR("convert int4 to float4");
@@ -787,7 +787,7 @@ DATA(insert OID = 481 (  int8			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "
 DESCR("convert int4 to int8");
 DATA(insert OID = 482 (  float8			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "20" _null_ _null_ _null_ _null_ _null_ i8tod _null_ _null_ _null_ ));
 DESCR("convert int8 to float8");
-DATA(insert OID = 483 (  int8			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "701" _null_ _null_ _null_ _null_ _null_	dtoi8 _null_ _null_ _null_ ));
+DATA(insert OID = 483 (  int8			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "701" _null_ _null_ _null_ _null_ _null_ dtoi8 _null_ _null_ _null_ ));
 DESCR("convert float8 to int8");
 
 /* OIDS 500 - 599 */
@@ -799,7 +799,7 @@ DESCR("hash");
 
 DATA(insert OID = 652 (  float4			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 700 "20" _null_ _null_ _null_ _null_ _null_ i8tof _null_ _null_ _null_ ));
 DESCR("convert int8 to float4");
-DATA(insert OID = 653 (  int8			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "700" _null_ _null_ _null_ _null_ _null_	ftoi8 _null_ _null_ _null_ ));
+DATA(insert OID = 653 (  int8			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 20 "700" _null_ _null_ _null_ _null_ _null_ ftoi8 _null_ _null_ _null_ ));
 DESCR("convert float4 to int8");
 
 DATA(insert OID = 714 (  int2			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 21 "20" _null_ _null_ _null_ _null_ _null_ int82 _null_ _null_ _null_ ));
@@ -845,7 +845,7 @@ DATA(insert OID = 723 (  get_bit		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 23
 DESCR("get bit");
 DATA(insert OID = 724 (  set_bit		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 17 "17 23 23" _null_ _null_ _null_ _null_ _null_	byteaSetBit _null_ _null_ _null_ ));
 DESCR("set bit");
-DATA(insert OID = 749 (  overlay		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 4 0 17 "17 17 23 23" _null_ _null_ _null_ _null_ _null_	byteaoverlay _null_ _null_ _null_ ));
+DATA(insert OID = 749 (  overlay		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 4 0 17 "17 17 23 23" _null_ _null_ _null_ _null_ _null_ byteaoverlay _null_ _null_ _null_ ));
 DESCR("substitute portion of string");
 DATA(insert OID = 752 (  overlay		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 17 "17 17 23" _null_ _null_ _null_ _null_ _null_	byteaoverlay_no_len _null_ _null_ _null_ ));
 DESCR("substitute portion of string");
@@ -857,7 +857,7 @@ DATA(insert OID = 728 (  dist_cpoly		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0
 DATA(insert OID = 729 (  poly_distance	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "604 604" _null_ _null_ _null_ _null_ _null_	poly_distance _null_ _null_ _null_ ));
 DATA(insert OID = 3275 (  dist_ppoly	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "600 604" _null_ _null_ _null_ _null_ _null_	dist_ppoly _null_ _null_ _null_ ));
 DATA(insert OID = 3292 (  dist_polyp	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "604 600" _null_ _null_ _null_ _null_ _null_	dist_polyp _null_ _null_ _null_ ));
-DATA(insert OID = 3290 (  dist_cpoint	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "718 600" _null_ _null_ _null_ _null_ _null_ 	dist_cpoint _null_ _null_ _null_ ));
+DATA(insert OID = 3290 (  dist_cpoint	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "718 600" _null_ _null_ _null_ _null_ _null_	dist_cpoint _null_ _null_ _null_ ));
 
 DATA(insert OID = 740 (  text_lt		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_lt _null_ _null_ _null_ ));
 DATA(insert OID = 741 (  text_le		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_le _null_ _null_ _null_ ));
@@ -1000,7 +1000,7 @@ DATA(insert OID = 776 (  gistbulkdelete    PGNSP PGUID 12 1 0 0 0 f f f f t f v
 DESCR("gist(internal)");
 DATA(insert OID = 2561 (  gistvacuumcleanup   PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ gistvacuumcleanup _null_ _null_ _null_ ));
 DESCR("gist(internal)");
-DATA(insert OID = 3280 (  gistcanreturn	   PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 16 "2281 23" _null_ _null_ _null_ _null_ _null_ gistcanreturn _null_ _null_ _null_ ));
+DATA(insert OID = 3280 (  gistcanreturn    PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 16 "2281 23" _null_ _null_ _null_ _null_ _null_ gistcanreturn _null_ _null_ _null_ ));
 DESCR("gist(internal)");
 DATA(insert OID = 772 (  gistcostestimate  PGNSP PGUID 12 1 0 0 0 f f f f t f v 7 0 2278 "2281 2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gistcostestimate _null_ _null_ _null_ ));
 DESCR("gist(internal)");
@@ -1054,12 +1054,12 @@ DATA(insert OID =  886 (  cash_in		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 7
 DESCR("I/O");
 DATA(insert OID =  887 (  cash_out		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 2275 "790" _null_ _null_ _null_ _null_ _null_	cash_out _null_ _null_ _null_ ));
 DESCR("I/O");
-DATA(insert OID =  888 (  cash_eq		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_	cash_eq _null_ _null_ _null_ ));
-DATA(insert OID =  889 (  cash_ne		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_	cash_ne _null_ _null_ _null_ ));
-DATA(insert OID =  890 (  cash_lt		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_	cash_lt _null_ _null_ _null_ ));
-DATA(insert OID =  891 (  cash_le		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_	cash_le _null_ _null_ _null_ ));
-DATA(insert OID =  892 (  cash_gt		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_	cash_gt _null_ _null_ _null_ ));
-DATA(insert OID =  893 (  cash_ge		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_	cash_ge _null_ _null_ _null_ ));
+DATA(insert OID =  888 (  cash_eq		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_eq _null_ _null_ _null_ ));
+DATA(insert OID =  889 (  cash_ne		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_ne _null_ _null_ _null_ ));
+DATA(insert OID =  890 (  cash_lt		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_lt _null_ _null_ _null_ ));
+DATA(insert OID =  891 (  cash_le		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_le _null_ _null_ _null_ ));
+DATA(insert OID =  892 (  cash_gt		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_gt _null_ _null_ _null_ ));
+DATA(insert OID =  893 (  cash_ge		   PGNSP PGUID 12 1 0 0 0 f f f t t f i 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_ge _null_ _null_ _null_ ));
 DATA(insert OID =  894 (  cash_pl		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 790 "790 790" _null_ _null_ _null_ _null_ _null_	cash_pl _null_ _null_ _null_ ));
 DATA(insert OID =  895 (  cash_mi		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 790 "790 790" _null_ _null_ _null_ _null_ _null_	cash_mi _null_ _null_ _null_ ));
 DATA(insert OID =  896 (  cash_mul_flt8    PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 790 "790 701" _null_ _null_ _null_ _null_ _null_	cash_mul_flt8 _null_ _null_ _null_ ));
@@ -1069,16 +1069,16 @@ DESCR("larger of two");
 DATA(insert OID =  899 (  cashsmaller	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 790 "790 790" _null_ _null_ _null_ _null_ _null_	cashsmaller _null_ _null_ _null_ ));
 DESCR("smaller of two");
 DATA(insert OID =  919 (  flt8_mul_cash    PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 790 "701 790" _null_ _null_ _null_ _null_ _null_	flt8_mul_cash _null_ _null_ _null_ ));
-DATA(insert OID =  935 (  cash_words	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "790" _null_ _null_ _null_ _null_ _null_	cash_words _null_ _null_ _null_ ));
+DATA(insert OID =  935 (  cash_words	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "790" _null_ _null_ _null_ _null_ _null_ cash_words _null_ _null_ _null_ ));
 DESCR("output money amount as words");
 DATA(insert OID = 3822 (  cash_div_cash    PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "790 790" _null_ _null_ _null_ _null_ _null_	cash_div_cash _null_ _null_ _null_ ));
 DATA(insert OID = 3823 (  numeric		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 1700 "790" _null_ _null_ _null_ _null_ _null_	cash_numeric _null_ _null_ _null_ ));
 DESCR("convert money to numeric");
 DATA(insert OID = 3824 (  money			   PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 790 "1700" _null_ _null_ _null_ _null_ _null_	numeric_cash _null_ _null_ _null_ ));
 DESCR("convert numeric to money");
-DATA(insert OID = 3811 (  money			   PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 790 "23" _null_ _null_ _null_ _null_ _null_	int4_cash _null_ _null_ _null_ ));
+DATA(insert OID = 3811 (  money			   PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 790 "23" _null_ _null_ _null_ _null_ _null_ int4_cash _null_ _null_ _null_ ));
 DESCR("convert int4 to money");
-DATA(insert OID = 3812 (  money			   PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 790 "20" _null_ _null_ _null_ _null_ _null_	int8_cash _null_ _null_ _null_ ));
+DATA(insert OID = 3812 (  money			   PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 790 "20" _null_ _null_ _null_ _null_ _null_ int8_cash _null_ _null_ _null_ ));
 DESCR("convert int8 to money");
 
 /* OIDS 900 - 999 */
@@ -1131,8 +1131,8 @@ DESCR("read large object from offset for length");
 DATA(insert OID = 3460 (  lo_put		   PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "26 20 17" _null_ _null_ _null_ _null_ _null_ lo_put _null_ _null_ _null_ ));
 DESCR("write data at offset");
 
-DATA(insert OID = 959 (  on_pl			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "600 628" _null_ _null_ _null_ _null_ _null_	on_pl _null_ _null_ _null_ ));
-DATA(insert OID = 960 (  on_sl			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "601 628" _null_ _null_ _null_ _null_ _null_	on_sl _null_ _null_ _null_ ));
+DATA(insert OID = 959 (  on_pl			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "600 628" _null_ _null_ _null_ _null_ _null_ on_pl _null_ _null_ _null_ ));
+DATA(insert OID = 960 (  on_sl			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "601 628" _null_ _null_ _null_ _null_ _null_ on_sl _null_ _null_ _null_ ));
 DATA(insert OID = 961 (  close_pl		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 600 "600 628" _null_ _null_ _null_ _null_ _null_	close_pl _null_ _null_ _null_ ));
 DATA(insert OID = 962 (  close_sl		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 600 "601 628" _null_ _null_ _null_ _null_ _null_	close_sl _null_ _null_ _null_ ));
 DATA(insert OID = 963 (  close_lb		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 600 "628 603" _null_ _null_ _null_ _null_ _null_	close_lb _null_ _null_ _null_ ));
@@ -1140,7 +1140,7 @@ DATA(insert OID = 963 (  close_lb		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 6
 DATA(insert OID = 964 (  lo_unlink		   PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 23 "26" _null_ _null_ _null_ _null_ _null_ lo_unlink _null_ _null_ _null_ ));
 DESCR("large object unlink (delete)");
 
-DATA(insert OID = 973 (  path_inter		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "602 602" _null_ _null_ _null_ _null_ _null_	path_inter _null_ _null_ _null_ ));
+DATA(insert OID = 973 (  path_inter		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "602 602" _null_ _null_ _null_ _null_ _null_ path_inter _null_ _null_ _null_ ));
 DATA(insert OID = 975 (  area			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "603" _null_ _null_ _null_ _null_ _null_	box_area _null_ _null_ _null_ ));
 DESCR("box area");
 DATA(insert OID = 976 (  width			   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 701 "603" _null_ _null_ _null_ _null_ _null_	box_width _null_ _null_ _null_ ));
@@ -1571,7 +1571,7 @@ DESCR("convert abstime to time");
 
 DATA(insert OID = 1367 (  character_length	PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_	bpcharlen _null_ _null_ _null_ ));
 DESCR("character length");
-DATA(insert OID = 1369 (  character_length	PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "25" _null_ _null_ _null_ _null_ _null_	textlen _null_ _null_ _null_ ));
+DATA(insert OID = 1369 (  character_length	PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ textlen _null_ _null_ _null_ ));
 DESCR("character length");
 
 DATA(insert OID = 1370 (  interval			 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 1186 "1083" _null_ _null_ _null_ _null_ _null_	time_interval _null_ _null_ _null_ ));
@@ -2046,7 +2046,7 @@ DATA(insert OID = 1716 (  pg_get_expr		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 2
 DESCR("deparse an encoded expression");
 DATA(insert OID = 1665 (  pg_get_serial_sequence	PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ pg_get_serial_sequence _null_ _null_ _null_ ));
 DESCR("name of sequence for a serial column");
-DATA(insert OID = 2098 (  pg_get_functiondef	PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_	pg_get_functiondef _null_ _null_ _null_ ));
+DATA(insert OID = 2098 (  pg_get_functiondef	PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_functiondef _null_ _null_ _null_ ));
 DESCR("definition of a function");
 DATA(insert OID = 2162 (  pg_get_function_arguments    PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 25 "26" _null_ _null_ _null_ _null_ _null_ pg_get_function_arguments _null_ _null_ _null_ ));
 DESCR("argument list of a function");
@@ -2412,9 +2412,9 @@ DATA(insert OID = 1773 ( to_char			PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "
 DESCR("format int4 to text");
 DATA(insert OID = 1774 ( to_char			PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "20 25" _null_ _null_ _null_ _null_ _null_ int8_to_char _null_ _null_ _null_ ));
 DESCR("format int8 to text");
-DATA(insert OID = 1775 ( to_char			PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "700 25" _null_ _null_ _null_ _null_ _null_	float4_to_char _null_ _null_ _null_ ));
+DATA(insert OID = 1775 ( to_char			PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "700 25" _null_ _null_ _null_ _null_ _null_ float4_to_char _null_ _null_ _null_ ));
 DESCR("format float4 to text");
-DATA(insert OID = 1776 ( to_char			PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "701 25" _null_ _null_ _null_ _null_ _null_	float8_to_char _null_ _null_ _null_ ));
+DATA(insert OID = 1776 ( to_char			PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 25 "701 25" _null_ _null_ _null_ _null_ _null_ float8_to_char _null_ _null_ _null_ ));
 DESCR("format float8 to text");
 DATA(insert OID = 1777 ( to_number			PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 1700 "25 25" _null_ _null_ _null_ _null_  _null_ numeric_to_number _null_ _null_ _null_ ));
 DESCR("convert text to numeric");
@@ -2552,7 +2552,7 @@ DATA(insert OID = 3388 (  numeric_poly_sum	   PGNSP PGUID 12 1 0 0 0 f f f f f f
 DESCR("aggregate final function");
 DATA(insert OID = 3389 (  numeric_poly_avg	   PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_avg _null_ _null_ _null_ ));
 DESCR("aggregate final function");
-DATA(insert OID = 3390 (  numeric_poly_var_pop  PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ ));
+DATA(insert OID = 3390 (  numeric_poly_var_pop	PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_pop _null_ _null_ _null_ ));
 DESCR("aggregate final function");
 DATA(insert OID = 3391 (  numeric_poly_var_samp PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_var_samp _null_ _null_ _null_ ));
 DESCR("aggregate final function");
@@ -2620,7 +2620,7 @@ DATA(insert OID = 3545 (  string_agg				PGNSP PGUID 12 1 0 0 0 t f f f f f i 2 0
 DESCR("concatenate aggregate input into a bytea");
 
 /* To ASCII conversion */
-DATA(insert OID = 1845 ( to_ascii	PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "25" _null_ _null_ _null_ _null_ _null_	to_ascii_default _null_ _null_ _null_ ));
+DATA(insert OID = 1845 ( to_ascii	PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ to_ascii_default _null_ _null_ _null_ ));
 DESCR("encode text from DB encoding to ASCII text");
 DATA(insert OID = 1846 ( to_ascii	PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ to_ascii_enc _null_ _null_ _null_ ));
 DESCR("encode text from encoding to ASCII text");
@@ -2697,21 +2697,21 @@ DESCR("current user privilege on sequence by seq name");
 DATA(insert OID = 2186 (  has_sequence_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_sequence_privilege_id _null_ _null_ _null_ ));
 DESCR("current user privilege on sequence by seq oid");
 
-DATA(insert OID = 3012 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 25 25 25" _null_ _null_ _null_ _null_ _null_	has_column_privilege_name_name_name _null_ _null_ _null_ ));
+DATA(insert OID = 3012 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name_name _null_ _null_ _null_ ));
 DESCR("user privilege on column by username, rel name, col name");
-DATA(insert OID = 3013 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 25 21 25" _null_ _null_ _null_ _null_ _null_	has_column_privilege_name_name_attnum _null_ _null_ _null_ ));
+DATA(insert OID = 3013 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 25 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name_attnum _null_ _null_ _null_ ));
 DESCR("user privilege on column by username, rel name, col attnum");
-DATA(insert OID = 3014 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 26 25 25" _null_ _null_ _null_ _null_ _null_	has_column_privilege_name_id_name _null_ _null_ _null_ ));
+DATA(insert OID = 3014 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 26 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_id_name _null_ _null_ _null_ ));
 DESCR("user privilege on column by username, rel oid, col name");
-DATA(insert OID = 3015 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 26 21 25" _null_ _null_ _null_ _null_ _null_	has_column_privilege_name_id_attnum _null_ _null_ _null_ ));
+DATA(insert OID = 3015 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "19 26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_id_attnum _null_ _null_ _null_ ));
 DESCR("user privilege on column by username, rel oid, col attnum");
-DATA(insert OID = 3016 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 25 25 25" _null_ _null_ _null_ _null_ _null_	has_column_privilege_id_name_name _null_ _null_ _null_ ));
+DATA(insert OID = 3016 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_name_name _null_ _null_ _null_ ));
 DESCR("user privilege on column by user oid, rel name, col name");
-DATA(insert OID = 3017 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 25 21 25" _null_ _null_ _null_ _null_ _null_	has_column_privilege_id_name_attnum _null_ _null_ _null_ ));
+DATA(insert OID = 3017 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 25 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_name_attnum _null_ _null_ _null_ ));
 DESCR("user privilege on column by user oid, rel name, col attnum");
-DATA(insert OID = 3018 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 26 25 25" _null_ _null_ _null_ _null_ _null_	has_column_privilege_id_id_name _null_ _null_ _null_ ));
+DATA(insert OID = 3018 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 26 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_id_name _null_ _null_ _null_ ));
 DESCR("user privilege on column by user oid, rel oid, col name");
-DATA(insert OID = 3019 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 26 21 25" _null_ _null_ _null_ _null_ _null_	has_column_privilege_id_id_attnum _null_ _null_ _null_ ));
+DATA(insert OID = 3019 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 4 0 16 "26 26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_id_attnum _null_ _null_ _null_ ));
 DESCR("user privilege on column by user oid, rel oid, col attnum");
 DATA(insert OID = 3020 (  has_column_privilege		   PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 16 "25 25 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_name_name _null_ _null_ _null_ ));
 DESCR("current user privilege on column by rel name, col name");
@@ -2906,9 +2906,9 @@ DESCR("statistics: self execution time of function in current transaction, in ms
 
 DATA(insert OID = 3788 (  pg_stat_get_snapshot_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s 0 0 1184 "" _null_ _null_ _null_ _null_ _null_	pg_stat_get_snapshot_timestamp _null_ _null_ _null_ ));
 DESCR("statistics: timestamp of the current statistics snapshot");
-DATA(insert OID = 2230 (  pg_stat_clear_snapshot		PGNSP PGUID 12 1 0 0 0 f f f f f f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_	pg_stat_clear_snapshot _null_ _null_ _null_ ));
+DATA(insert OID = 2230 (  pg_stat_clear_snapshot		PGNSP PGUID 12 1 0 0 0 f f f f f f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_stat_clear_snapshot _null_ _null_ _null_ ));
 DESCR("statistics: discard current transaction's statistics snapshot");
-DATA(insert OID = 2274 (  pg_stat_reset					PGNSP PGUID 12 1 0 0 0 f f f f f f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_	pg_stat_reset _null_ _null_ _null_ ));
+DATA(insert OID = 2274 (  pg_stat_reset					PGNSP PGUID 12 1 0 0 0 f f f f f f v 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_stat_reset _null_ _null_ _null_ ));
 DESCR("statistics: reset collected statistics for current database");
 DATA(insert OID = 3775 (  pg_stat_reset_shared			PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_	pg_stat_reset_shared _null_ _null_ _null_ ));
 DESCR("statistics: reset collected statistics shared across the cluster");
@@ -3071,7 +3071,7 @@ DATA(insert OID = 2078 (  set_config		PGNSP PGUID 12 1 0 0 0 f f f f f f v 3 0 2
 DESCR("SET X as a function");
 DATA(insert OID = 2084 (  pg_show_all_settings	PGNSP PGUID 12 1 1000 0 0 f f f f t t s 0 0 2249 "" "{25,25,25,25,25,25,25,25,25,25,25,1009,25,25,25,23,16}" "{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{name,setting,unit,category,short_desc,extra_desc,context,vartype,source,min_val,max_val,enumvals,boot_val,reset_val,sourcefile,sourceline,pending_restart}" _null_ _null_ show_all_settings _null_ _null_ _null_ ));
 DESCR("SHOW ALL as a function");
-DATA(insert OID = 3329 (  pg_show_all_file_settings	PGNSP PGUID 12 1 1000 0 0 f f f f t t s 0 0 2249 "" "{25,23,23,25,25}" "{o,o,o,o,o}" "{sourcefile,sourceline,seqno,name,setting}" _null_ _null_ show_all_file_settings _null_ _null_ _null_ ));
+DATA(insert OID = 3329 (  pg_show_all_file_settings PGNSP PGUID 12 1 1000 0 0 f f f f t t s 0 0 2249 "" "{25,23,23,25,25}" "{o,o,o,o,o}" "{sourcefile,sourceline,seqno,name,setting}" _null_ _null_ show_all_file_settings _null_ _null_ _null_ ));
 DESCR("show config file settings");
 DATA(insert OID = 1371 (  pg_lock_status   PGNSP PGUID 12 1 1000 0 0 f f f f t t v 0 0 2249 "" "{25,26,26,23,21,25,28,26,26,21,25,23,25,16,16}" "{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{locktype,database,relation,page,tuple,virtualxid,transactionid,classid,objid,objsubid,virtualtransaction,pid,mode,granted,fastpath}" _null_ _null_ pg_lock_status _null_ _null_ _null_ ));
 DESCR("view system lock information");
@@ -3092,7 +3092,7 @@ DESCR("get identification of SQL object");
 DATA(insert OID = 3839 (  pg_identify_object		PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 2249 "26 26 23" "{26,26,23,25,25,25,25}" "{i,i,i,o,o,o,o}" "{classid,objid,subobjid,type,schema,name,identity}" _null_ _null_ pg_identify_object _null_ _null_ _null_ ));
 DESCR("get machine-parseable identification of SQL object");
 
-DATA(insert OID = 3382 (  pg_identify_object_as_address	PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 2249 "26 26 23" "{26,26,23,25,1009,1009}" "{i,i,i,o,o,o}" "{classid,objid,subobjid,type,object_names,object_args}" _null_ _null_ pg_identify_object_as_address _null_ _null_ _null_ ));
+DATA(insert OID = 3382 (  pg_identify_object_as_address PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 2249 "26 26 23" "{26,26,23,25,1009,1009}" "{i,i,i,o,o,o}" "{classid,objid,subobjid,type,object_names,object_args}" _null_ _null_ pg_identify_object_as_address _null_ _null_ _null_ ));
 DESCR("get identification of SQL object for pg_get_object_address()");
 
 DATA(insert OID = 3954 (  pg_get_object_address    PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 2249 "25 1009 1009" "{25,1009,1009,26,26,23}" "{i,i,i,o,o,o}" "{type,name,args,classid,objid,subobjid}" _null_ _null_ pg_get_object_address _null_ _null_ _null_ ));
@@ -3902,9 +3902,9 @@ DESCR("I/O");
 DATA(insert OID = 2455 (  regtypesend		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "2206" _null_ _null_ _null_ _null_ _null_	regtypesend _null_ _null_ _null_ ));
 DESCR("I/O");
 
-DATA(insert OID = 4094 (  regrolerecv	  	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 4096 "2281" _null_ _null_ _null_ _null_ _null_	regrolerecv _null_ _null_ _null_ ));
+DATA(insert OID = 4094 (  regrolerecv		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 4096 "2281" _null_ _null_ _null_ _null_ _null_	regrolerecv _null_ _null_ _null_ ));
 DESCR("I/O");
-DATA(insert OID = 4095 (  regrolesend	       PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "4096" _null_ _null_ _null_ _null_ _null_	regrolesend _null_ _null_ _null_ ));
+DATA(insert OID = 4095 (  regrolesend		   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 17 "4096" _null_ _null_ _null_ _null_ _null_	regrolesend _null_ _null_ _null_ ));
 DESCR("I/O");
 DATA(insert OID = 4087 (  regnamespacerecv	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 4089 "2281" _null_ _null_ _null_ _null_ _null_ regnamespacerecv _null_ _null_ _null_ ));
 DESCR("I/O");
@@ -4232,7 +4232,7 @@ DATA(insert OID = 4106 ( brin_inclusion_add_value PGNSP PGUID 12 1 0 0 0 f f f f
 DESCR("BRIN inclusion support");
 DATA(insert OID = 4107 ( brin_inclusion_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 16 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_inclusion_consistent _null_ _null_ _null_ ));
 DESCR("BRIN inclusion support");
-DATA(insert OID = 4108 ( brin_inclusion_union 	PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 16 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_inclusion_union _null_ _null_ _null_ ));
+DATA(insert OID = 4108 ( brin_inclusion_union	PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 16 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ brin_inclusion_union _null_ _null_ _null_ ));
 DESCR("BRIN inclusion support");
 
 /* userlock replacements */
@@ -4574,7 +4574,7 @@ DATA(insert OID = 3657 (  gin_extract_tsquery	PGNSP PGUID 12 1 0 0 0 f f f f t f
 DESCR("GIN tsvector support");
 DATA(insert OID = 3658 (  gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 8 0 16 "2281 21 3615 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_	gin_tsquery_consistent _null_ _null_ _null_ ));
 DESCR("GIN tsvector support");
-DATA(insert OID = 3921 (  gin_tsquery_triconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 7 0 18 "2281 21 3615 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_	gin_tsquery_triconsistent _null_ _null_ _null_ ));
+DATA(insert OID = 3921 (  gin_tsquery_triconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 7 0 18 "2281 21 3615 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_triconsistent _null_ _null_ _null_ ));
 DESCR("GIN tsvector support");
 DATA(insert OID = 3724 (  gin_cmp_tslexeme		PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 23 "25 25" _null_ _null_ _null_ _null_ _null_ gin_cmp_tslexeme _null_ _null_ _null_ ));
 DESCR("GIN tsvector support");
@@ -4584,7 +4584,7 @@ DATA(insert OID = 3077 (  gin_extract_tsvector	PGNSP PGUID 12 1 0 0 0 f f f f t
 DESCR("GIN tsvector support (obsolete)");
 DATA(insert OID = 3087 (  gin_extract_tsquery	PGNSP PGUID 12 1 0 0 0 f f f f t f i 5 0 2281 "3615 2281 21 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsquery_5args _null_ _null_ _null_ ));
 DESCR("GIN tsvector support (obsolete)");
-DATA(insert OID = 3088 (  gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 6 0 16 "2281 21 3615 23 2281 2281" _null_ _null_ _null_ _null_ _null_	gin_tsquery_consistent_6args _null_ _null_ _null_ ));
+DATA(insert OID = 3088 (  gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 6 0 16 "2281 21 3615 23 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent_6args _null_ _null_ _null_ ));
 DESCR("GIN tsvector support (obsolete)");
 
 DATA(insert OID = 3662 (  tsquery_lt			PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "3615 3615" _null_ _null_ _null_ _null_ _null_ tsquery_lt _null_ _null_ _null_ ));
@@ -4764,9 +4764,9 @@ DATA(insert OID = 3264 (  jsonb_object	 PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0
 DESCR("map text array of key value pairs to jsonb object");
 DATA(insert OID = 3787 (  to_jsonb	   PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 3802 "2283" _null_ _null_ _null_ _null_ _null_ to_jsonb _null_ _null_ _null_ ));
 DESCR("map input to jsonb");
-DATA(insert OID = 3265 (  jsonb_agg_transfn	 PGNSP PGUID 12 1 0 0 0 f f f f f f s 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ _null_ jsonb_agg_transfn _null_ _null_ _null_ ));
+DATA(insert OID = 3265 (  jsonb_agg_transfn  PGNSP PGUID 12 1 0 0 0 f f f f f f s 2 0 2281 "2281 2283" _null_ _null_ _null_ _null_ _null_ jsonb_agg_transfn _null_ _null_ _null_ ));
 DESCR("jsonb aggregate transition function");
-DATA(insert OID = 3266 (  jsonb_agg_finalfn	 PGNSP PGUID 12 1 0 0 0 f f f f f f s 1 0 3802 "2281" _null_ _null_ _null_ _null_ _null_ jsonb_agg_finalfn _null_ _null_ _null_ ));
+DATA(insert OID = 3266 (  jsonb_agg_finalfn  PGNSP PGUID 12 1 0 0 0 f f f f f f s 1 0 3802 "2281" _null_ _null_ _null_ _null_ _null_ jsonb_agg_finalfn _null_ _null_ _null_ ));
 DESCR("jsonb aggregate final function");
 DATA(insert OID = 3267 (  jsonb_agg		   PGNSP PGUID 12 1 0 0 0 t f f f f f s 1 0 3802 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
 DESCR("aggregate input into jsonb");
@@ -4776,15 +4776,15 @@ DATA(insert OID = 3269 (  jsonb_object_agg_finalfn	 PGNSP PGUID 12 1 0 0 0 f f f
 DESCR("jsonb object aggregate final function");
 DATA(insert OID = 3270 (  jsonb_object_agg		   PGNSP PGUID 12 1 0 0 0 t f f f f f i 2 0 3802 "2276 2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
 DESCR("aggregate inputs into jsonb object");
-DATA(insert OID = 3271 (  jsonb_build_array	   PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_array _null_ _null_ _null_ ));
+DATA(insert OID = 3271 (  jsonb_build_array    PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_array _null_ _null_ _null_ ));
 DESCR("build a jsonb array from any inputs");
-DATA(insert OID = 3272 (  jsonb_build_array	   PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 3802  "" _null_ _null_ _null_ _null_ _null_ jsonb_build_array_noargs _null_ _null_ _null_ ));
+DATA(insert OID = 3272 (  jsonb_build_array    PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 3802  "" _null_ _null_ _null_ _null_ _null_ jsonb_build_array_noargs _null_ _null_ _null_ ));
 DESCR("build an empty jsonb array");
-DATA(insert OID = 3273 (  jsonb_build_object    PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_object _null_ _null_ _null_ ));
+DATA(insert OID = 3273 (  jsonb_build_object	PGNSP PGUID 12 1 0 2276 0 f f f f f f s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_object _null_ _null_ _null_ ));
 DESCR("build a jsonb object from pairwise key/value inputs");
-DATA(insert OID = 3274 (  jsonb_build_object    PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 3802  "" _null_ _null_ _null_ _null_ _null_ jsonb_build_object_noargs _null_ _null_ _null_ ));
+DATA(insert OID = 3274 (  jsonb_build_object	PGNSP PGUID 12 1 0 0 0 f f f f f f s 0 0 3802  "" _null_ _null_ _null_ _null_ _null_ jsonb_build_object_noargs _null_ _null_ _null_ ));
 DESCR("build an empty jsonb object");
-DATA(insert OID = 3262 (  jsonb_strip_nulls	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 3802 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_strip_nulls _null_ _null_ _null_ ));
+DATA(insert OID = 3262 (  jsonb_strip_nulls    PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 3802 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_strip_nulls _null_ _null_ _null_ ));
 DESCR("remove object fields with null values from jsonb");
 
 DATA(insert OID = 3478 (  jsonb_object_field			PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 3802 "3802 25" _null_ _null_ "{from_json, field_name}" _null_ _null_ jsonb_object_field _null_ _null_ _null_ ));
@@ -4859,7 +4859,7 @@ DATA(insert OID = 3301 (  jsonb_concat	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2
 DATA(insert OID = 3302 (  jsonb_delete	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 3802 "3802 25" _null_ _null_ _null_ _null_ _null_ jsonb_delete _null_ _null_ _null_ ));
 DATA(insert OID = 3303 (  jsonb_delete	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 3802 "3802 23" _null_ _null_ _null_ _null_ _null_ jsonb_delete_idx _null_ _null_ _null_ ));
 DATA(insert OID = 3304 (  jsonb_delete	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 3802 "3802 1009" _null_ _null_ _null_ _null_ _null_ jsonb_delete_path _null_ _null_ _null_ ));
-DATA(insert OID = 3305 (  jsonb_replace	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 3802 "3802 1009 3802" _null_ _null_ _null_ _null_ _null_ jsonb_replace _null_ _null_ _null_ ));
+DATA(insert OID = 3305 (  jsonb_replace    PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 3802 "3802 1009 3802" _null_ _null_ _null_ _null_ _null_ jsonb_replace _null_ _null_ _null_ ));
 DESCR("Replace part of a jsonb");
 DATA(insert OID = 3306 (  jsonb_pretty	   PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_pretty _null_ _null_ _null_ ));
 DESCR("Indented text from jsonb");
@@ -5227,7 +5227,7 @@ DATA(insert OID = 3982 ( percentile_cont		PGNSP PGUID 12 1 0 0 0 t f f f f f i 2
 DESCR("multiple continuous percentiles");
 DATA(insert OID = 3983 ( percentile_cont_interval_multi_final	PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 1187 "2281 1022" _null_ _null_ _null_ _null_ _null_ percentile_cont_interval_multi_final _null_ _null_ _null_ ));
 DESCR("aggregate final function");
-DATA(insert OID = 3984 ( mode					PGNSP PGUID 12 1 0 0 0 t f f f f f i 1 0 2283 "2283" _null_ _null_ _null_ _null_ _null_	aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 3984 ( mode					PGNSP PGUID 12 1 0 0 0 t f f f f f i 1 0 2283 "2283" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
 DESCR("most common value");
 DATA(insert OID = 3985 ( mode_final				PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 2283 "2281 2283" _null_ _null_ _null_ _null_ _null_	mode_final _null_ _null_ _null_ ));
 DESCR("aggregate final function");
@@ -5253,11 +5253,11 @@ DESCR("aggregate final function");
 /* pg_upgrade support */
 DATA(insert OID = 3582 ( binary_upgrade_set_next_pg_type_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_type_oid _null_ _null_ _null_ ));
 DESCR("for use by pg_upgrade");
-DATA(insert OID = 3584 ( binary_upgrade_set_next_array_pg_type_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_array_pg_type_oid _null_ _null_ _null_ ));
+DATA(insert OID = 3584 ( binary_upgrade_set_next_array_pg_type_oid PGNSP PGUID	12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_array_pg_type_oid _null_ _null_ _null_ ));
 DESCR("for use by pg_upgrade");
-DATA(insert OID = 3585 ( binary_upgrade_set_next_toast_pg_type_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_type_oid _null_ _null_ _null_ ));
+DATA(insert OID = 3585 ( binary_upgrade_set_next_toast_pg_type_oid PGNSP PGUID	12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_toast_pg_type_oid _null_ _null_ _null_ ));
 DESCR("for use by pg_upgrade");
-DATA(insert OID = 3586 ( binary_upgrade_set_next_heap_pg_class_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_heap_pg_class_oid _null_ _null_ _null_ ));
+DATA(insert OID = 3586 ( binary_upgrade_set_next_heap_pg_class_oid PGNSP PGUID	12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_heap_pg_class_oid _null_ _null_ _null_ ));
 DESCR("for use by pg_upgrade");
 DATA(insert OID = 3587 ( binary_upgrade_set_next_index_pg_class_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_index_pg_class_oid _null_ _null_ _null_ ));
 DESCR("for use by pg_upgrade");
@@ -5265,9 +5265,9 @@ DATA(insert OID = 3588 ( binary_upgrade_set_next_toast_pg_class_oid PGNSP PGUID
 DESCR("for use by pg_upgrade");
 DATA(insert OID = 3589 ( binary_upgrade_set_next_pg_enum_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_enum_oid _null_ _null_ _null_ ));
 DESCR("for use by pg_upgrade");
-DATA(insert OID = 3590 ( binary_upgrade_set_next_pg_authid_oid PGNSP PGUID  12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_authid_oid _null_ _null_ _null_ ));
+DATA(insert OID = 3590 ( binary_upgrade_set_next_pg_authid_oid PGNSP PGUID	12 1 0 0 0 f f f f t f v 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ binary_upgrade_set_next_pg_authid_oid _null_ _null_ _null_ ));
 DESCR("for use by pg_upgrade");
-DATA(insert OID = 3591 ( binary_upgrade_create_empty_extension PGNSP PGUID  12 1 0 0 0 f f f f f f v 7 0 2278 "25 25 16 25 1028 1009 1009" _null_ _null_ _null_ _null_ _null_ binary_upgrade_create_empty_extension _null_ _null_ _null_ ));
+DATA(insert OID = 3591 ( binary_upgrade_create_empty_extension PGNSP PGUID	12 1 0 0 0 f f f f f f v 7 0 2278 "25 25 16 25 1028 1009 1009" _null_ _null_ _null_ _null_ _null_ binary_upgrade_create_empty_extension _null_ _null_ _null_ ));
 DESCR("for use by pg_upgrade");
 
 /* replication/origin.h */
@@ -5308,11 +5308,11 @@ DATA(insert OID = 6014 ( pg_show_replication_origin_status PGNSP PGUID 12 1 100
 DESCR("get progress for all replication origins");
 
 /* tablesample */
-DATA(insert OID = 3335 (  tsm_system_init		PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "2281 23 700" _null_ _null_ _null_ _null_	_null_ tsm_system_init _null_ _null_ _null_ ));
+DATA(insert OID = 3335 (  tsm_system_init		PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "2281 23 700" _null_ _null_ _null_ _null_ _null_ tsm_system_init _null_ _null_ _null_ ));
 DESCR("tsm_system_init(internal)");
 DATA(insert OID = 3336 (  tsm_system_nextblock	PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 23 "2281 16" _null_ _null_ _null_ _null_ _null_ tsm_system_nextblock _null_ _null_ _null_ ));
 DESCR("tsm_system_nextblock(internal)");
-DATA(insert OID = 3337 (  tsm_system_nexttuple	PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 21 "2281 23 21 16" _null_ _null_ _null_ _null_	_null_ tsm_system_nexttuple _null_ _null_ _null_ ));
+DATA(insert OID = 3337 (  tsm_system_nexttuple	PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 21 "2281 23 21 16" _null_ _null_ _null_ _null_ _null_ tsm_system_nexttuple _null_ _null_ _null_ ));
 DESCR("tsm_system_nexttuple(internal)");
 DATA(insert OID = 3338 (  tsm_system_end		PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ tsm_system_end _null_ _null_ _null_ ));
 DESCR("tsm_system_end(internal)");
@@ -5321,11 +5321,11 @@ DESCR("tsm_system_reset(internal)");
 DATA(insert OID = 3340 (  tsm_system_cost		PGNSP PGUID 12 1 0 0 0 f f f f t f v 7 0 2278 "2281 2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ tsm_system_cost _null_ _null_ _null_ ));
 DESCR("tsm_system_cost(internal)");
 
-DATA(insert OID = 3341 (  tsm_bernoulli_init		PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "2281 23 700" _null_ _null_ _null_ _null_	_null_ tsm_bernoulli_init _null_ _null_ _null_ ));
+DATA(insert OID = 3341 (  tsm_bernoulli_init		PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "2281 23 700" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_init _null_ _null_ _null_ ));
 DESCR("tsm_bernoulli_init(internal)");
 DATA(insert OID = 3342 (  tsm_bernoulli_nextblock	PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 23 "2281 16" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_nextblock _null_ _null_ _null_ ));
 DESCR("tsm_bernoulli_nextblock(internal)");
-DATA(insert OID = 3343 (  tsm_bernoulli_nexttuple	PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 21 "2281 23 21 16" _null_ _null_ _null_ _null_	_null_ tsm_bernoulli_nexttuple _null_ _null_ _null_ ));
+DATA(insert OID = 3343 (  tsm_bernoulli_nexttuple	PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 21 "2281 23 21 16" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_nexttuple _null_ _null_ _null_ ));
 DESCR("tsm_bernoulli_nexttuple(internal)");
 DATA(insert OID = 3344 (  tsm_bernoulli_end			PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_end _null_ _null_ _null_ ));
 DESCR("tsm_bernoulli_end(internal)");
diff --git a/src/include/catalog/pg_replication_origin.h b/src/include/catalog/pg_replication_origin.h
index 7610c911196..85061c36330 100644
--- a/src/include/catalog/pg_replication_origin.h
+++ b/src/include/catalog/pg_replication_origin.h
@@ -38,7 +38,7 @@ CATALOG(pg_replication_origin,6000) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
 	 * records. For this reason we don't use a normal Oid column here, since
 	 * we need to handle allocation of new values manually.
 	 */
-	Oid		roident;
+	Oid			roident;
 
 	/*
 	 * Variable-length fields start here, but we allow direct access to
@@ -46,9 +46,9 @@ CATALOG(pg_replication_origin,6000) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
 	 */
 
 	/* external, free-format, name */
-	text	roname BKI_FORCE_NOT_NULL;
+	text roname BKI_FORCE_NOT_NULL;
 
-#ifdef CATALOG_VARLEN		/* further variable-length fields */
+#ifdef CATALOG_VARLEN			/* further variable-length fields */
 #endif
 } FormData_pg_replication_origin;
 
diff --git a/src/include/catalog/pg_seclabel.h b/src/include/catalog/pg_seclabel.h
index c9f5b0cfdfc..e13c48d787f 100644
--- a/src/include/catalog/pg_seclabel.h
+++ b/src/include/catalog/pg_seclabel.h
@@ -27,8 +27,8 @@ CATALOG(pg_seclabel,3596) BKI_WITHOUT_OIDS
 	int32		objsubid;		/* column number, or 0 if not used */
 
 #ifdef CATALOG_VARLEN			/* variable-length fields start here */
-	text		provider BKI_FORCE_NOT_NULL; /* name of label provider */
-	text		label BKI_FORCE_NOT_NULL; /* security label of the object */
+	text provider BKI_FORCE_NOT_NULL;	/* name of label provider */
+	text label	BKI_FORCE_NOT_NULL;		/* security label of the object */
 #endif
 } FormData_pg_seclabel;
 
diff --git a/src/include/catalog/pg_shdescription.h b/src/include/catalog/pg_shdescription.h
index c5240998986..bff2850dba3 100644
--- a/src/include/catalog/pg_shdescription.h
+++ b/src/include/catalog/pg_shdescription.h
@@ -44,7 +44,7 @@ CATALOG(pg_shdescription,2396) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
 	Oid			classoid;		/* OID of table containing object */
 
 #ifdef CATALOG_VARLEN			/* variable-length fields start here */
-	text		description BKI_FORCE_NOT_NULL; /* description of object */
+	text description BKI_FORCE_NOT_NULL;		/* description of object */
 #endif
 } FormData_pg_shdescription;
 
diff --git a/src/include/catalog/pg_shseclabel.h b/src/include/catalog/pg_shseclabel.h
index 3977b42f874..0ff41f34bc7 100644
--- a/src/include/catalog/pg_shseclabel.h
+++ b/src/include/catalog/pg_shseclabel.h
@@ -26,8 +26,8 @@ CATALOG(pg_shseclabel,3592) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
 	Oid			classoid;		/* OID of table containing the shared object */
 
 #ifdef CATALOG_VARLEN			/* variable-length fields start here */
-	text		provider BKI_FORCE_NOT_NULL; /* name of label provider */
-	text		label BKI_FORCE_NOT_NULL; /* security label of the object */
+	text provider BKI_FORCE_NOT_NULL;	/* name of label provider */
+	text label	BKI_FORCE_NOT_NULL;		/* security label of the object */
 #endif
 } FormData_pg_shseclabel;
 
diff --git a/src/include/catalog/pg_tablesample_method.h b/src/include/catalog/pg_tablesample_method.h
index 968d1e696ab..b422414d080 100644
--- a/src/include/catalog/pg_tablesample_method.h
+++ b/src/include/catalog/pg_tablesample_method.h
@@ -23,21 +23,24 @@
  *		typedef struct FormData_pg_tablesample_method
  * ----------------
  */
-#define TableSampleMethodRelationId	3330
+#define TableSampleMethodRelationId 3330
 
 CATALOG(pg_tablesample_method,3330)
 {
 	NameData	tsmname;		/* tablesample method name */
-	bool		tsmseqscan;		/* does this method scan whole table sequentially? */
+	bool		tsmseqscan;		/* does this method scan whole table
+								 * sequentially? */
 	bool		tsmpagemode;	/* does this method scan page at a time? */
 	regproc		tsminit;		/* init scan function */
-	regproc		tsmnextblock;	/* function returning next block to sample
-								   or InvalidBlockOffset if finished */
-	regproc		tsmnexttuple;	/* function returning next tuple offset from current block
-								   or InvalidOffsetNumber if end of the block was reacher */
-	regproc		tsmexaminetuple;	/* optional function which can examine tuple contents and
-								   decide if tuple should be returned or not */
-	regproc		tsmend;			/* end scan function*/
+	regproc		tsmnextblock;	/* function returning next block to sample or
+								 * InvalidBlockOffset if finished */
+	regproc		tsmnexttuple;	/* function returning next tuple offset from
+								 * current block or InvalidOffsetNumber if end
+								 * of the block was reacher */
+	regproc		tsmexaminetuple;/* optional function which can examine tuple
+								 * contents and decide if tuple should be
+								 * returned or not */
+	regproc		tsmend;			/* end scan function */
 	regproc		tsmreset;		/* reset state - used by rescan */
 	regproc		tsmcost;		/* costing function */
 } FormData_pg_tablesample_method;
diff --git a/src/include/catalog/pg_transform.h b/src/include/catalog/pg_transform.h
index 0e433cf4aea..86e72b3c822 100644
--- a/src/include/catalog/pg_transform.h
+++ b/src/include/catalog/pg_transform.h
@@ -22,7 +22,7 @@
  *		typedef struct FormData_pg_transform
  * ----------------
  */
-#define TransformRelationId	3576
+#define TransformRelationId 3576
 
 CATALOG(pg_transform,3576)
 {
diff --git a/src/include/catalog/pg_trigger.h b/src/include/catalog/pg_trigger.h
index bff8fcfddaf..a2e303f9967 100644
--- a/src/include/catalog/pg_trigger.h
+++ b/src/include/catalog/pg_trigger.h
@@ -57,7 +57,7 @@ CATALOG(pg_trigger,2620)
 	int2vector	tgattr;			/* column numbers, if trigger is on columns */
 
 #ifdef CATALOG_VARLEN
-	bytea		tgargs BKI_FORCE_NOT_NULL; /* first\000second\000tgnargs\000 */
+	bytea tgargs BKI_FORCE_NOT_NULL;	/* first\000second\000tgnargs\000 */
 	pg_node_tree tgqual;		/* WHEN expression, or NULL if none */
 #endif
 } FormData_pg_trigger;
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index 4284a704d3c..da123f6c495 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -364,7 +364,7 @@ DATA(insert OID = 194 ( pg_node_tree	PGNSP PGUID -1 f b S f t \054 0 0 0 pg_node
 DESCR("string representing an internal node tree");
 #define PGNODETREEOID	194
 
-DATA(insert OID = 32 ( pg_ddl_command   PGNSP PGUID SIZEOF_POINTER t p P f t \054 0 0 0 pg_ddl_command_in pg_ddl_command_out pg_ddl_command_recv pg_ddl_command_send - - - ALIGNOF_POINTER p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 32 ( pg_ddl_command	PGNSP PGUID SIZEOF_POINTER t p P f t \054 0 0 0 pg_ddl_command_in pg_ddl_command_out pg_ddl_command_recv pg_ddl_command_send - - - ALIGNOF_POINTER p f 0 -1 0 0 _null_ _null_ _null_ ));
 DESCR("internal type for passing CollectedCommand");
 #define PGDDLCOMMANDOID 32
 
@@ -568,7 +568,7 @@ DATA(insert OID = 2206 ( regtype	   PGNSP PGUID	4 t b N f t \054 0	 0 2211 regty
 DESCR("registered type");
 #define REGTYPEOID		2206
 
-DATA(insert OID = 4096 ( regrole       PGNSP PGUID	4 t b N f t \054 0	 0 4097 regrolein regroleout regrolerecv regrolesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 4096 ( regrole	   PGNSP PGUID	4 t b N f t \054 0	 0 4097 regrolein regroleout regrolerecv regrolesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
 DESCR("registered role");
 #define REGROLEOID		4096
 
@@ -582,7 +582,7 @@ DATA(insert OID = 2209 ( _regoperator  PGNSP PGUID -1 f b A f t \054 0 2204 0 ar
 DATA(insert OID = 2210 ( _regclass	   PGNSP PGUID -1 f b A f t \054 0 2205 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
 DATA(insert OID = 2211 ( _regtype	   PGNSP PGUID -1 f b A f t \054 0 2206 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
 #define REGTYPEARRAYOID 2211
-DATA(insert OID = 4097 ( _regrole      PGNSP PGUID -1 f b A f t \054 0 4096 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 4097 ( _regrole	   PGNSP PGUID -1 f b A f t \054 0 4096 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
 DATA(insert OID = 4090 ( _regnamespace PGNSP PGUID -1 f b A f t \054 0 4089 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
 
 /* uuid */
diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h
index d6257250cbe..dcb6c082c52 100644
--- a/src/include/commands/defrem.h
+++ b/src/include/commands/defrem.h
@@ -32,7 +32,7 @@ extern ObjectAddress DefineIndex(Oid relationId,
 extern Oid	ReindexIndex(RangeVar *indexRelation, int options);
 extern Oid	ReindexTable(RangeVar *relation, int options);
 extern void ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind,
-								  int options);
+					  int options);
 extern char *makeObjectName(const char *name1, const char *name2,
 			   const char *label);
 extern char *ChooseRelationName(const char *name1, const char *name2,
@@ -51,13 +51,13 @@ extern void SetFunctionArgType(Oid funcOid, int argIndex, Oid newArgType);
 extern ObjectAddress AlterFunction(AlterFunctionStmt *stmt);
 extern ObjectAddress CreateCast(CreateCastStmt *stmt);
 extern void DropCastById(Oid castOid);
-extern Oid CreateTransform(CreateTransformStmt *stmt);
+extern Oid	CreateTransform(CreateTransformStmt *stmt);
 extern void DropTransformById(Oid transformOid);
 extern void IsThereFunctionInNamespace(const char *proname, int pronargs,
 						   oidvector *proargtypes, Oid nspOid);
 extern void ExecuteDoStmt(DoStmt *stmt);
 extern Oid	get_cast_oid(Oid sourcetypeid, Oid targettypeid, bool missing_ok);
-extern Oid get_transform_oid(Oid type_id, Oid lang_id, bool missing_ok);
+extern Oid	get_transform_oid(Oid type_id, Oid lang_id, bool missing_ok);
 extern void interpret_function_parameter_list(List *parameters,
 								  Oid languageOid,
 								  bool is_aggregate,
diff --git a/src/include/commands/event_trigger.h b/src/include/commands/event_trigger.h
index 579e1ef8bd2..8ba7db92f25 100644
--- a/src/include/commands/event_trigger.h
+++ b/src/include/commands/event_trigger.h
@@ -72,7 +72,7 @@ extern void EventTriggerCollectSimpleCommand(ObjectAddress address,
 extern void EventTriggerAlterTableStart(Node *parsetree);
 extern void EventTriggerAlterTableRelid(Oid objectId);
 extern void EventTriggerCollectAlterTableSubcmd(Node *subcmd,
-								  ObjectAddress address);
+									ObjectAddress address);
 extern void EventTriggerAlterTableEnd(void);
 
 extern void EventTriggerCollectGrant(InternalGrant *istmt);
diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h
index 4df44d02420..26fcc5b6433 100644
--- a/src/include/commands/explain.h
+++ b/src/include/commands/explain.h
@@ -84,7 +84,7 @@ extern void ExplainSeparatePlans(ExplainState *es);
 extern void ExplainPropertyList(const char *qlabel, List *data,
 					ExplainState *es);
 extern void ExplainPropertyListNested(const char *qlabel, List *data,
-					ExplainState *es);
+						  ExplainState *es);
 extern void ExplainPropertyText(const char *qlabel, const char *value,
 					ExplainState *es);
 extern void ExplainPropertyInteger(const char *qlabel, int value,
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 4fb91e79cba..e3a31afdf77 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -135,16 +135,16 @@ typedef struct VacAttrStats
  */
 typedef struct VacuumParams
 {
-	int		freeze_min_age;		/* min freeze age, -1 to use default */
-	int		freeze_table_age;	/* age at which to scan whole table */
-	int		multixact_freeze_min_age;	/* min multixact freeze age,
-										 * -1 to use default */
-	int		multixact_freeze_table_age;	/* multixact age at which to
-										 * scan whole table */
-	bool	is_wraparound;		/* force a for-wraparound vacuum */
-	int		log_min_duration;	/* minimum execution threshold in ms at
-								 * which  verbose logs are activated,
-								 * -1 to use default */
+	int			freeze_min_age; /* min freeze age, -1 to use default */
+	int			freeze_table_age;		/* age at which to scan whole table */
+	int			multixact_freeze_min_age;		/* min multixact freeze age,
+												 * -1 to use default */
+	int			multixact_freeze_table_age;		/* multixact age at which to
+												 * scan whole table */
+	bool		is_wraparound;	/* force a for-wraparound vacuum */
+	int			log_min_duration;		/* minimum execution threshold in ms
+										 * at which  verbose logs are
+										 * activated, -1 to use default */
 } VacuumParams;
 
 /* GUC parameters */
diff --git a/src/include/common/fe_memutils.h b/src/include/common/fe_memutils.h
index 51f12eb8251..36882035a16 100644
--- a/src/include/common/fe_memutils.h
+++ b/src/include/common/fe_memutils.h
@@ -13,8 +13,8 @@
  * Flags for pg_malloc_extended and palloc_extended, deliberately named
  * the same as the backend flags.
  */
-#define MCXT_ALLOC_HUGE			0x01	/* allow huge allocation (> 1 GB)
-										 * not actually used for frontends */
+#define MCXT_ALLOC_HUGE			0x01	/* allow huge allocation (> 1 GB) not
+										 * actually used for frontends */
 #define MCXT_ALLOC_NO_OOM		0x02	/* no failure if out-of-memory */
 #define MCXT_ALLOC_ZERO			0x04	/* zero allocated memory */
 
diff --git a/src/include/common/pg_lzcompress.h b/src/include/common/pg_lzcompress.h
index 52bcaf14b11..dbd51d58efb 100644
--- a/src/include/common/pg_lzcompress.h
+++ b/src/include/common/pg_lzcompress.h
@@ -86,6 +86,6 @@ extern const PGLZ_Strategy *const PGLZ_strategy_always;
 extern int32 pglz_compress(const char *source, int32 slen, char *dest,
 			  const PGLZ_Strategy *strategy);
 extern int32 pglz_decompress(const char *source, int32 slen, char *dest,
-			  int32 rawsize);
+				int32 rawsize);
 
 #endif   /* _PG_LZCOMPRESS_H_ */
diff --git a/src/include/common/restricted_token.h b/src/include/common/restricted_token.h
index e24374483c7..272ad9b21dd 100644
--- a/src/include/common/restricted_token.h
+++ b/src/include/common/restricted_token.h
@@ -2,8 +2,8 @@
  *	restricted_token.h
  *		helper routine to ensure restricted token on Windows
  *
- *  Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
- *  Portions Copyright (c) 1994, Regents of the University of California
+ *	Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ *	Portions Copyright (c) 1994, Regents of the University of California
  *
  *	src/include/common/restricted_token.h
  */
@@ -14,11 +14,11 @@
  * On Windows make sure that we are running with a restricted token,
  * On other platforms do nothing.
  */
-void	get_restricted_token(const char *progname);
+void		get_restricted_token(const char *progname);
 
 #ifdef WIN32
 /* Create a restricted token and execute the specified process with it. */
-HANDLE	CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, const char	*progname);
+HANDLE		CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, const char *progname);
 #endif
 
 #endif   /* COMMON_RESTRICTED_TOKEN_H */
diff --git a/src/include/common/string.h b/src/include/common/string.h
index 023385856fa..9f485c355ad 100644
--- a/src/include/common/string.h
+++ b/src/include/common/string.h
@@ -2,8 +2,8 @@
  *	string.h
  *		string handling helpers
  *
- *  Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
- *  Portions Copyright (c) 1994, Regents of the University of California
+ *	Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+ *	Portions Copyright (c) 1994, Regents of the University of California
  *
  *	src/include/common/string.h
  */
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index e60ab9fd963..193a6546277 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -369,7 +369,7 @@ extern List *ExecInsertIndexTuples(TupleTableSlot *slot, ItemPointer tupleid,
 					  EState *estate, bool noDupErr, bool *specConflict,
 					  List *arbiterIndexes);
 extern bool ExecCheckIndexConstraints(TupleTableSlot *slot, EState *estate,
-					  ItemPointer conflictTid, List *arbiterIndexes);
+						  ItemPointer conflictTid, List *arbiterIndexes);
 extern void check_exclusion_constraint(Relation heap, Relation index,
 						   IndexInfo *indexInfo,
 						   ItemPointer tupleid,
diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h
index 71099b15bbc..9d0b85c77db 100644
--- a/src/include/executor/hashjoin.h
+++ b/src/include/executor/hashjoin.h
@@ -108,14 +108,15 @@ typedef struct HashSkewBucket
  */
 typedef struct HashMemoryChunkData
 {
-	int			ntuples;	/* number of tuples stored in this chunk */
-	size_t		maxlen;		/* size of the buffer holding the tuples */
-	size_t		used;		/* number of buffer bytes already used */
+	int			ntuples;		/* number of tuples stored in this chunk */
+	size_t		maxlen;			/* size of the buffer holding the tuples */
+	size_t		used;			/* number of buffer bytes already used */
 
-	struct HashMemoryChunkData *next; /* pointer to the next chunk (linked list) */
+	struct HashMemoryChunkData *next;	/* pointer to the next chunk (linked
+										 * list) */
 
 	char		data[FLEXIBLE_ARRAY_MEMBER];	/* buffer allocated at the end */
-} HashMemoryChunkData;
+}	HashMemoryChunkData;
 
 typedef struct HashMemoryChunkData *HashMemoryChunk;
 
@@ -127,8 +128,9 @@ typedef struct HashJoinTableData
 	int			nbuckets;		/* # buckets in the in-memory hash table */
 	int			log2_nbuckets;	/* its log2 (nbuckets must be a power of 2) */
 
-	int			nbuckets_original;	/* # buckets when starting the first hash */
-	int			nbuckets_optimal;	/* optimal # buckets (per batch) */
+	int			nbuckets_original;		/* # buckets when starting the first
+										 * hash */
+	int			nbuckets_optimal;		/* optimal # buckets (per batch) */
 	int			log2_nbuckets_optimal;	/* same as log2_nbuckets optimal */
 
 	/* buckets[i] is head of list of tuples in i'th in-memory bucket */
@@ -183,7 +185,7 @@ typedef struct HashJoinTableData
 	MemoryContext batchCxt;		/* context for this-batch-only storage */
 
 	/* used for dense allocation of tuples (into linked chunks) */
-	HashMemoryChunk chunks;		/*  one list for the whole batch */
+	HashMemoryChunk chunks;		/* one list for the whole batch */
 }	HashJoinTableData;
 
 #endif   /* HASHJOIN_H */
diff --git a/src/include/fmgr.h b/src/include/fmgr.h
index b9a5c40f598..4e8f68c7ceb 100644
--- a/src/include/fmgr.h
+++ b/src/include/fmgr.h
@@ -298,7 +298,7 @@ extern struct varlena *pg_detoast_datum_packed(struct varlena * datum);
 #define PG_RETURN_INT32(x)	 return Int32GetDatum(x)
 #define PG_RETURN_UINT32(x)  return UInt32GetDatum(x)
 #define PG_RETURN_INT16(x)	 return Int16GetDatum(x)
-#define PG_RETURN_UINT16(x)	 return UInt16GetDatum(x)
+#define PG_RETURN_UINT16(x)  return UInt16GetDatum(x)
 #define PG_RETURN_CHAR(x)	 return CharGetDatum(x)
 #define PG_RETURN_BOOL(x)	 return BoolGetDatum(x)
 #define PG_RETURN_OID(x)	 return ObjectIdGetDatum(x)
diff --git a/src/include/funcapi.h b/src/include/funcapi.h
index 694f9ddf6df..5dd556baf95 100644
--- a/src/include/funcapi.h
+++ b/src/include/funcapi.h
@@ -176,7 +176,7 @@ extern int get_func_arg_info(HeapTuple procTup,
 extern int get_func_input_arg_names(Datum proargnames, Datum proargmodes,
 						 char ***arg_names);
 
-extern int get_func_trftypes(HeapTuple procTup, Oid **p_trftypes);
+extern int	get_func_trftypes(HeapTuple procTup, Oid **p_trftypes);
 extern char *get_func_result_name(Oid functionId);
 
 extern TupleDesc build_function_result_tupdesc_d(Datum proallargtypes,
diff --git a/src/include/lib/bipartite_match.h b/src/include/lib/bipartite_match.h
index c80f9bfdd0c..373bbede1e1 100644
--- a/src/include/lib/bipartite_match.h
+++ b/src/include/lib/bipartite_match.h
@@ -39,6 +39,6 @@ typedef struct bipartite_match_state
 
 BipartiteMatchState *BipartiteMatch(int u_size, int v_size, short **adjacency);
 
-void BipartiteMatchFree(BipartiteMatchState *state);
+void		BipartiteMatchFree(BipartiteMatchState *state);
 
 #endif   /* BIPARTITE_MATCH_H */
diff --git a/src/include/lib/hyperloglog.h b/src/include/lib/hyperloglog.h
index a6cbffc4c32..fd8280c5b00 100644
--- a/src/include/lib/hyperloglog.h
+++ b/src/include/lib/hyperloglog.h
@@ -60,7 +60,7 @@ typedef struct hyperLogLogState
 } hyperLogLogState;
 
 extern void initHyperLogLog(hyperLogLogState *cState, uint8 bwidth);
-extern void	addHyperLogLog(hyperLogLogState *cState, uint32 hash);
+extern void addHyperLogLog(hyperLogLogState *cState, uint32 hash);
 extern double estimateHyperLogLog(hyperLogLogState *cState);
 extern void mergeHyperLogLog(hyperLogLogState *cState, const hyperLogLogState *oState);
 
diff --git a/src/include/lib/pairingheap.h b/src/include/lib/pairingheap.h
index eb1856a7c10..e7713a211f3 100644
--- a/src/include/lib/pairingheap.h
+++ b/src/include/lib/pairingheap.h
@@ -58,8 +58,8 @@ typedef struct pairingheap_node
  * and >0 iff a > b.  For a min-heap, the conditions are reversed.
  */
 typedef int (*pairingheap_comparator) (const pairingheap_node *a,
-									   const pairingheap_node *b,
-									   void *arg);
+												   const pairingheap_node *b,
+												   void *arg);
 
 /*
  * A pairing heap.
@@ -71,12 +71,12 @@ typedef int (*pairingheap_comparator) (const pairingheap_node *a,
 typedef struct pairingheap
 {
 	pairingheap_comparator ph_compare;	/* comparison function */
-	void	   *ph_arg;					/* opaque argument to ph_compare */
-	pairingheap_node *ph_root;			/* current root of the heap */
+	void	   *ph_arg;			/* opaque argument to ph_compare */
+	pairingheap_node *ph_root;	/* current root of the heap */
 } pairingheap;
 
 extern pairingheap *pairingheap_allocate(pairingheap_comparator compare,
-					void *arg);
+					 void *arg);
 extern void pairingheap_free(pairingheap *heap);
 extern void pairingheap_add(pairingheap *heap, pairingheap_node *node);
 extern pairingheap_node *pairingheap_first(pairingheap *heap);
@@ -85,8 +85,8 @@ extern void pairingheap_remove(pairingheap *heap, pairingheap_node *node);
 
 #ifdef PAIRINGHEAP_DEBUG
 extern char *pairingheap_dump(pairingheap *heap,
-							  void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
-							  void *opaque);
+	 void (*dumpfunc) (pairingheap_node *node, StringInfo buf, void *opaque),
+				 void *opaque);
 #endif
 
 /* Resets the heap to be empty. */
diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h
index f323ed87106..6171ef3a1ff 100644
--- a/src/include/libpq/libpq-be.h
+++ b/src/include/libpq/libpq-be.h
@@ -207,12 +207,12 @@ typedef struct Port
  * SSL implementation (e.g. be-secure-openssl.c)
  */
 extern void be_tls_init(void);
-extern int be_tls_open_server(Port *port);
+extern int	be_tls_open_server(Port *port);
 extern void be_tls_close(Port *port);
 extern ssize_t be_tls_read(Port *port, void *ptr, size_t len, int *waitfor);
 extern ssize_t be_tls_write(Port *port, void *ptr, size_t len, int *waitfor);
 
-extern int be_tls_get_cipher_bits(Port *port);
+extern int	be_tls_get_cipher_bits(Port *port);
 extern bool be_tls_get_compression(Port *port);
 extern void be_tls_get_version(Port *port, char *ptr, size_t len);
 extern void be_tls_get_cipher(Port *port, char *ptr, size_t len);
diff --git a/src/include/libpq/libpq.h b/src/include/libpq/libpq.h
index 8fa896eb39e..c408e5b5517 100644
--- a/src/include/libpq/libpq.h
+++ b/src/include/libpq/libpq.h
@@ -23,19 +23,19 @@
 
 typedef struct
 {
-	void (*comm_reset)(void);
-	int	(*flush)(void);
-	int	(*flush_if_writable)(void);
-	bool (*is_send_pending)(void);
-	int	(*putmessage)(char msgtype, const char *s, size_t len);
-	void (*putmessage_noblock)(char msgtype, const char *s, size_t len);
-	void (*startcopyout)(void);
-	void (*endcopyout)(bool errorAbort);
+	void		(*comm_reset) (void);
+	int			(*flush) (void);
+	int			(*flush_if_writable) (void);
+	bool		(*is_send_pending) (void);
+	int			(*putmessage) (char msgtype, const char *s, size_t len);
+	void		(*putmessage_noblock) (char msgtype, const char *s, size_t len);
+	void		(*startcopyout) (void);
+	void		(*endcopyout) (bool errorAbort);
 } PQcommMethods;
 
 extern PGDLLIMPORT PQcommMethods *PqCommMethods;
 
-#define pq_comm_reset()	(PqCommMethods->comm_reset())
+#define pq_comm_reset() (PqCommMethods->comm_reset())
 #define pq_flush() (PqCommMethods->flush())
 #define pq_flush_if_writable() (PqCommMethods->flush_if_writable())
 #define pq_is_send_pending() (PqCommMethods->is_send_pending())
@@ -79,8 +79,8 @@ extern char *ssl_key_file;
 extern char *ssl_ca_file;
 extern char *ssl_crl_file;
 
-extern int	(*pq_putmessage_hook)(char msgtype, const char *s, size_t len);
-extern int  (*pq_flush_hook)(void);
+extern int	(*pq_putmessage_hook) (char msgtype, const char *s, size_t len);
+extern int	(*pq_flush_hook) (void);
 
 extern int	secure_initialize(void);
 extern bool secure_loaded_verify_locations(void);
diff --git a/src/include/libpq/pqmq.h b/src/include/libpq/pqmq.h
index ad7589d4edb..901756596a4 100644
--- a/src/include/libpq/pqmq.h
+++ b/src/include/libpq/pqmq.h
@@ -16,7 +16,7 @@
 #include "lib/stringinfo.h"
 #include "storage/shm_mq.h"
 
-extern void	pq_redirect_to_shm_mq(shm_mq *, shm_mq_handle *);
+extern void pq_redirect_to_shm_mq(shm_mq *, shm_mq_handle *);
 extern void pq_set_parallel_master(pid_t pid, BackendId backend_id);
 
 extern void pq_parse_errornotice(StringInfo str, ErrorData *edata);
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 0a92cc4efc9..db5bd7faf04 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -68,8 +68,8 @@ typedef struct IndexInfo
 	Oid		   *ii_ExclusionProcs;		/* array with one entry per column */
 	uint16	   *ii_ExclusionStrats;		/* array with one entry per column */
 	Oid		   *ii_UniqueOps;	/* array with one entry per column */
-	Oid		   *ii_UniqueProcs;		/* array with one entry per column */
-	uint16	   *ii_UniqueStrats;		/* array with one entry per column */
+	Oid		   *ii_UniqueProcs; /* array with one entry per column */
+	uint16	   *ii_UniqueStrats;	/* array with one entry per column */
 	bool		ii_Unique;
 	bool		ii_ReadyForInserts;
 	bool		ii_Concurrent;
@@ -1128,11 +1128,14 @@ typedef struct ModifyTableState
 	List	  **mt_arowmarks;	/* per-subplan ExecAuxRowMark lists */
 	EPQState	mt_epqstate;	/* for evaluating EvalPlanQual rechecks */
 	bool		fireBSTriggers; /* do we need to fire stmt triggers? */
-	OnConflictAction mt_onconflict; /* ON CONFLICT type */
-	List	   *mt_arbiterindexes;	/* unique index OIDs to arbitrate taking alt path */
-	TupleTableSlot *mt_existing; /* slot to store existing target tuple in */
-	List	   *mt_excludedtlist; /* the excluded pseudo relation's tlist  */
-	TupleTableSlot *mt_conflproj; /* CONFLICT ... SET ... projection target */
+	OnConflictAction mt_onconflict;		/* ON CONFLICT type */
+	List	   *mt_arbiterindexes;		/* unique index OIDs to arbitrate
+										 * taking alt path */
+	TupleTableSlot *mt_existing;	/* slot to store existing target tuple in */
+	List	   *mt_excludedtlist;		/* the excluded pseudo relation's
+										 * tlist  */
+	TupleTableSlot *mt_conflproj;		/* CONFLICT ... SET ... projection
+										 * target */
 } ModifyTableState;
 
 /* ----------------
@@ -1828,12 +1831,13 @@ typedef struct AggState
 	ExprContext **aggcontexts;	/* econtexts for long-lived data (per GS) */
 	ExprContext *tmpcontext;	/* econtext for input expressions */
 	AggStatePerAgg curperagg;	/* identifies currently active aggregate */
-	bool        input_done;     /* indicates end of input */
+	bool		input_done;		/* indicates end of input */
 	bool		agg_done;		/* indicates completion of Agg scan */
 	int			projected_set;	/* The last projected grouping set */
 	int			current_set;	/* The current grouping set being evaluated */
 	Bitmapset  *grouped_cols;	/* grouped cols in current projection */
-	List	   *all_grouped_cols; /* list of all grouped cols in DESC order */
+	List	   *all_grouped_cols;		/* list of all grouped cols in DESC
+										 * order */
 	/* These fields are for grouping set phase data */
 	int			maxsets;		/* The max number of sets in any phase */
 	AggStatePerPhase phases;	/* array of all phases */
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 669a0afa09c..290cdb30585 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -645,9 +645,9 @@ typedef enum JoinType
  */
 typedef enum OnConflictAction
 {
-	ONCONFLICT_NONE,		/* No "ON CONFLICT" clause */
-	ONCONFLICT_NOTHING,		/* ON CONFLICT ... DO NOTHING */
-	ONCONFLICT_UPDATE		/* ON CONFLICT ... DO UPDATE */
+	ONCONFLICT_NONE,			/* No "ON CONFLICT" clause */
+	ONCONFLICT_NOTHING,			/* ON CONFLICT ... DO NOTHING */
+	ONCONFLICT_UPDATE			/* ON CONFLICT ... DO UPDATE */
 } OnConflictAction;
 
 #endif   /* NODES_H */
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 23190e1af05..868905b0c16 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -121,7 +121,7 @@ typedef struct Query
 	bool		hasRecursive;	/* WITH RECURSIVE was specified */
 	bool		hasModifyingCTE;	/* has INSERT/UPDATE/DELETE in WITH */
 	bool		hasForUpdate;	/* FOR [KEY] UPDATE/SHARE was specified */
-	bool		hasRowSecurity;	/* row security applied? */
+	bool		hasRowSecurity; /* row security applied? */
 
 	List	   *cteList;		/* WITH list (of CommonTableExpr's) */
 
@@ -132,7 +132,7 @@ typedef struct Query
 
 	List	   *withCheckOptions;		/* a list of WithCheckOption's */
 
-	OnConflictExpr *onConflict;	/* ON CONFLICT DO [NOTHING | UPDATE] */
+	OnConflictExpr *onConflict; /* ON CONFLICT DO [NOTHING | UPDATE] */
 
 	List	   *returningList;	/* return-values list (of TargetEntry) */
 
@@ -294,18 +294,18 @@ typedef struct CollateClause
  */
 typedef enum RoleSpecType
 {
-	ROLESPEC_CSTRING,		/* role name is stored as a C string */
-	ROLESPEC_CURRENT_USER,	/* role spec is CURRENT_USER */
-	ROLESPEC_SESSION_USER,	/* role spec is SESSION_USER */
-	ROLESPEC_PUBLIC			/* role name is "public" */
+	ROLESPEC_CSTRING,			/* role name is stored as a C string */
+	ROLESPEC_CURRENT_USER,		/* role spec is CURRENT_USER */
+	ROLESPEC_SESSION_USER,		/* role spec is SESSION_USER */
+	ROLESPEC_PUBLIC				/* role name is "public" */
 } RoleSpecType;
 
 typedef struct RoleSpec
 {
 	NodeTag		type;
-	RoleSpecType roletype;	/* Type of this rolespec */
-	char	   *rolename;	/* filled only for ROLESPEC_CSTRING */
-	int			location;	/* token location, or -1 if unknown */
+	RoleSpecType roletype;		/* Type of this rolespec */
+	char	   *rolename;		/* filled only for ROLESPEC_CSTRING */
+	int			location;		/* token location, or -1 if unknown */
 } RoleSpec;
 
 /*
@@ -568,9 +568,9 @@ typedef struct RangeTableSample
 {
 	NodeTag		type;
 	RangeVar   *relation;
-	char	   *method;		/* sampling method */
+	char	   *method;			/* sampling method */
 	Node	   *repeatable;
-	List	   *args;		/* arguments for sampling method */
+	List	   *args;			/* arguments for sampling method */
 } RangeTableSample;
 
 /*
@@ -690,7 +690,7 @@ typedef struct LockingClause
 	NodeTag		type;
 	List	   *lockedRels;		/* FOR [KEY] UPDATE/SHARE relations */
 	LockClauseStrength strength;
-	LockWaitPolicy	waitPolicy;	/* NOWAIT and SKIP LOCKED */
+	LockWaitPolicy waitPolicy;	/* NOWAIT and SKIP LOCKED */
 } LockingClause;
 
 /*
@@ -810,7 +810,7 @@ typedef struct RangeTblEntry
 	 */
 	Oid			relid;			/* OID of the relation */
 	char		relkind;		/* relation kind (see pg_class.relkind) */
-	TableSampleClause	*tablesample;	/* sampling method and parameters */
+	TableSampleClause *tablesample;		/* sampling method and parameters */
 
 	/*
 	 * Fields valid for a subquery RTE (else NULL):
@@ -1157,12 +1157,12 @@ typedef struct InferClause
  */
 typedef struct OnConflictClause
 {
-	NodeTag			type;
-	OnConflictAction action;		/* DO NOTHING or UPDATE? */
-	InferClause	   *infer;			/* Optional index inference clause */
-	List		   *targetList;		/* the target list (of ResTarget) */
-	Node		   *whereClause;	/* qualifications */
-	int				location;		/* token location, or -1 if unknown */
+	NodeTag		type;
+	OnConflictAction action;	/* DO NOTHING or UPDATE? */
+	InferClause *infer;			/* Optional index inference clause */
+	List	   *targetList;		/* the target list (of ResTarget) */
+	Node	   *whereClause;	/* qualifications */
+	int			location;		/* token location, or -1 if unknown */
 } OnConflictClause;
 
 /*
@@ -1215,7 +1215,7 @@ typedef struct InsertStmt
 	RangeVar   *relation;		/* relation to insert into */
 	List	   *cols;			/* optional: names of the target columns */
 	Node	   *selectStmt;		/* the source SELECT/VALUES, or NULL */
-	OnConflictClause *onConflictClause;	/* ON CONFLICT clause */
+	OnConflictClause *onConflictClause; /* ON CONFLICT clause */
 	List	   *returningList;	/* list of expressions to return */
 	WithClause *withClause;		/* WITH clause */
 } InsertStmt;
@@ -2890,21 +2890,22 @@ typedef struct ConstraintsSetStmt
  */
 
 /* Reindex options */
-#define REINDEXOPT_VERBOSE 1 << 0	/* print progress info */
+#define REINDEXOPT_VERBOSE 1 << 0		/* print progress info */
 
 typedef enum ReindexObjectType
 {
-	REINDEX_OBJECT_INDEX,	/* index */
-	REINDEX_OBJECT_TABLE,	/* table or materialized view */
-	REINDEX_OBJECT_SCHEMA,	/* schema */
-	REINDEX_OBJECT_SYSTEM,	/* system catalogs */
-	REINDEX_OBJECT_DATABASE	/* database */
+	REINDEX_OBJECT_INDEX,		/* index */
+	REINDEX_OBJECT_TABLE,		/* table or materialized view */
+	REINDEX_OBJECT_SCHEMA,		/* schema */
+	REINDEX_OBJECT_SYSTEM,		/* system catalogs */
+	REINDEX_OBJECT_DATABASE		/* database */
 } ReindexObjectType;
 
 typedef struct ReindexStmt
 {
 	NodeTag		type;
-	ReindexObjectType	kind;	/* REINDEX_OBJECT_INDEX, REINDEX_OBJECT_TABLE, etc. */
+	ReindexObjectType kind;		/* REINDEX_OBJECT_INDEX, REINDEX_OBJECT_TABLE,
+								 * etc. */
 	RangeVar   *relation;		/* Table or index to reindex */
 	const char *name;			/* name of database to reindex */
 	int			options;		/* Reindex options flags */
@@ -3034,7 +3035,7 @@ typedef enum AlterTSConfigType
 typedef struct AlterTSConfigurationStmt
 {
 	NodeTag		type;
-	AlterTSConfigType	kind;	/* ALTER_TSCONFIG_ADD_MAPPING, etc */
+	AlterTSConfigType kind;		/* ALTER_TSCONFIG_ADD_MAPPING, etc */
 	List	   *cfgname;		/* qualified name (list of Value strings) */
 
 	/*
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 61c84041407..d967219c0b5 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -182,10 +182,10 @@ typedef struct ModifyTable
 	List	   *fdwPrivLists;	/* per-target-table FDW private data lists */
 	List	   *rowMarks;		/* PlanRowMarks (non-locking only) */
 	int			epqParam;		/* ID of Param for EvalPlanQual re-eval */
-	OnConflictAction onConflictAction; /* ON CONFLICT action */
-	List	   *arbiterIndexes;	/* List of ON CONFLICT arbiter index OIDs  */
+	OnConflictAction onConflictAction;	/* ON CONFLICT action */
+	List	   *arbiterIndexes; /* List of ON CONFLICT arbiter index OIDs  */
 	List	   *onConflictSet;	/* SET for INSERT ON CONFLICT DO UPDATE */
-	Node	   *onConflictWhere;/* WHERE for ON CONFLICT UPDATE */
+	Node	   *onConflictWhere;	/* WHERE for ON CONFLICT UPDATE */
 	Index		exclRelRTI;		/* RTI of the EXCLUDED pseudo relation */
 	List	   *exclRelTlist;	/* tlist of the EXCLUDED pseudo relation */
 } ModifyTable;
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 9f3a7267a27..60c1ca2c8dc 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -1196,9 +1196,9 @@ typedef struct CurrentOfExpr
 typedef struct InferenceElem
 {
 	Expr		xpr;
-	Node	   *expr;				/* expression to infer from, or NULL */
-	Oid			infercollid;		/* OID of collation, or InvalidOid */
-	Oid			inferopclass;		/* OID of att opclass, or InvalidOid */
+	Node	   *expr;			/* expression to infer from, or NULL */
+	Oid			infercollid;	/* OID of collation, or InvalidOid */
+	Oid			inferopclass;	/* OID of att opclass, or InvalidOid */
 } InferenceElem;
 
 /*--------------------
@@ -1380,13 +1380,14 @@ typedef struct OnConflictExpr
 	OnConflictAction action;	/* DO NOTHING or UPDATE? */
 
 	/* Arbiter */
-	List	   *arbiterElems;	/* unique index arbiter list (of InferenceElem's) */
+	List	   *arbiterElems;	/* unique index arbiter list (of
+								 * InferenceElem's) */
 	Node	   *arbiterWhere;	/* unique index arbiter WHERE clause */
 	Oid			constraint;		/* pg_constraint OID for arbiter */
 
 	/* ON CONFLICT UPDATE */
 	List	   *onConflictSet;	/* List of ON CONFLICT SET TargetEntrys */
-	Node	   *onConflictWhere;/* qualifiers to restrict UPDATE to */
+	Node	   *onConflictWhere;	/* qualifiers to restrict UPDATE to */
 	int			exclRelIndex;	/* RT index of 'excluded' relation */
 	List	   *exclRelTlist;	/* tlist of the EXCLUDED pseudo relation */
 } OnConflictExpr;
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 89c8deda95c..161644c343b 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -33,7 +33,7 @@ extern bool add_path_precheck(RelOptInfo *parent_rel,
 extern Path *create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
 					Relids required_outer);
 extern Path *create_samplescan_path(PlannerInfo *root, RelOptInfo *rel,
-									Relids required_outer);
+					   Relids required_outer);
 extern IndexPath *create_index_path(PlannerInfo *root,
 				  IndexOptInfo *index,
 				  List *indexclauses,
diff --git a/src/include/optimizer/prep.h b/src/include/optimizer/prep.h
index dcd078ee43d..7b8c0a98f31 100644
--- a/src/include/optimizer/prep.h
+++ b/src/include/optimizer/prep.h
@@ -46,7 +46,7 @@ extern void expand_security_quals(PlannerInfo *root, List *tlist);
 extern List *preprocess_targetlist(PlannerInfo *root, List *tlist);
 
 extern List *preprocess_onconflict_targetlist(List *tlist,
-						  int result_relation, List *range_table);
+								 int result_relation, List *range_table);
 
 extern PlanRowMark *get_plan_rowmark(List *rowmarks, Index rtindex);
 
diff --git a/src/include/optimizer/tlist.h b/src/include/optimizer/tlist.h
index b0f0f196838..95cffaa60c3 100644
--- a/src/include/optimizer/tlist.h
+++ b/src/include/optimizer/tlist.h
@@ -44,7 +44,7 @@ extern List *get_sortgrouplist_exprs(List *sgClauses,
 						List *targetList);
 
 extern SortGroupClause *get_sortgroupref_clause(Index sortref,
-					 List *clauses);
+						List *clauses);
 
 extern Oid *extract_grouping_ops(List *groupClause);
 extern AttrNumber *extract_grouping_cols(List *groupClause, List *tlist);
diff --git a/src/include/parser/parse_clause.h b/src/include/parser/parse_clause.h
index cbe5e76bb84..77619e37a04 100644
--- a/src/include/parser/parse_clause.h
+++ b/src/include/parser/parse_clause.h
@@ -27,7 +27,7 @@ extern Node *transformWhereClause(ParseState *pstate, Node *clause,
 extern Node *transformLimitClause(ParseState *pstate, Node *clause,
 					 ParseExprKind exprKind, const char *constructName);
 extern List *transformGroupClause(ParseState *pstate, List *grouplist,
-								  List **groupingSets,
+					 List **groupingSets,
 					 List **targetlist, List *sortClause,
 					 ParseExprKind exprKind, bool useSQL99);
 extern List *transformSortClause(ParseState *pstate, List *orderlist,
diff --git a/src/include/parser/parse_func.h b/src/include/parser/parse_func.h
index 40c007c35fc..3194da46394 100644
--- a/src/include/parser/parse_func.h
+++ b/src/include/parser/parse_func.h
@@ -34,9 +34,9 @@ extern Node *ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
 				  FuncCall *fn, int location);
 
 extern TableSampleClause *ParseTableSample(ParseState *pstate,
-										   char *samplemethod,
-										   Node *repeatable, List *args,
-										   int location);
+				 char *samplemethod,
+				 Node *repeatable, List *args,
+				 int location);
 
 extern FuncDetailCode func_get_detail(List *funcname,
 				List *fargs, List *fargnames,
diff --git a/src/include/parser/parse_relation.h b/src/include/parser/parse_relation.h
index ce563dea256..e2875a0adb2 100644
--- a/src/include/parser/parse_relation.h
+++ b/src/include/parser/parse_relation.h
@@ -26,11 +26,11 @@
  */
 typedef struct
 {
-	int				distance;	/* Weighted distance (lowest so far) */
-	RangeTblEntry  *rfirst;		/* RTE of first */
-	AttrNumber		first;		/* Closest attribute so far */
-	RangeTblEntry  *rsecond;	/* RTE of second */
-	AttrNumber		second;		/* Second closest attribute so far */
+	int			distance;		/* Weighted distance (lowest so far) */
+	RangeTblEntry *rfirst;		/* RTE of first */
+	AttrNumber	first;			/* Closest attribute so far */
+	RangeTblEntry *rsecond;		/* RTE of second */
+	AttrNumber	second;			/* Second closest attribute so far */
 } FuzzyAttrMatchState;
 
 
@@ -106,7 +106,7 @@ extern void addRTEtoQuery(ParseState *pstate, RangeTblEntry *rte,
 			  bool addToRelNameSpace, bool addToVarNameSpace);
 extern void errorMissingRTE(ParseState *pstate, RangeVar *relation) pg_attribute_noreturn();
 extern void errorMissingColumn(ParseState *pstate,
-	   char *relname, char *colname, int location) pg_attribute_noreturn();
+		 char *relname, char *colname, int location) pg_attribute_noreturn();
 extern void expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
 		  int location, bool include_dropped,
 		  List **colnames, List **colvars);
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index e3fe06e95be..9ecc16372d1 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -710,11 +710,11 @@ typedef enum BackendState
 typedef struct PgBackendSSLStatus
 {
 	/* Information about SSL connection */
-	int		ssl_bits;
-	bool	ssl_compression;
-	char	ssl_version[NAMEDATALEN];  /* MUST be null-terminated */
-	char	ssl_cipher[NAMEDATALEN];   /* MUST be null-terminated */
-	char	ssl_clientdn[NAMEDATALEN]; /* MUST be null-terminated */
+	int			ssl_bits;
+	bool		ssl_compression;
+	char		ssl_version[NAMEDATALEN];		/* MUST be null-terminated */
+	char		ssl_cipher[NAMEDATALEN];		/* MUST be null-terminated */
+	char		ssl_clientdn[NAMEDATALEN];		/* MUST be null-terminated */
 } PgBackendSSLStatus;
 
 
@@ -738,11 +738,11 @@ typedef struct PgBackendStatus
 	 * the copy is valid; otherwise start over.  This makes updates cheap
 	 * while reads are potentially expensive, but that's the tradeoff we want.
 	 *
-	 * The above protocol needs the memory barriers to ensure that
-	 * the apparent order of execution is as it desires. Otherwise,
-	 * for example, the CPU might rearrange the code so that st_changecount
-	 * is incremented twice before the modification on a machine with
-	 * weak memory ordering. This surprising result can lead to bugs.
+	 * The above protocol needs the memory barriers to ensure that the
+	 * apparent order of execution is as it desires. Otherwise, for example,
+	 * the CPU might rearrange the code so that st_changecount is incremented
+	 * twice before the modification on a machine with weak memory ordering.
+	 * This surprising result can lead to bugs.
 	 */
 	int			st_changecount;
 
@@ -793,26 +793,26 @@ typedef struct PgBackendStatus
 #define pgstat_increment_changecount_before(beentry)	\
 	do {	\
 		beentry->st_changecount++;	\
-		pg_write_barrier();	\
+		pg_write_barrier(); \
 	} while (0)
 
-#define pgstat_increment_changecount_after(beentry)	\
+#define pgstat_increment_changecount_after(beentry) \
 	do {	\
-		pg_write_barrier();	\
+		pg_write_barrier(); \
 		beentry->st_changecount++;	\
-		Assert((beentry->st_changecount & 1) == 0);	\
+		Assert((beentry->st_changecount & 1) == 0); \
 	} while (0)
 
 #define pgstat_save_changecount_before(beentry, save_changecount)	\
 	do {	\
-		save_changecount = beentry->st_changecount;	\
+		save_changecount = beentry->st_changecount; \
 		pg_read_barrier();	\
 	} while (0)
 
 #define pgstat_save_changecount_after(beentry, save_changecount)	\
 	do {	\
 		pg_read_barrier();	\
-		save_changecount = beentry->st_changecount;	\
+		save_changecount = beentry->st_changecount; \
 	} while (0)
 
 /* ----------
diff --git a/src/include/port/atomics.h b/src/include/port/atomics.h
index e90d664d5a0..1a4c748cb97 100644
--- a/src/include/port/atomics.h
+++ b/src/include/port/atomics.h
@@ -60,15 +60,15 @@
  */
 #if defined(__arm__) || defined(__arm) || \
 	defined(__aarch64__) || defined(__aarch64)
-#	include "port/atomics/arch-arm.h"
+#include "port/atomics/arch-arm.h"
 #elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
-#	include "port/atomics/arch-x86.h"
+#include "port/atomics/arch-x86.h"
 #elif defined(__ia64__) || defined(__ia64)
-#	include "port/atomics/arch-ia64.h"
+#include "port/atomics/arch-ia64.h"
 #elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
-#	include "port/atomics/arch-ppc.h"
+#include "port/atomics/arch-ppc.h"
 #elif defined(__hppa) || defined(__hppa__)
-#	include "port/atomics/arch-hppa.h"
+#include "port/atomics/arch-hppa.h"
 #endif
 
 /*
@@ -83,15 +83,15 @@
  */
 /* gcc or compatible, including clang and icc */
 #if defined(__GNUC__) || defined(__INTEL_COMPILER)
-#	include "port/atomics/generic-gcc.h"
+#include "port/atomics/generic-gcc.h"
 #elif defined(WIN32_ONLY_COMPILER)
-#	include "port/atomics/generic-msvc.h"
+#include "port/atomics/generic-msvc.h"
 #elif defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
-#	include "port/atomics/generic-acc.h"
+#include "port/atomics/generic-acc.h"
 #elif defined(__SUNPRO_C) && !defined(__GNUC__)
-#	include "port/atomics/generic-sunpro.h"
+#include "port/atomics/generic-sunpro.h"
 #elif (defined(__IBMC__) || defined(__IBMCPP__)) && !defined(__GNUC__)
-#	include "port/atomics/generic-xlc.h"
+#include "port/atomics/generic-xlc.h"
 #else
 /*
  * Unsupported compiler, we'll likely use slower fallbacks... At least
@@ -128,7 +128,7 @@ STATIC_IF_INLINE_DECLARE uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *pt
 STATIC_IF_INLINE_DECLARE void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val);
 STATIC_IF_INLINE_DECLARE uint32 pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval);
 STATIC_IF_INLINE_DECLARE bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
-															 uint32 *expected, uint32 newval);
+							   uint32 *expected, uint32 newval);
 STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_);
 STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_);
 STATIC_IF_INLINE_DECLARE uint32 pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_);
@@ -143,7 +143,7 @@ STATIC_IF_INLINE_DECLARE uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *pt
 STATIC_IF_INLINE_DECLARE void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val);
 STATIC_IF_INLINE_DECLARE uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval);
 STATIC_IF_INLINE_DECLARE bool pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
-															 uint64 *expected, uint64 newval);
+							   uint64 *expected, uint64 newval);
 STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_);
 STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_);
 STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_);
@@ -151,7 +151,7 @@ STATIC_IF_INLINE_DECLARE uint64 pg_atomic_fetch_or_u64(volatile pg_atomic_uint64
 STATIC_IF_INLINE_DECLARE uint64 pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_);
 STATIC_IF_INLINE_DECLARE uint64 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_);
 
-#endif /* PG_HAVE_64_BIT_ATOMICS */
+#endif   /* PG_HAVE_64_BIT_ATOMICS */
 
 
 /*
@@ -175,14 +175,14 @@ STATIC_IF_INLINE_DECLARE uint64 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint6
  * architectures) this requires issuing some sort of memory fencing
  * instruction.
  */
-#define pg_memory_barrier()	pg_memory_barrier_impl()
+#define pg_memory_barrier() pg_memory_barrier_impl()
 
 /*
  * pg_(read|write)_barrier - prevent the CPU from reordering memory access
  *
  * A read barrier must act as a compiler barrier, and in addition must
  * guarantee that any loads issued prior to the barrier are completed before
- * any loads issued after the barrier.	Similarly, a write barrier acts
+ * any loads issued after the barrier.  Similarly, a write barrier acts
  * as a compiler barrier, and also orders stores.  Read and write barriers
  * are thus weaker than a full memory barrier, but stronger than a compiler
  * barrier.  In practice, on machines with strong memory ordering, read and
@@ -194,7 +194,7 @@ STATIC_IF_INLINE_DECLARE uint64 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint6
 /*
  * Spinloop delay - Allow CPU to relax in busy loops
  */
-#define pg_spin_delay()	pg_spin_delay_impl()
+#define pg_spin_delay() pg_spin_delay_impl()
 
 /*
  * The following functions are wrapper functions around the platform specific
@@ -522,10 +522,11 @@ pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
 	return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
 }
 
-#endif /* PG_HAVE_64_BIT_ATOMICS */
+#endif   /* PG_HAVE_64_BIT_ATOMICS */
 
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif   /* defined(PG_USE_INLINE) ||
+								 * defined(ATOMICS_INCLUDE_DEFINITIONS) */
 
 #undef INSIDE_ATOMICS_H
 
-#endif /* ATOMICS_H */
+#endif   /* ATOMICS_H */
diff --git a/src/include/port/atomics/arch-ia64.h b/src/include/port/atomics/arch-ia64.h
index 2591a0f1637..3fd3918114b 100644
--- a/src/include/port/atomics/arch-ia64.h
+++ b/src/include/port/atomics/arch-ia64.h
@@ -18,9 +18,9 @@
  * fence.
  */
 #if defined(__INTEL_COMPILER)
-#	define pg_memory_barrier_impl()		__mf()
+#define pg_memory_barrier_impl()	 __mf()
 #elif defined(__GNUC__)
-#	define pg_memory_barrier_impl()		__asm__ __volatile__ ("mf" : : : "memory")
+#define pg_memory_barrier_impl()	 __asm__ __volatile__ ("mf" : : : "memory")
 #elif defined(__hpux)
-#	define pg_memory_barrier_impl()		_Asm_mf()
+#define pg_memory_barrier_impl()	 _Asm_mf()
 #endif
diff --git a/src/include/port/atomics/arch-x86.h b/src/include/port/atomics/arch-x86.h
index 168a49c7934..d7f45f325e2 100644
--- a/src/include/port/atomics/arch-x86.h
+++ b/src/include/port/atomics/arch-x86.h
@@ -78,9 +78,10 @@ typedef struct pg_atomic_uint64
 } pg_atomic_uint64;
 #endif
 
-#endif /* defined(HAVE_ATOMICS) */
+#endif   /* defined(HAVE_ATOMICS) */
 
-#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
+#endif   /* defined(__GNUC__) &&
+								 * !defined(__INTEL_COMPILER) */
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
 
@@ -93,20 +94,20 @@ typedef struct pg_atomic_uint64
  * PAUSE in the inner loop of a spin lock is necessary for good
  * performance:
  *
- *     The PAUSE instruction improves the performance of IA-32
- *     processors supporting Hyper-Threading Technology when
- *     executing spin-wait loops and other routines where one
- *     thread is accessing a shared lock or semaphore in a tight
- *     polling loop. When executing a spin-wait loop, the
- *     processor can suffer a severe performance penalty when
- *     exiting the loop because it detects a possible memory order
- *     violation and flushes the core processor's pipeline. The
- *     PAUSE instruction provides a hint to the processor that the
- *     code sequence is a spin-wait loop. The processor uses this
- *     hint to avoid the memory order violation and prevent the
- *     pipeline flush. In addition, the PAUSE instruction
- *     de-pipelines the spin-wait loop to prevent it from
- *     consuming execution resources excessively.
+ *	   The PAUSE instruction improves the performance of IA-32
+ *	   processors supporting Hyper-Threading Technology when
+ *	   executing spin-wait loops and other routines where one
+ *	   thread is accessing a shared lock or semaphore in a tight
+ *	   polling loop. When executing a spin-wait loop, the
+ *	   processor can suffer a severe performance penalty when
+ *	   exiting the loop because it detects a possible memory order
+ *	   violation and flushes the core processor's pipeline. The
+ *	   PAUSE instruction provides a hint to the processor that the
+ *	   code sequence is a spin-wait loop. The processor uses this
+ *	   hint to avoid the memory order violation and prevent the
+ *	   pipeline flush. In addition, the PAUSE instruction
+ *	   de-pipelines the spin-wait loop to prevent it from
+ *	   consuming execution resources excessively.
  */
 #if defined(__INTEL_COMPILER)
 #define PG_HAVE_SPIN_DELAY
@@ -120,8 +121,8 @@ pg_spin_delay_impl(void)
 static __inline__ void
 pg_spin_delay_impl(void)
 {
-	__asm__ __volatile__(
-		" rep; nop			\n");
+	__asm__		__volatile__(
+										 " rep; nop			\n");
 }
 #elif defined(WIN32_ONLY_COMPILER) && defined(__x86_64__)
 #define PG_HAVE_SPIN_DELAY
@@ -136,10 +137,10 @@ static __forceinline void
 pg_spin_delay_impl(void)
 {
 	/* See comment for gcc code. Same code, MASM syntax */
-	__asm rep nop;
+	__asm rep	nop;
 }
 #endif
-#endif /* !defined(PG_HAVE_SPIN_DELAY) */
+#endif   /* !defined(PG_HAVE_SPIN_DELAY) */
 
 
 #if defined(HAVE_ATOMICS)
@@ -153,12 +154,13 @@ pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
 {
 	register char _res = 1;
 
-	__asm__ __volatile__(
-		"	lock			\n"
-		"	xchgb	%0,%1	\n"
-:		"+q"(_res), "+m"(ptr->value)
-:
-:		"memory");
+	__asm__		__volatile__(
+										 "	lock			\n"
+										 "	xchgb	%0,%1	\n"
+							 :			 "+q"(_res), "+m"(ptr->value)
+							 :
+							 :			 "memory");
+
 	return _res == 0;
 }
 
@@ -170,7 +172,8 @@ pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
 	 * On a TSO architecture like x86 it's sufficient to use a compiler
 	 * barrier to achieve release semantics.
 	 */
-	__asm__ __volatile__("" ::: "memory");
+	__asm__		__volatile__("":::"memory");
+
 	ptr->value = 0;
 }
 
@@ -179,19 +182,20 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 									uint32 *expected, uint32 newval)
 {
-	char	ret;
+	char		ret;
 
 	/*
 	 * Perform cmpxchg and use the zero flag which it implicitly sets when
 	 * equal to measure the success.
 	 */
-	__asm__ __volatile__(
-		"	lock				\n"
-		"	cmpxchgl	%4,%5	\n"
-		"   setz		%2		\n"
-:		"=a" (*expected), "=m"(ptr->value), "=q" (ret)
-:		"a" (*expected), "r" (newval), "m"(ptr->value)
-:		"memory", "cc");
+	__asm__		__volatile__(
+								   "	lock				\n"
+										 "	cmpxchgl	%4,%5	\n"
+								   "   setz		%2		\n"
+					 :			 "=a"(*expected), "=m"(ptr->value), "=q"(ret)
+					 :			 "a"(*expected), "r"(newval), "m"(ptr->value)
+							 :			 "memory", "cc");
+
 	return (bool) ret;
 }
 
@@ -199,13 +203,14 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 static inline uint32
 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 {
-	uint32 res;
-	__asm__ __volatile__(
-		"	lock				\n"
-		"	xaddl	%0,%1		\n"
-:		"=q"(res), "=m"(ptr->value)
-:		"0" (add_), "m"(ptr->value)
-:		"memory", "cc");
+	uint32		res;
+	__asm__		__volatile__(
+								   "	lock				\n"
+										 "	xaddl	%0,%1		\n"
+							 :			 "=q"(res), "=m"(ptr->value)
+							 :			 "0"(add_), "m"(ptr->value)
+							 :			 "memory", "cc");
+
 	return res;
 }
 
@@ -216,19 +221,20 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 									uint64 *expected, uint64 newval)
 {
-	char	ret;
+	char		ret;
 
 	/*
 	 * Perform cmpxchg and use the zero flag which it implicitly sets when
 	 * equal to measure the success.
 	 */
-	__asm__ __volatile__(
-		"	lock				\n"
-		"	cmpxchgq	%4,%5	\n"
-		"   setz		%2		\n"
-:		"=a" (*expected), "=m"(ptr->value), "=q" (ret)
-:		"a" (*expected), "r" (newval), "m"(ptr->value)
-:		"memory", "cc");
+	__asm__		__volatile__(
+								   "	lock				\n"
+										 "	cmpxchgq	%4,%5	\n"
+								   "   setz		%2		\n"
+					 :			 "=a"(*expected), "=m"(ptr->value), "=q"(ret)
+					 :			 "a"(*expected), "r"(newval), "m"(ptr->value)
+							 :			 "memory", "cc");
+
 	return (bool) ret;
 }
 
@@ -236,20 +242,23 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 static inline uint64
 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
-	uint64 res;
-	__asm__ __volatile__(
-		"	lock				\n"
-		"	xaddq	%0,%1		\n"
-:		"=q"(res), "=m"(ptr->value)
-:		"0" (add_), "m"(ptr->value)
-:		"memory", "cc");
+	uint64		res;
+	__asm__		__volatile__(
+								   "	lock				\n"
+										 "	xaddq	%0,%1		\n"
+							 :			 "=q"(res), "=m"(ptr->value)
+							 :			 "0"(add_), "m"(ptr->value)
+							 :			 "memory", "cc");
+
 	return res;
 }
 
-#endif /* __x86_64__ */
+#endif   /* __x86_64__ */
 
-#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
+#endif   /* defined(__GNUC__) &&
+								 * !defined(__INTEL_COMPILER) */
 
-#endif /* HAVE_ATOMICS */
+#endif   /* HAVE_ATOMICS */
 
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif   /* defined(PG_USE_INLINE) ||
+								 * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/fallback.h b/src/include/port/atomics/fallback.h
index 4e04f9758b9..01af089f7b5 100644
--- a/src/include/port/atomics/fallback.h
+++ b/src/include/port/atomics/fallback.h
@@ -1,8 +1,8 @@
 /*-------------------------------------------------------------------------
  *
  * fallback.h
- *    Fallback for platforms without spinlock and/or atomics support. Slower
- *    than native atomics support, but not unusably slow.
+ *	  Fallback for platforms without spinlock and/or atomics support. Slower
+ *	  than native atomics support, but not unusably slow.
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
@@ -14,7 +14,7 @@
 
 /* intentionally no include guards, should only be included by atomics.h */
 #ifndef INSIDE_ATOMICS_H
-#	error "should be included via atomics.h"
+#error "should be included via atomics.h"
 #endif
 
 #ifndef pg_memory_barrier_impl
@@ -75,14 +75,15 @@ typedef struct pg_atomic_flag
 	 * be content with just one byte instead of 4, but that's not too much
 	 * waste.
 	 */
-#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
+#if defined(__hppa) || defined(__hppa__)		/* HP PA-RISC, GCC and HP
+												 * compilers */
 	int			sema[4];
 #else
 	int			sema;
 #endif
 } pg_atomic_flag;
 
-#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
+#endif   /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
 
 #if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
 
@@ -92,7 +93,8 @@ typedef struct pg_atomic_flag
 typedef struct pg_atomic_uint32
 {
 	/* Check pg_atomic_flag's definition above for an explanation */
-#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
+#if defined(__hppa) || defined(__hppa__)		/* HP PA-RISC, GCC and HP
+												 * compilers */
 	int			sema[4];
 #else
 	int			sema;
@@ -100,7 +102,7 @@ typedef struct pg_atomic_uint32
 	volatile uint32 value;
 } pg_atomic_uint32;
 
-#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
+#endif   /* PG_HAVE_ATOMIC_U32_SUPPORT */
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
 
@@ -128,7 +130,7 @@ pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
 	return true;
 }
 
-#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+#endif   /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
 
 #ifdef PG_HAVE_ATOMIC_U32_SIMULATION
 
@@ -137,12 +139,13 @@ extern void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
 
 #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
 extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
-												uint32 *expected, uint32 newval);
+									uint32 *expected, uint32 newval);
 
 #define PG_HAVE_ATOMIC_FETCH_ADD_U32
 extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);
 
-#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
+#endif   /* PG_HAVE_ATOMIC_U32_SIMULATION */
 
 
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif   /* defined(PG_USE_INLINE) ||
+								 * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic-acc.h b/src/include/port/atomics/generic-acc.h
index c5639aadda0..e16cc6f7dc2 100644
--- a/src/include/port/atomics/generic-acc.h
+++ b/src/include/port/atomics/generic-acc.h
@@ -10,9 +10,9 @@
  *
  * Documentation:
  * * inline assembly for Itanium-based HP-UX:
- *   http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
+ *	 http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
  * * Implementing Spinlocks on the Intel (R) Itanium (R) Architecture and PA-RISC
- *   http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
+ *	 http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
  *
  * Itanium only supports a small set of numbers (6, -8, -4, -1, 1, 4, 8, 16)
  * for atomic add/sub, so we just implement everything but compare_exchange
@@ -49,7 +49,7 @@ typedef struct pg_atomic_uint64
 	volatile uint64 value;
 } pg_atomic_uint64;
 
-#endif /* defined(HAVE_ATOMICS) */
+#endif   /* defined(HAVE_ATOMICS) */
 
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -64,23 +64,25 @@ STATIC_IF_INLINE bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 									uint32 *expected, uint32 newval)
 {
-	bool	ret;
-	uint32	current;
+	bool		ret;
+	uint32		current;
 
 	_Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
+
 	/*
 	 * We want a barrier, not just release/acquire semantics.
 	 */
 	_Asm_mf();
+
 	/*
-	 * Notes:
-	 * DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the compiler
+	 * Notes: DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the
+	 * compiler
 	 */
-	current =  _Asm_cmpxchg(_SZ_W, /* word */
-							_SEM_REL,
-							&ptr->value,
-							newval, _LDHINT_NONE,
-							_DOWN_MEM_FENCE | _UP_MEM_FENCE);
+	current = _Asm_cmpxchg(_SZ_W,		/* word */
+						   _SEM_REL,
+						   &ptr->value,
+						   newval, _LDHINT_NONE,
+						   _DOWN_MEM_FENCE | _UP_MEM_FENCE);
 	ret = current == *expected;
 	*expected = current;
 	return ret;
@@ -92,16 +94,16 @@ STATIC_IF_INLINE bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 									uint64 *expected, uint64 newval)
 {
-	bool	ret;
-	uint64	current;
+	bool		ret;
+	uint64		current;
 
 	_Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
 	_Asm_mf();
-	current =  _Asm_cmpxchg(_SZ_D, /* doubleword */
-							_SEM_REL,
-							&ptr->value,
-							newval, _LDHINT_NONE,
-							_DOWN_MEM_FENCE | _UP_MEM_FENCE);
+	current = _Asm_cmpxchg(_SZ_D,		/* doubleword */
+						   _SEM_REL,
+						   &ptr->value,
+						   newval, _LDHINT_NONE,
+						   _DOWN_MEM_FENCE | _UP_MEM_FENCE);
 	ret = current == *expected;
 	*expected = current;
 	return ret;
@@ -109,6 +111,7 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 
 #undef MINOR_FENCE
 
-#endif /* defined(HAVE_ATOMICS) */
+#endif   /* defined(HAVE_ATOMICS) */
 
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif   /* defined(PG_USE_INLINE) ||
+								 * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic-gcc.h b/src/include/port/atomics/generic-gcc.h
index 591c9fe1eb3..301ab510bf3 100644
--- a/src/include/port/atomics/generic-gcc.h
+++ b/src/include/port/atomics/generic-gcc.h
@@ -10,9 +10,9 @@
  *
  * Documentation:
  * * Legacy __sync Built-in Functions for Atomic Memory Access
- *   http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
+ *	 http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
  * * Built-in functions for memory model aware atomic operations
- *   http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
+ *	 http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
  *
  * src/include/port/atomics/generic-gcc.h
  *
@@ -40,21 +40,21 @@
  * definitions where possible, and use this only as a fallback.
  */
 #if !defined(pg_memory_barrier_impl)
-#	if defined(HAVE_GCC__ATOMIC_INT32_CAS)
-#		define pg_memory_barrier_impl()		__atomic_thread_fence(__ATOMIC_SEQ_CST)
-#	elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
-#		define pg_memory_barrier_impl()		__sync_synchronize()
-#	endif
-#endif /* !defined(pg_memory_barrier_impl) */
+#if defined(HAVE_GCC__ATOMIC_INT32_CAS)
+#define pg_memory_barrier_impl()	 __atomic_thread_fence(__ATOMIC_SEQ_CST)
+#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+#define pg_memory_barrier_impl()	 __sync_synchronize()
+#endif
+#endif   /* !defined(pg_memory_barrier_impl) */
 
 #if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
 /* acquire semantics include read barrier semantics */
-#		define pg_read_barrier_impl()		__atomic_thread_fence(__ATOMIC_ACQUIRE)
+#define pg_read_barrier_impl()		 __atomic_thread_fence(__ATOMIC_ACQUIRE)
 #endif
 
 #if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
 /* release semantics include write barrier semantics */
-#		define pg_write_barrier_impl()		__atomic_thread_fence(__ATOMIC_RELEASE)
+#define pg_write_barrier_impl()		 __atomic_thread_fence(__ATOMIC_RELEASE)
 #endif
 
 #ifdef HAVE_ATOMICS
@@ -75,7 +75,7 @@ typedef struct pg_atomic_flag
 #endif
 } pg_atomic_flag;
 
-#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */
+#endif   /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */
 
 /* generic gcc based atomic uint32 implementation */
 #if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
@@ -87,7 +87,8 @@ typedef struct pg_atomic_uint32
 	volatile uint32 value;
 } pg_atomic_uint32;
 
-#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */
+#endif   /* defined(HAVE_GCC__ATOMIC_INT32_CAS) ||
+								 * defined(HAVE_GCC__SYNC_INT32_CAS) */
 
 /* generic gcc based atomic uint64 implementation */
 #if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
@@ -101,7 +102,8 @@ typedef struct pg_atomic_uint64
 	volatile uint64 value pg_attribute_aligned(8);
 } pg_atomic_uint64;
 
-#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
+#endif   /* defined(HAVE_GCC__ATOMIC_INT64_CAS) ||
+								 * defined(HAVE_GCC__SYNC_INT64_CAS) */
 
 /*
  * Implementation follows. Inlined or directly included from atomics.c
@@ -123,7 +125,7 @@ pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
 }
 #endif
 
-#endif /* defined(HAVE_GCC__SYNC_*_TAS) */
+#endif   /* defined(HAVE_GCC__SYNC_*_TAS) */
 
 #ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
 #define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
@@ -152,7 +154,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
 }
 #endif
 
-#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
+#endif   /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
 
 /* prefer __atomic, it has a better API */
 #if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
@@ -173,8 +175,9 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 									uint32 *expected, uint32 newval)
 {
-	bool	ret;
-	uint32	current;
+	bool		ret;
+	uint32		current;
+
 	current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
 	ret = current == *expected;
 	*expected = current;
@@ -211,8 +214,9 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 									uint64 *expected, uint64 newval)
 {
-	bool	ret;
-	uint64	current;
+	bool		ret;
+	uint64		current;
+
 	current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
 	ret = current == *expected;
 	*expected = current;
@@ -229,8 +233,9 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 }
 #endif
 
-#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
+#endif   /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
 
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif   /* defined(PG_USE_INLINE) ||
+								 * defined(ATOMICS_INCLUDE_DEFINITIONS) */
 
-#endif /* defined(HAVE_ATOMICS) */
+#endif   /* defined(HAVE_ATOMICS) */
diff --git a/src/include/port/atomics/generic-msvc.h b/src/include/port/atomics/generic-msvc.h
index d259d6f51d0..3c177fea7e5 100644
--- a/src/include/port/atomics/generic-msvc.h
+++ b/src/include/port/atomics/generic-msvc.h
@@ -10,7 +10,7 @@
  *
  * Documentation:
  * * Interlocked Variable Access
- *   http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
+ *	 http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
  *
  * src/include/port/atomics/generic-msvc.h
  *
@@ -41,12 +41,14 @@ typedef struct pg_atomic_uint32
 } pg_atomic_uint32;
 
 #define PG_HAVE_ATOMIC_U64_SUPPORT
-typedef struct __declspec(align(8)) pg_atomic_uint64
+typedef struct __declspec (
+						   align(8))
+pg_atomic_uint64
 {
 	volatile uint64 value;
 } pg_atomic_uint64;
 
-#endif /* defined(HAVE_ATOMICS) */
+#endif   /* defined(HAVE_ATOMICS) */
 
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -58,8 +60,9 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 									uint32 *expected, uint32 newval)
 {
-	bool	ret;
-	uint32	current;
+	bool		ret;
+	uint32		current;
+
 	current = InterlockedCompareExchange(&ptr->value, newval, *expected);
 	ret = current == *expected;
 	*expected = current;
@@ -86,8 +89,9 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 									uint64 *expected, uint64 newval)
 {
-	bool	ret;
-	uint64	current;
+	bool		ret;
+	uint64		current;
+
 	current = _InterlockedCompareExchange64(&ptr->value, newval, *expected);
 	ret = current == *expected;
 	*expected = current;
@@ -104,8 +108,9 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
 	return _InterlockedExchangeAdd64(&ptr->value, add_);
 }
-#endif /* _WIN64 */
+#endif   /* _WIN64 */
 
-#endif /* HAVE_ATOMICS */
+#endif   /* HAVE_ATOMICS */
 
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif   /* defined(PG_USE_INLINE) ||
+								 * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic-sunpro.h b/src/include/port/atomics/generic-sunpro.h
index d369207fb34..e74cbad5028 100644
--- a/src/include/port/atomics/generic-sunpro.h
+++ b/src/include/port/atomics/generic-sunpro.h
@@ -9,8 +9,8 @@
  *
  * Documentation:
  * * manpage for atomic_cas(3C)
- *   http://www.unix.com/man-page/opensolaris/3c/atomic_cas/
- *   http://docs.oracle.com/cd/E23824_01/html/821-1465/atomic-cas-3c.html
+ *	 http://www.unix.com/man-page/opensolaris/3c/atomic_cas/
+ *	 http://docs.oracle.com/cd/E23824_01/html/821-1465/atomic-cas-3c.html
  *
  * src/include/port/atomics/generic-sunpro.h
  *
@@ -30,16 +30,16 @@
  * membar #StoreStore | #LoadStore | #StoreLoad | #LoadLoad on x86/sparc
  * respectively.
  */
-#	define pg_memory_barrier_impl()		__machine_rw_barrier()
+#define pg_memory_barrier_impl()	 __machine_rw_barrier()
 #endif
 #ifndef pg_read_barrier_impl
-#	define pg_read_barrier_impl()		__machine_r_barrier()
+#define pg_read_barrier_impl()		 __machine_r_barrier()
 #endif
 #ifndef pg_write_barrier_impl
-#	define pg_write_barrier_impl()		__machine_w_barrier()
+#define pg_write_barrier_impl()		 __machine_w_barrier()
 #endif
 
-#endif /* HAVE_MBARRIER_H */
+#endif   /* HAVE_MBARRIER_H */
 
 /* Older versions of the compiler don't have atomic.h... */
 #ifdef HAVE_ATOMIC_H
@@ -64,9 +64,9 @@ typedef struct pg_atomic_uint64
 	volatile uint64 value pg_attribute_aligned(8);
 } pg_atomic_uint64;
 
-#endif /* HAVE_ATOMIC_H */
+#endif   /* HAVE_ATOMIC_H */
 
-#endif /* defined(HAVE_ATOMICS) */
+#endif   /* defined(HAVE_ATOMICS) */
 
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -80,8 +80,8 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 									uint32 *expected, uint32 newval)
 {
-	bool	ret;
-	uint32	current;
+	bool		ret;
+	uint32		current;
 
 	current = atomic_cas_32(&ptr->value, *expected, newval);
 	ret = current == *expected;
@@ -94,8 +94,8 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 									uint64 *expected, uint64 newval)
 {
-	bool	ret;
-	uint64	current;
+	bool		ret;
+	uint64		current;
 
 	current = atomic_cas_64(&ptr->value, *expected, newval);
 	ret = current == *expected;
@@ -103,8 +103,9 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 	return ret;
 }
 
-#endif /* HAVE_ATOMIC_H */
+#endif   /* HAVE_ATOMIC_H */
 
-#endif /* defined(HAVE_ATOMICS) */
+#endif   /* defined(HAVE_ATOMICS) */
 
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif   /* defined(PG_USE_INLINE) ||
+								 * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic-xlc.h b/src/include/port/atomics/generic-xlc.h
index 1c743f2bc80..01c19121eb6 100644
--- a/src/include/port/atomics/generic-xlc.h
+++ b/src/include/port/atomics/generic-xlc.h
@@ -9,7 +9,7 @@
  *
  * Documentation:
  * * Synchronization and atomic built-in functions
- *   http://publib.boulder.ibm.com/infocenter/lnxpcomp/v8v101/topic/com.ibm.xlcpp8l.doc/compiler/ref/bif_sync.htm
+ *	 http://publib.boulder.ibm.com/infocenter/lnxpcomp/v8v101/topic/com.ibm.xlcpp8l.doc/compiler/ref/bif_sync.htm
  *
  * src/include/port/atomics/generic-xlc.h
  *
@@ -35,9 +35,9 @@ typedef struct pg_atomic_uint64
 	volatile uint64 value pg_attribute_aligned(8);
 } pg_atomic_uint64;
 
-#endif /* __64BIT__ */
+#endif   /* __64BIT__ */
 
-#endif /* defined(HAVE_ATOMICS) */
+#endif   /* defined(HAVE_ATOMICS) */
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
 
@@ -48,13 +48,13 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 									uint32 *expected, uint32 newval)
 {
-	bool	ret;
-	uint64	current;
+	bool		ret;
+	uint64		current;
 
 	/*
-	 * xlc's documentation tells us:
-	 * "If __compare_and_swap is used as a locking primitive, insert a call to
-	 * the __isync built-in function at the start of any critical sections."
+	 * xlc's documentation tells us: "If __compare_and_swap is used as a
+	 * locking primitive, insert a call to the __isync built-in function at
+	 * the start of any critical sections."
 	 */
 	__isync();
 
@@ -62,8 +62,8 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 	 * XXX: __compare_and_swap is defined to take signed parameters, but that
 	 * shouldn't matter since we don't perform any arithmetic operations.
 	 */
-	current = (uint32)__compare_and_swap((volatile int*)ptr->value,
-										 (int)*expected, (int)newval);
+	current = (uint32) __compare_and_swap((volatile int *) ptr->value,
+										  (int) *expected, (int) newval);
 	ret = current == *expected;
 	*expected = current;
 	return ret;
@@ -83,13 +83,13 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 									uint64 *expected, uint64 newval)
 {
-	bool	ret;
-	uint64	current;
+	bool		ret;
+	uint64		current;
 
 	__isync();
 
-	current = (uint64)__compare_and_swaplp((volatile long*)ptr->value,
-										   (long)*expected, (long)newval);
+	current = (uint64) __compare_and_swaplp((volatile long *) ptr->value,
+											(long) *expected, (long) newval);
 	ret = current == *expected;
 	*expected = current;
 	return ret;
@@ -102,8 +102,9 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 	return __fetch_and_addlp(&ptr->value, add_);
 }
 
-#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
+#endif   /* PG_HAVE_ATOMIC_U64_SUPPORT */
 
-#endif /* defined(HAVE_ATOMICS) */
+#endif   /* defined(HAVE_ATOMICS) */
 
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif   /* defined(PG_USE_INLINE) ||
+								 * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic.h b/src/include/port/atomics/generic.h
index bb31df36237..9787f9ee871 100644
--- a/src/include/port/atomics/generic.h
+++ b/src/include/port/atomics/generic.h
@@ -14,7 +14,7 @@
 
 /* intentionally no include guards, should only be included by atomics.h */
 #ifndef INSIDE_ATOMICS_H
-#	error "should be included via atomics.h"
+#error "should be included via atomics.h"
 #endif
 
 /*
@@ -22,10 +22,10 @@
  * barriers.
  */
 #if !defined(pg_read_barrier_impl)
-#	define pg_read_barrier_impl pg_memory_barrier_impl
+#define pg_read_barrier_impl pg_memory_barrier_impl
 #endif
 #if !defined(pg_write_barrier_impl)
-#	define pg_write_barrier_impl pg_memory_barrier_impl
+#define pg_write_barrier_impl pg_memory_barrier_impl
 #endif
 
 #ifndef PG_HAVE_SPIN_DELAY
@@ -113,7 +113,8 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
 static inline bool
 pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
 {
-	uint32 value = 0;
+	uint32		value = 0;
+
 	return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
 }
 
@@ -129,23 +130,23 @@ static inline void
 pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
 {
 	/*
-	 * Use a memory barrier + plain write if we have a native memory
-	 * barrier. But don't do so if memory barriers use spinlocks - that'd lead
-	 * to circularity if flags are used to implement spinlocks.
+	 * Use a memory barrier + plain write if we have a native memory barrier.
+	 * But don't do so if memory barriers use spinlocks - that'd lead to
+	 * circularity if flags are used to implement spinlocks.
 	 */
 #ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
 	/* XXX: release semantics suffice? */
 	pg_memory_barrier_impl();
 	pg_atomic_write_u32_impl(ptr, 0);
 #else
-	uint32 value = 1;
+	uint32		value = 1;
 	pg_atomic_compare_exchange_u32_impl(ptr, &value, 0);
 #endif
 }
 
 #elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
-#	error "No pg_atomic_test_and_set provided"
-#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
+#error "No pg_atomic_test_and_set provided"
+#endif   /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
 
 
 #ifndef PG_HAVE_ATOMIC_INIT_U32
@@ -162,7 +163,8 @@ pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
 static inline uint32
 pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
 {
-	uint32 old;
+	uint32		old;
+
 	while (true)
 	{
 		old = pg_atomic_read_u32_impl(ptr);
@@ -178,7 +180,8 @@ pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
 static inline uint32
 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 {
-	uint32 old;
+	uint32		old;
+
 	while (true)
 	{
 		old = pg_atomic_read_u32_impl(ptr);
@@ -203,7 +206,8 @@ pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
 static inline uint32
 pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
 {
-	uint32 old;
+	uint32		old;
+
 	while (true)
 	{
 		old = pg_atomic_read_u32_impl(ptr);
@@ -219,7 +223,8 @@ pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
 static inline uint32
 pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
 {
-	uint32 old;
+	uint32		old;
+
 	while (true)
 	{
 		old = pg_atomic_read_u32_impl(ptr);
@@ -255,7 +260,8 @@ pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
 static inline uint64
 pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
 {
-	uint64 old;
+	uint64		old;
+
 	while (true)
 	{
 		old = ptr->value;
@@ -284,7 +290,7 @@ pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
 static inline uint64
 pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
 {
-	uint64 old = 0;
+	uint64		old = 0;
 
 	/*
 	 * 64 bit reads aren't safe on all platforms. In the generic
@@ -312,7 +318,8 @@ pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
 static inline uint64
 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
-	uint64 old;
+	uint64		old;
+
 	while (true)
 	{
 		old = pg_atomic_read_u64_impl(ptr);
@@ -337,7 +344,8 @@ pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
 static inline uint64
 pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
 {
-	uint64 old;
+	uint64		old;
+
 	while (true)
 	{
 		old = pg_atomic_read_u64_impl(ptr);
@@ -353,7 +361,8 @@ pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
 static inline uint64
 pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
 {
-	uint64 old;
+	uint64		old;
+
 	while (true)
 	{
 		old = pg_atomic_read_u64_impl(ptr);
@@ -382,6 +391,7 @@ pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
 }
 #endif
 
-#endif /* PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64 */
+#endif   /* PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64 */
 
-#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif   /* defined(PG_USE_INLINE) ||
+								 * defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h
index b14d194fb33..c925c569014 100644
--- a/src/include/port/pg_crc32c.h
+++ b/src/include/port/pg_crc32c.h
@@ -90,4 +90,4 @@ extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len)
 
 #endif
 
-#endif /* PG_CRC32C_H */
+#endif   /* PG_CRC32C_H */
diff --git a/src/include/postmaster/bgworker.h b/src/include/postmaster/bgworker.h
index de9180df91b..f0a95306545 100644
--- a/src/include/postmaster/bgworker.h
+++ b/src/include/postmaster/bgworker.h
@@ -113,7 +113,7 @@ extern BgwHandleStatus
 WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *
 							   handle, pid_t *pid);
 extern BgwHandleStatus
-WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *);
+			WaitForBackgroundWorkerShutdown(BackgroundWorkerHandle *);
 
 /* Terminate a bgworker */
 extern void TerminateBackgroundWorker(BackgroundWorkerHandle *handle);
diff --git a/src/include/replication/origin.h b/src/include/replication/origin.h
index b814aeb4fd3..5d294de60e4 100644
--- a/src/include/replication/origin.h
+++ b/src/include/replication/origin.h
@@ -1,6 +1,6 @@
 /*-------------------------------------------------------------------------
  * origin.h
- *     Exports from replication/logical/origin.c
+ *	   Exports from replication/logical/origin.c
  *
  * Copyright (c) 2013-2015, PostgreSQL Global Development Group
  *
@@ -17,13 +17,13 @@
 typedef struct xl_replorigin_set
 {
 	XLogRecPtr	remote_lsn;
-	RepOriginId	node_id;
+	RepOriginId node_id;
 	bool		force;
 } xl_replorigin_set;
 
 typedef struct xl_replorigin_drop
 {
-	RepOriginId	node_id;
+	RepOriginId node_id;
 } xl_replorigin_drop;
 
 #define XLOG_REPLORIGIN_SET		0x00
@@ -41,17 +41,17 @@ extern RepOriginId replorigin_by_name(char *name, bool missing_ok);
 extern RepOriginId replorigin_create(char *name);
 extern void replorigin_drop(RepOriginId roident);
 extern bool replorigin_by_oid(RepOriginId roident, bool missing_ok,
-								char **roname);
+				  char **roname);
 
 /* API for querying & manipulating replication progress tracking */
 extern void replorigin_advance(RepOriginId node,
-							   XLogRecPtr remote_commit,
-							   XLogRecPtr local_commit,
-							   bool go_backward, bool wal_log);
+				   XLogRecPtr remote_commit,
+				   XLogRecPtr local_commit,
+				   bool go_backward, bool wal_log);
 extern XLogRecPtr replorigin_get_progress(RepOriginId node, bool flush);
 
 extern void replorigin_session_advance(XLogRecPtr remote_commit,
-										XLogRecPtr local_commit);
+						   XLogRecPtr local_commit);
 extern void replorigin_session_setup(RepOriginId node);
 extern void replorigin_session_reset(void);
 extern XLogRecPtr replorigin_session_get_progress(bool flush);
@@ -61,9 +61,9 @@ extern void CheckPointReplicationOrigin(void);
 extern void StartupReplicationOrigin(void);
 
 /* WAL logging */
-void replorigin_redo(XLogReaderState *record);
-void replorigin_desc(StringInfo buf, XLogReaderState *record);
-const char * replorigin_identify(uint8 info);
+void		replorigin_redo(XLogReaderState *record);
+void		replorigin_desc(StringInfo buf, XLogReaderState *record);
+const char *replorigin_identify(uint8 info);
 
 /* shared memory allocation */
 extern Size ReplicationOriginShmemSize(void);
@@ -83,4 +83,4 @@ extern Datum pg_replication_origin_advance(PG_FUNCTION_ARGS);
 extern Datum pg_replication_origin_progress(PG_FUNCTION_ARGS);
 extern Datum pg_show_replication_origin_status(PG_FUNCTION_ARGS);
 
-#endif /* PG_ORIGIN_H */
+#endif   /* PG_ORIGIN_H */
diff --git a/src/include/replication/output_plugin.h b/src/include/replication/output_plugin.h
index bec1a56017c..17c3de235e6 100644
--- a/src/include/replication/output_plugin.h
+++ b/src/include/replication/output_plugin.h
@@ -78,7 +78,7 @@ typedef void (*LogicalDecodeCommitCB) (
  */
 typedef bool (*LogicalDecodeFilterByOriginCB) (
 											 struct LogicalDecodingContext *,
-												   RepOriginId origin_id);
+													  RepOriginId origin_id);
 
 /*
  * Called to shutdown an output plugin.
diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h
index 666c5f28419..110e78e7a8e 100644
--- a/src/include/replication/reorderbuffer.h
+++ b/src/include/replication/reorderbuffer.h
@@ -177,7 +177,7 @@ typedef struct ReorderBufferTXN
 
 	/* origin of the change that caused this transaction */
 	RepOriginId origin_id;
-	XLogRecPtr origin_lsn;
+	XLogRecPtr	origin_lsn;
 
 	/*
 	 * Commit time, only known when we read the actual commit record.
@@ -352,7 +352,7 @@ void		ReorderBufferReturnChange(ReorderBuffer *, ReorderBufferChange *);
 void		ReorderBufferQueueChange(ReorderBuffer *, TransactionId, XLogRecPtr lsn, ReorderBufferChange *);
 void ReorderBufferCommit(ReorderBuffer *, TransactionId,
 					XLogRecPtr commit_lsn, XLogRecPtr end_lsn,
-					TimestampTz commit_time, RepOriginId origin_id, XLogRecPtr origin_lsn);
+	  TimestampTz commit_time, RepOriginId origin_id, XLogRecPtr origin_lsn);
 void		ReorderBufferAssignChild(ReorderBuffer *, TransactionId, TransactionId, XLogRecPtr commit_lsn);
 void ReorderBufferCommitChild(ReorderBuffer *, TransactionId, TransactionId,
 						 XLogRecPtr commit_lsn, XLogRecPtr end_lsn);
diff --git a/src/include/replication/walsender.h b/src/include/replication/walsender.h
index b10e78488f6..cb3f6bd21fb 100644
--- a/src/include/replication/walsender.h
+++ b/src/include/replication/walsender.h
@@ -25,7 +25,7 @@ extern bool wake_wal_senders;
 /* user-settable parameters */
 extern int	max_wal_senders;
 extern int	wal_sender_timeout;
-extern bool	log_replication_commands;
+extern bool log_replication_commands;
 
 extern void InitWalSender(void);
 extern void exec_replication_command(const char *query_string);
diff --git a/src/include/rewrite/rowsecurity.h b/src/include/rewrite/rowsecurity.h
index eb4b20559f5..523c56e5982 100644
--- a/src/include/rewrite/rowsecurity.h
+++ b/src/include/rewrite/rowsecurity.h
@@ -2,8 +2,8 @@
  *
  * rowsecurity.h
  *
- *    prototypes for rewrite/rowsecurity.c and the structures for managing
- *    the row security policies for relations in relcache.
+ *	  prototypes for rewrite/rowsecurity.c and the structures for managing
+ *	  the row security policies for relations in relcache.
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
@@ -19,23 +19,23 @@
 
 typedef struct RowSecurityPolicy
 {
-	Oid					policy_id;		/* OID of the policy */
-	char			   *policy_name;	/* Name of the policy */
-	char				polcmd;			/* Type of command policy is for */
-	ArrayType		   *roles;			/* Array of roles policy is for */
-	Expr			   *qual;			/* Expression to filter rows */
-	Expr			   *with_check_qual; /* Expression to limit rows allowed */
-	bool				hassublinks;	/* If either expression has sublinks */
+	Oid			policy_id;		/* OID of the policy */
+	char	   *policy_name;	/* Name of the policy */
+	char		polcmd;			/* Type of command policy is for */
+	ArrayType  *roles;			/* Array of roles policy is for */
+	Expr	   *qual;			/* Expression to filter rows */
+	Expr	   *with_check_qual;	/* Expression to limit rows allowed */
+	bool		hassublinks;	/* If either expression has sublinks */
 } RowSecurityPolicy;
 
 typedef struct RowSecurityDesc
 {
-	MemoryContext		rscxt;		/* row security memory context */
-	List			   *policies;	/* list of row security policies */
+	MemoryContext rscxt;		/* row security memory context */
+	List	   *policies;		/* list of row security policies */
 } RowSecurityDesc;
 
-typedef List *(*row_security_policy_hook_type)(CmdType cmdtype,
-											   Relation relation);
+typedef List *(*row_security_policy_hook_type) (CmdType cmdtype,
+														  Relation relation);
 
 extern PGDLLIMPORT row_security_policy_hook_type row_security_policy_hook_permissive;
 
@@ -46,4 +46,4 @@ extern void get_row_security_policies(Query *root, CmdType commandType,
 						  List **securityQuals, List **withCheckOptions,
 						  bool *hasRowSecurity, bool *hasSubLinks);
 
-#endif	/* ROWSECURITY_H */
+#endif   /* ROWSECURITY_H */
diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h
index 7cc75fc1065..e7ccce22942 100644
--- a/src/include/storage/lmgr.h
+++ b/src/include/storage/lmgr.h
@@ -77,9 +77,9 @@ extern void WaitForLockers(LOCKTAG heaplocktag, LOCKMODE lockmode);
 extern void WaitForLockersMultiple(List *locktags, LOCKMODE lockmode);
 
 /* Lock an XID for tuple insertion (used to wait for an insertion to finish) */
-extern uint32	SpeculativeInsertionLockAcquire(TransactionId xid);
-extern void		SpeculativeInsertionLockRelease(TransactionId xid);
-extern void		SpeculativeInsertionWait(TransactionId xid, uint32 token);
+extern uint32 SpeculativeInsertionLockAcquire(TransactionId xid);
+extern void SpeculativeInsertionLockRelease(TransactionId xid);
+extern void SpeculativeInsertionWait(TransactionId xid, uint32 token);
 
 /* Lock a general object (other than a relation) of the current database */
 extern void LockDatabaseObject(Oid classid, Oid objid, uint16 objsubid,
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index b4eb1b4a9e3..96fe3a66ab0 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -176,7 +176,7 @@ typedef enum LockTagType
 	/* ID info for a transaction is its TransactionId */
 	LOCKTAG_VIRTUALTRANSACTION, /* virtual transaction (ditto) */
 	/* ID info for a virtual transaction is its VirtualTransactionId */
-	LOCKTAG_SPECULATIVE_TOKEN,		/* speculative insertion Xid and token */
+	LOCKTAG_SPECULATIVE_TOKEN,	/* speculative insertion Xid and token */
 	/* ID info for a transaction is its TransactionId */
 	LOCKTAG_OBJECT,				/* non-relation database object */
 	/* ID info for an object is DB OID + CLASS OID + OBJECT OID + SUBID */
diff --git a/src/include/storage/shm_mq.h b/src/include/storage/shm_mq.h
index 085a8a73e31..1a2ba040cb4 100644
--- a/src/include/storage/shm_mq.h
+++ b/src/include/storage/shm_mq.h
@@ -28,8 +28,8 @@ typedef struct shm_mq_handle shm_mq_handle;
 /* Descriptors for a single write spanning multiple locations. */
 typedef struct
 {
-	const char  *data;
-	Size	len;
+	const char *data;
+	Size		len;
 } shm_mq_iovec;
 
 /* Possible results of a send or receive operation. */
@@ -69,7 +69,7 @@ extern void shm_mq_detach(shm_mq *);
 extern shm_mq_result shm_mq_send(shm_mq_handle *mqh,
 			Size nbytes, const void *data, bool nowait);
 extern shm_mq_result shm_mq_sendv(shm_mq_handle *mqh,
-			shm_mq_iovec *iov, int iovcnt, bool nowait);
+			 shm_mq_iovec *iov, int iovcnt, bool nowait);
 extern shm_mq_result shm_mq_receive(shm_mq_handle *mqh,
 			   Size *nbytesp, void **datap, bool nowait);
 
diff --git a/src/include/tcop/deparse_utility.h b/src/include/tcop/deparse_utility.h
index b6bcbeb3174..d276eeb2287 100644
--- a/src/include/tcop/deparse_utility.h
+++ b/src/include/tcop/deparse_utility.h
@@ -37,8 +37,8 @@ typedef enum CollectedCommandType
  */
 typedef struct CollectedATSubcmd
 {
-	ObjectAddress	address; /* affected column, constraint, index, ... */
-	Node		   *parsetree;
+	ObjectAddress address;		/* affected column, constraint, index, ... */
+	Node	   *parsetree;
 } CollectedATSubcmd;
 
 typedef struct CollectedCommand
@@ -54,52 +54,52 @@ typedef struct CollectedCommand
 		{
 			ObjectAddress address;
 			ObjectAddress secondaryObject;
-		} simple;
+		}			simple;
 
 		/* ALTER TABLE, and internal uses thereof */
 		struct
 		{
-			Oid		objectId;
-			Oid		classId;
-			List   *subcmds;
-		} alterTable;
+			Oid			objectId;
+			Oid			classId;
+			List	   *subcmds;
+		}			alterTable;
 
 		/* GRANT / REVOKE */
 		struct
 		{
 			InternalGrant *istmt;
-		} grant;
+		}			grant;
 
 		/* ALTER OPERATOR FAMILY */
 		struct
 		{
 			ObjectAddress address;
-			List   *operators;
-			List   *procedures;
-		} opfam;
+			List	   *operators;
+			List	   *procedures;
+		}			opfam;
 
 		/* CREATE OPERATOR CLASS */
 		struct
 		{
 			ObjectAddress address;
-			List   *operators;
-			List   *procedures;
-		} createopc;
+			List	   *operators;
+			List	   *procedures;
+		}			createopc;
 
 		/* ALTER TEXT SEARCH CONFIGURATION ADD/ALTER/DROP MAPPING */
 		struct
 		{
 			ObjectAddress address;
-			Oid	   *dictIds;
-			int		ndicts;
-		} atscfg;
+			Oid		   *dictIds;
+			int			ndicts;
+		}			atscfg;
 
 		/* ALTER DEFAULT PRIVILEGES */
 		struct
 		{
 			GrantObjectType objtype;
-		} defprivs;
-	} d;
+		}			defprivs;
+	}			d;
 } CollectedCommand;
 
-#endif	/* DEPARSE_UTILITY_H */
+#endif   /* DEPARSE_UTILITY_H */
diff --git a/src/include/tcop/fastpath.h b/src/include/tcop/fastpath.h
index 47028cb113c..dc6905d48c7 100644
--- a/src/include/tcop/fastpath.h
+++ b/src/include/tcop/fastpath.h
@@ -15,7 +15,7 @@
 
 #include "lib/stringinfo.h"
 
-extern int GetOldFunctionMessage(StringInfo buf);
+extern int	GetOldFunctionMessage(StringInfo buf);
 extern int	HandleFunctionRequest(StringInfo msgBuf);
 
 #endif   /* FASTPATH_H */
diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h
index d747579e7a0..915ea39be23 100644
--- a/src/include/utils/acl.h
+++ b/src/include/utils/acl.h
@@ -230,7 +230,7 @@ extern bool is_admin_of_role(Oid member, Oid role);
 extern void check_is_member_of_role(Oid member, Oid role);
 extern Oid	get_role_oid(const char *rolename, bool missing_ok);
 extern Oid	get_role_oid_or_public(const char *rolename);
-extern Oid  get_rolespec_oid(const Node *node, bool missing_ok);
+extern Oid	get_rolespec_oid(const Node *node, bool missing_ok);
 extern HeapTuple get_rolespec_tuple(const Node *node);
 extern char *get_rolespec_name(const Node *node);
 
diff --git a/src/include/utils/aclchk_internal.h b/src/include/utils/aclchk_internal.h
index 0855bf1d0d2..83785675784 100644
--- a/src/include/utils/aclchk_internal.h
+++ b/src/include/utils/aclchk_internal.h
@@ -42,4 +42,4 @@ typedef struct
 } InternalGrant;
 
 
-#endif	/* ACLCHK_INTERNAL_H */
+#endif   /* ACLCHK_INTERNAL_H */
diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h
index 1140c17792b..51f25a28148 100644
--- a/src/include/utils/builtins.h
+++ b/src/include/utils/builtins.h
@@ -657,7 +657,7 @@ extern List *stringToQualifiedNameList(const char *string);
 extern char *format_procedure(Oid procedure_oid);
 extern char *format_procedure_qualified(Oid procedure_oid);
 extern void format_procedure_parts(Oid operator_oid, List **objnames,
-					  List **objargs);
+					   List **objargs);
 extern char *format_operator(Oid operator_oid);
 extern char *format_operator_qualified(Oid operator_oid);
 extern void format_operator_parts(Oid operator_oid, List **objnames,
@@ -804,9 +804,9 @@ extern Datum textoverlay_no_len(PG_FUNCTION_ARGS);
 extern Datum name_text(PG_FUNCTION_ARGS);
 extern Datum text_name(PG_FUNCTION_ARGS);
 extern int	varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid);
-extern int	varstr_levenshtein(const char *source, int slen, const char *target,
+extern int varstr_levenshtein(const char *source, int slen, const char *target,
 				   int tlen, int ins_c, int del_c, int sub_c);
-extern int	varstr_levenshtein_less_equal(const char *source, int slen,
+extern int varstr_levenshtein_less_equal(const char *source, int slen,
 							  const char *target, int tlen, int ins_c,
 							  int del_c, int sub_c, int max_d);
 extern List *textToQualifiedNameList(text *textval);
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index ff78b70b96d..a8191c94c39 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -202,7 +202,8 @@ typedef enum
 #define GUC_SUPERUSER_ONLY		0x0100	/* show only to superusers */
 #define GUC_IS_NAME				0x0200	/* limit string to NAMEDATALEN-1 */
 #define GUC_NOT_WHILE_SEC_REST	0x0400	/* can't set if security restricted */
-#define GUC_DISALLOW_IN_AUTO_FILE 0x0800 /* can't set in PG_AUTOCONF_FILENAME */
+#define GUC_DISALLOW_IN_AUTO_FILE 0x0800		/* can't set in
+												 * PG_AUTOCONF_FILENAME */
 
 #define GUC_UNIT_KB				0x1000	/* value is in kilobytes */
 #define GUC_UNIT_BLOCKS			0x2000	/* value is in blocks */
diff --git a/src/include/utils/guc_tables.h b/src/include/utils/guc_tables.h
index c0f9cb9374b..7a58ddb10b6 100644
--- a/src/include/utils/guc_tables.h
+++ b/src/include/utils/guc_tables.h
@@ -167,7 +167,7 @@ struct config_generic
  * Caution: the GUC_IS_IN_FILE bit is transient state for ProcessConfigFile.
  * Do not assume that its value represents useful information elsewhere.
  */
-#define GUC_PENDING_RESTART	0x0002
+#define GUC_PENDING_RESTART 0x0002
 
 
 /* GUC records for specific variable types */
diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h
index 1d8293b2235..296d20af838 100644
--- a/src/include/utils/jsonapi.h
+++ b/src/include/utils/jsonapi.h
@@ -122,6 +122,6 @@ extern JsonLexContext *makeJsonLexContextCstringLen(char *json,
  *
  * str agrument does not need to be nul-terminated.
  */
-extern bool IsValidJsonNumber(const char * str, int len);
+extern bool IsValidJsonNumber(const char *str, int len);
 
 #endif   /* JSONAPI_H */
diff --git a/src/include/utils/jsonb.h b/src/include/utils/jsonb.h
index b02934a1aef..4d614430ce1 100644
--- a/src/include/utils/jsonb.h
+++ b/src/include/utils/jsonb.h
@@ -244,7 +244,7 @@ struct JsonbValue
 	union
 	{
 		Numeric numeric;
-		bool		boolean;
+		bool boolean;
 		struct
 		{
 			int			len;
@@ -401,9 +401,9 @@ extern Datum jsonb_pretty(PG_FUNCTION_ARGS);
 extern Datum jsonb_concat(PG_FUNCTION_ARGS);
 
 /* deletion */
-Datum jsonb_delete(PG_FUNCTION_ARGS);
-Datum jsonb_delete_idx(PG_FUNCTION_ARGS);
-Datum jsonb_delete_path(PG_FUNCTION_ARGS);
+Datum		jsonb_delete(PG_FUNCTION_ARGS);
+Datum		jsonb_delete_idx(PG_FUNCTION_ARGS);
+Datum		jsonb_delete_path(PG_FUNCTION_ARGS);
 
 /* replacement */
 extern Datum jsonb_replace(PG_FUNCTION_ARGS);
@@ -431,7 +431,7 @@ extern void JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash);
 extern char *JsonbToCString(StringInfo out, JsonbContainer *in,
 			   int estimated_len);
 extern char *JsonbToCStringIndent(StringInfo out, JsonbContainer *in,
-			   int estimated_len);
+					 int estimated_len);
 
 
 #endif   /* __JSONB_H__ */
diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h
index e2e5734ea7b..a40c9b12732 100644
--- a/src/include/utils/lsyscache.h
+++ b/src/include/utils/lsyscache.h
@@ -102,8 +102,8 @@ extern Oid	get_rel_namespace(Oid relid);
 extern Oid	get_rel_type_id(Oid relid);
 extern char get_rel_relkind(Oid relid);
 extern Oid	get_rel_tablespace(Oid relid);
-extern Oid get_transform_fromsql(Oid typid, Oid langid, List *trftypes);
-extern Oid get_transform_tosql(Oid typid, Oid langid, List *trftypes);
+extern Oid	get_transform_fromsql(Oid typid, Oid langid, List *trftypes);
+extern Oid	get_transform_tosql(Oid typid, Oid langid, List *trftypes);
 extern bool get_typisdefined(Oid typid);
 extern int16 get_typlen(Oid typid);
 extern bool get_typbyval(Oid typid);
diff --git a/src/include/utils/palloc.h b/src/include/utils/palloc.h
index 9861f0dac73..e56f5014a3d 100644
--- a/src/include/utils/palloc.h
+++ b/src/include/utils/palloc.h
@@ -72,7 +72,7 @@ extern void *MemoryContextAlloc(MemoryContext context, Size size);
 extern void *MemoryContextAllocZero(MemoryContext context, Size size);
 extern void *MemoryContextAllocZeroAligned(MemoryContext context, Size size);
 extern void *MemoryContextAllocExtended(MemoryContext context,
-										Size size, int flags);
+						   Size size, int flags);
 
 extern void *palloc(Size size);
 extern void *palloc0(Size size);
diff --git a/src/include/utils/pg_crc.h b/src/include/utils/pg_crc.h
index b4efe157f1c..37bb0e933b2 100644
--- a/src/include/utils/pg_crc.h
+++ b/src/include/utils/pg_crc.h
@@ -93,7 +93,7 @@ do {															  \
 \
 	while (__len-- > 0) \
 	{ \
-		int		__tab_index = ((int) ((crc) >> 24) ^ *__data++) & 0xFF;	\
+		int		__tab_index = ((int) ((crc) >> 24) ^ *__data++) & 0xFF; \
 		(crc) = table[__tab_index] ^ ((crc) << 8); \
 	} \
 } while (0)
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index ef206c47ea5..90a018082f0 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -109,7 +109,7 @@ typedef struct CachedPlanSource
 	double		generic_cost;	/* cost of generic plan, or -1 if not known */
 	double		total_custom_cost;		/* total cost of custom plans so far */
 	int			num_custom_plans;		/* number of plans included in total */
-	bool		hasRowSecurity;			/* planned with row security? */
+	bool		hasRowSecurity; /* planned with row security? */
 	int			row_security_env;		/* row security setting when planned */
 	bool		rowSecurityDisabled;	/* is row security disabled? */
 } CachedPlanSource;
diff --git a/src/include/utils/rls.h b/src/include/utils/rls.h
index 867faa05ffd..3770ddc2163 100644
--- a/src/include/utils/rls.h
+++ b/src/include/utils/rls.h
@@ -14,15 +14,15 @@
 #define RLS_H
 
 /* GUC variable */
-extern int row_security;
+extern int	row_security;
 
 /* Possible values for row_security GUC */
 typedef enum RowSecurityConfigType
 {
-	ROW_SECURITY_OFF,		/* RLS never applied- error thrown if no priv */
-	ROW_SECURITY_ON,		/* normal case, RLS applied for regular users */
-	ROW_SECURITY_FORCE		/* RLS applied for superusers and table owners */
-} RowSecurityConfigType;
+	ROW_SECURITY_OFF,			/* RLS never applied- error thrown if no priv */
+	ROW_SECURITY_ON,			/* normal case, RLS applied for regular users */
+	ROW_SECURITY_FORCE			/* RLS applied for superusers and table owners */
+}	RowSecurityConfigType;
 
 /*
  * Used by callers of check_enable_rls.
@@ -48,11 +48,11 @@ typedef enum RowSecurityConfigType
  */
 enum CheckEnableRlsResult
 {
-		RLS_NONE,
-		RLS_NONE_ENV,
-		RLS_ENABLED
+	RLS_NONE,
+	RLS_NONE_ENV,
+	RLS_ENABLED
 };
 
-extern int check_enable_rls(Oid relid, Oid checkAsUser, bool noError);
+extern int	check_enable_rls(Oid relid, Oid checkAsUser, bool noError);
 
 #endif   /* RLS_H */
diff --git a/src/include/utils/ruleutils.h b/src/include/utils/ruleutils.h
index fed9c7b6ffa..3494b13b0fe 100644
--- a/src/include/utils/ruleutils.h
+++ b/src/include/utils/ruleutils.h
@@ -32,4 +32,4 @@ extern List *select_rtable_names_for_explain(List *rtable,
 								Bitmapset *rels_used);
 extern char *generate_collation_name(Oid collid);
 
-#endif	/* RULEUTILS_H */
+#endif   /* RULEUTILS_H */
diff --git a/src/include/utils/sampling.h b/src/include/utils/sampling.h
index 476bb002346..1653ed0aa43 100644
--- a/src/include/utils/sampling.h
+++ b/src/include/utils/sampling.h
@@ -20,7 +20,7 @@
 typedef unsigned short SamplerRandomState[3];
 
 extern void sampler_random_init_state(long seed,
-									  SamplerRandomState randstate);
+						  SamplerRandomState randstate);
 extern double sampler_random_fract(SamplerRandomState randstate);
 
 /* Block sampling methods */
@@ -32,7 +32,7 @@ typedef struct
 	int			n;				/* desired sample size */
 	BlockNumber t;				/* current block number */
 	int			m;				/* blocks selected so far */
-	SamplerRandomState randstate; /* random generator state */
+	SamplerRandomState randstate;		/* random generator state */
 } BlockSamplerData;
 
 typedef BlockSamplerData *BlockSampler;
@@ -46,8 +46,8 @@ extern BlockNumber BlockSampler_Next(BlockSampler bs);
 
 typedef struct
 {
-	double	W;
-	SamplerRandomState randstate; /* random generator state */
+	double		W;
+	SamplerRandomState randstate;		/* random generator state */
 } ReservoirStateData;
 
 typedef ReservoirStateData *ReservoirState;
@@ -62,4 +62,4 @@ extern double anl_random_fract(void);
 extern double anl_init_selection_state(int n);
 extern double anl_get_next_S(double t, int n, double *stateptr);
 
-#endif /* SAMPLING_H */
+#endif   /* SAMPLING_H */
diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h
index fdca7130bb0..b3d8017b8bd 100644
--- a/src/include/utils/selfuncs.h
+++ b/src/include/utils/selfuncs.h
@@ -185,7 +185,7 @@ extern void mergejoinscansel(PlannerInfo *root, Node *clause,
 				 Selectivity *rightstart, Selectivity *rightend);
 
 extern double estimate_num_groups(PlannerInfo *root, List *groupExprs,
-								  double input_rows, List **pgset);
+					double input_rows, List **pgset);
 
 extern Selectivity estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey,
 						 double nbuckets);
diff --git a/src/include/utils/snapshot.h b/src/include/utils/snapshot.h
index a734bf00752..cbf1bbdeb1f 100644
--- a/src/include/utils/snapshot.h
+++ b/src/include/utils/snapshot.h
@@ -118,7 +118,7 @@ typedef enum
 	HeapTupleSelfUpdated,
 	HeapTupleUpdated,
 	HeapTupleBeingUpdated,
-	HeapTupleWouldBlock	/* can be returned by heap_tuple_lock */
+	HeapTupleWouldBlock			/* can be returned by heap_tuple_lock */
 } HTSU_Result;
 
 #endif   /* SNAPSHOT_H */
diff --git a/src/include/utils/sortsupport.h b/src/include/utils/sortsupport.h
index 44c596f507d..787404ed903 100644
--- a/src/include/utils/sortsupport.h
+++ b/src/include/utils/sortsupport.h
@@ -100,18 +100,18 @@ typedef struct SortSupportData
 	 * INT_MIN, as callers are allowed to negate the result before using it.
 	 *
 	 * This may be either the authoritative comparator, or the abbreviated
-	 * comparator.  Core code may switch this over the initial preference of an
-	 * opclass support function despite originally indicating abbreviation was
-	 * applicable, by assigning the authoritative comparator back.
+	 * comparator.  Core code may switch this over the initial preference of
+	 * an opclass support function despite originally indicating abbreviation
+	 * was applicable, by assigning the authoritative comparator back.
 	 */
 	int			(*comparator) (Datum x, Datum y, SortSupport ssup);
 
 	/*
 	 * "Abbreviated key" infrastructure follows.
 	 *
-	 * All callbacks must be set by sortsupport opclasses that make use of this
-	 * optional additional infrastructure (unless for whatever reasons the
-	 * opclass doesn't proceed with abbreviation, in which case
+	 * All callbacks must be set by sortsupport opclasses that make use of
+	 * this optional additional infrastructure (unless for whatever reasons
+	 * the opclass doesn't proceed with abbreviation, in which case
 	 * abbrev_converter must not be set).
 	 *
 	 * This allows opclass authors to supply a conversion routine, used to
@@ -120,20 +120,20 @@ typedef struct SortSupportData
 	 * pass-by-value Datum format that only the opclass has knowledge of.  An
 	 * alternative comparator, used only with this alternative representation
 	 * must also be provided (which is assigned to "comparator").  This
-	 * representation is a simple approximation of the original Datum.  It must
-	 * be possible to compare datums of this representation with each other
-	 * using the supplied alternative comparator, and have any non-zero return
-	 * value be a reliable proxy for what a proper comparison would indicate.
-	 * Returning zero from the alternative comparator does not indicate
-	 * equality, as with a conventional support routine 1, though -- it
-	 * indicates that it wasn't possible to determine how the two abbreviated
-	 * values compared.  A proper comparison, using "abbrev_full_comparator"/
-	 * ApplySortAbbrevFullComparator() is therefore required.  In many cases
-	 * this results in most or all comparisons only using the cheap alternative
-	 * comparison func, which is typically implemented as code that compiles to
-	 * just a few CPU instructions.  CPU cache miss penalties are expensive; to
-	 * get good overall performance, sort infrastructure must heavily weigh
-	 * cache performance.
+	 * representation is a simple approximation of the original Datum.  It
+	 * must be possible to compare datums of this representation with each
+	 * other using the supplied alternative comparator, and have any non-zero
+	 * return value be a reliable proxy for what a proper comparison would
+	 * indicate. Returning zero from the alternative comparator does not
+	 * indicate equality, as with a conventional support routine 1, though --
+	 * it indicates that it wasn't possible to determine how the two
+	 * abbreviated values compared.  A proper comparison, using
+	 * "abbrev_full_comparator"/ ApplySortAbbrevFullComparator() is therefore
+	 * required.  In many cases this results in most or all comparisons only
+	 * using the cheap alternative comparison func, which is typically
+	 * implemented as code that compiles to just a few CPU instructions.  CPU
+	 * cache miss penalties are expensive; to get good overall performance,
+	 * sort infrastructure must heavily weigh cache performance.
 	 *
 	 * Opclass authors must consider the final cardinality of abbreviated keys
 	 * when devising an encoding scheme.  It's possible for a strategy to work
@@ -143,16 +143,16 @@ typedef struct SortSupportData
 	 */
 
 	/*
-	 * "abbreviate" concerns whether or not the abbreviated key optimization is
-	 * applicable in principle (that is, the sortsupport routine needs to know
-	 * if its dealing with a key where an abbreviated representation can
+	 * "abbreviate" concerns whether or not the abbreviated key optimization
+	 * is applicable in principle (that is, the sortsupport routine needs to
+	 * know if its dealing with a key where an abbreviated representation can
 	 * usefully be packed together.  Conventionally, this is the leading
 	 * attribute key).  Note, however, that in order to determine that
 	 * abbreviation is not in play, the core code always checks whether or not
 	 * the opclass has set abbrev_converter.  This is a one way, one time
 	 * message to the opclass.
 	 */
-	bool			abbreviate;
+	bool		abbreviate;
 
 	/*
 	 * Converter to abbreviated format, from original representation.  Core
@@ -161,24 +161,25 @@ typedef struct SortSupportData
 	 * guaranteed NOT NULL, because it doesn't make sense to factor NULLness
 	 * into ad-hoc cost model.
 	 *
-	 * abbrev_converter is tested to see if abbreviation is in play.  Core code
-	 * may set it to NULL to indicate abbreviation should not be used (which is
-	 * something sortsupport routines need not concern themselves with).
-	 * However, sortsupport routines must not set it when it is immediately
-	 * established that abbreviation should not proceed (e.g., for !abbreviate
-	 * calls, or due to platform-specific impediments to using abbreviation).
+	 * abbrev_converter is tested to see if abbreviation is in play.  Core
+	 * code may set it to NULL to indicate abbreviation should not be used
+	 * (which is something sortsupport routines need not concern themselves
+	 * with). However, sortsupport routines must not set it when it is
+	 * immediately established that abbreviation should not proceed (e.g., for
+	 * !abbreviate calls, or due to platform-specific impediments to using
+	 * abbreviation).
 	 */
-	Datum			(*abbrev_converter) (Datum original, SortSupport ssup);
+	Datum		(*abbrev_converter) (Datum original, SortSupport ssup);
 
 	/*
-	 * abbrev_abort callback allows clients to verify that the current strategy
-	 * is working out, using a sortsupport routine defined ad-hoc cost model.
-	 * If there is a lot of duplicate abbreviated keys in practice, it's useful
-	 * to be able to abandon the strategy before paying too high a cost in
-	 * conversion (perhaps certain opclass-specific adaptations are useful
-	 * too).
+	 * abbrev_abort callback allows clients to verify that the current
+	 * strategy is working out, using a sortsupport routine defined ad-hoc
+	 * cost model. If there is a lot of duplicate abbreviated keys in
+	 * practice, it's useful to be able to abandon the strategy before paying
+	 * too high a cost in conversion (perhaps certain opclass-specific
+	 * adaptations are useful too).
 	 */
-	bool			(*abbrev_abort) (int memtupcount, SortSupport ssup);
+	bool		(*abbrev_abort) (int memtupcount, SortSupport ssup);
 
 	/*
 	 * Full, authoritative comparator for key that an abbreviated
@@ -200,8 +201,8 @@ extern int ApplySortComparator(Datum datum1, bool isNull1,
 					Datum datum2, bool isNull2,
 					SortSupport ssup);
 extern int ApplySortAbbrevFullComparator(Datum datum1, bool isNull1,
-							Datum datum2, bool isNull2,
-							SortSupport ssup);
+							  Datum datum2, bool isNull2,
+							  SortSupport ssup);
 #endif   /* !PG_USE_INLINE */
 #if defined(PG_USE_INLINE) || defined(SORTSUPPORT_INCLUDE_DEFINITIONS)
 /*
@@ -284,6 +285,6 @@ ApplySortAbbrevFullComparator(Datum datum1, bool isNull1,
 extern void PrepareSortSupportComparisonShim(Oid cmpFunc, SortSupport ssup);
 extern void PrepareSortSupportFromOrderingOp(Oid orderingOp, SortSupport ssup);
 extern void PrepareSortSupportFromIndexRel(Relation indexRel, int16 strategy,
-										   SortSupport ssup);
+							   SortSupport ssup);
 
 #endif   /* SORTSUPPORT_H */
diff --git a/src/interfaces/ecpg/ecpglib/data.c b/src/interfaces/ecpg/ecpglib/data.c
index 2dcb9153da5..c3cd94682dd 100644
--- a/src/interfaces/ecpg/ecpglib/data.c
+++ b/src/interfaces/ecpg/ecpglib/data.c
@@ -291,7 +291,8 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
 					date		ddres;
 					timestamp	tres;
 					interval   *ires;
-					char *endptr, endchar;
+					char	   *endptr,
+								endchar;
 
 				case ECPGt_short:
 				case ECPGt_int:
diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c
index 22ce55b60a0..bcb38d25f87 100644
--- a/src/interfaces/ecpg/ecpglib/execute.c
+++ b/src/interfaces/ecpg/ecpglib/execute.c
@@ -499,9 +499,9 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
 	char	   *newcopy = NULL;
 
 	/*
-	 * arrays are not possible unless the column is an array, too
-	 * FIXME: we do not know if the column is an array here
-	 * array input to singleton column will result in a runtime error
+	 * arrays are not possible unless the column is an array, too FIXME: we do
+	 * not know if the column is an array here array input to singleton column
+	 * will result in a runtime error
 	 */
 
 	/*
@@ -852,7 +852,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
 						mallocedval = ecpg_strdup("", lineno);
 
 					if (!mallocedval)
-							return false;
+						return false;
 
 					for (element = 0; element < asize; element++)
 					{
@@ -915,7 +915,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
 						mallocedval = ecpg_strdup("", lineno);
 
 					if (!mallocedval)
-							return false;
+						return false;
 
 					for (element = 0; element < asize; element++)
 					{
@@ -962,7 +962,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
 						mallocedval = ecpg_strdup("", lineno);
 
 					if (!mallocedval)
-							return false;
+						return false;
 
 					for (element = 0; element < asize; element++)
 					{
@@ -1009,7 +1009,7 @@ ecpg_store_input(const int lineno, const bool force_indicator, const struct vari
 						mallocedval = ecpg_strdup("", lineno);
 
 					if (!mallocedval)
-							return false;
+						return false;
 
 					for (element = 0; element < asize; element++)
 					{
diff --git a/src/interfaces/ecpg/ecpglib/memory.c b/src/interfaces/ecpg/ecpglib/memory.c
index dffc3a76187..9c1d20efc56 100644
--- a/src/interfaces/ecpg/ecpglib/memory.c
+++ b/src/interfaces/ecpg/ecpglib/memory.c
@@ -107,7 +107,7 @@ static struct auto_mem *auto_allocs = NULL;
 char *
 ecpg_auto_alloc(long size, int lineno)
 {
-	void	*ptr = (void *) ecpg_alloc(size, lineno);
+	void	   *ptr = (void *) ecpg_alloc(size, lineno);
 
 	if (!ptr)
 		return NULL;
diff --git a/src/interfaces/ecpg/preproc/parse.pl b/src/interfaces/ecpg/preproc/parse.pl
index 588bb63e53f..74557425a93 100644
--- a/src/interfaces/ecpg/preproc/parse.pl
+++ b/src/interfaces/ecpg/preproc/parse.pl
@@ -42,17 +42,16 @@ my %replace_token = (
 
 # or in the block
 my %replace_string = (
-	'NOT_LA'          => 'not',
-	'NULLS_LA'        => 'nulls',
-	'WITH_LA'         => 'with',
-	'TYPECAST'        => '::',
-	'DOT_DOT'         => '..',
-	'COLON_EQUALS'    => ':=',
-	'EQUALS_GREATER'  => '=>',
-	'LESS_EQUALS'     => '<=',
-	'GREATER_EQUALS'  => '>=',
-	'NOT_EQUALS'      => '<>',
-);
+	'NOT_LA'         => 'not',
+	'NULLS_LA'       => 'nulls',
+	'WITH_LA'        => 'with',
+	'TYPECAST'       => '::',
+	'DOT_DOT'        => '..',
+	'COLON_EQUALS'   => ':=',
+	'EQUALS_GREATER' => '=>',
+	'LESS_EQUALS'    => '<=',
+	'GREATER_EQUALS' => '>=',
+	'NOT_EQUALS'     => '<>',);
 
 # specific replace_types for specific non-terminals - never include the ':'
 # ECPG-only replace_types are defined in ecpg-replace_types
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index e7c7a256e63..a45f4cba34f 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -2011,7 +2011,7 @@ keep_going:						/* We will come back to here until there is
 							appendPQExpBuffer(&conn->errorMessage,
 											  libpq_gettext("could not look up local user ID %d: %s\n"),
 											  (int) uid,
-											  pqStrerror(passerr, sebuf, sizeof(sebuf)));
+								  pqStrerror(passerr, sebuf, sizeof(sebuf)));
 						else
 							appendPQExpBuffer(&conn->errorMessage,
 											  libpq_gettext("local user with ID %d does not exist\n"),
@@ -3845,7 +3845,7 @@ ldapServiceLookup(const char *purl, PQconninfoOption *options,
 						if (!options[i].val)
 						{
 							printfPQExpBuffer(errorMessage,
-											libpq_gettext("out of memory\n"));
+										   libpq_gettext("out of memory\n"));
 							free(result);
 							return 3;
 						}
@@ -4085,7 +4085,7 @@ parseServiceFile(const char *serviceFile,
 						if (!options[i].val)
 						{
 							printfPQExpBuffer(errorMessage,
-											libpq_gettext("out of memory\n"));
+										   libpq_gettext("out of memory\n"));
 							fclose(f);
 							return 3;
 						}
@@ -4516,7 +4516,7 @@ conninfo_array_parse(const char *const * keywords, const char *const * values,
 								if (!options[k].val)
 								{
 									printfPQExpBuffer(errorMessage,
-													  libpq_gettext("out of memory\n"));
+										   libpq_gettext("out of memory\n"));
 									PQconninfoFree(options);
 									PQconninfoFree(dbname_options);
 									return NULL;
@@ -4526,6 +4526,7 @@ conninfo_array_parse(const char *const * keywords, const char *const * values,
 						}
 					}
 				}
+
 				/*
 				 * Forget the parsed connection string, so that any subsequent
 				 * dbname parameters will not be expanded.
@@ -5018,7 +5019,7 @@ conninfo_uri_parse_params(char *params,
 			/* Insert generic message if conninfo_storeval didn't give one. */
 			if (errorMessage->len == 0)
 				printfPQExpBuffer(errorMessage,
-								  libpq_gettext("invalid URI query parameter: \"%s\"\n"),
+					  libpq_gettext("invalid URI query parameter: \"%s\"\n"),
 								  keyword);
 			/* And fail. */
 			if (malloced)
diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c
index 25aecc2f144..0dbcf732227 100644
--- a/src/interfaces/libpq/fe-misc.c
+++ b/src/interfaces/libpq/fe-misc.c
@@ -744,10 +744,10 @@ retry3:
 	 * the file selected for reading already.
 	 *
 	 * In SSL mode it's even worse: SSL_read() could say WANT_READ and then
-	 * data could arrive before we make the pqReadReady() test, but the
-	 * second SSL_read() could still say WANT_READ because the data received
-	 * was not a complete SSL record.  So we must play dumb and assume there
-	 * is more data, relying on the SSL layer to detect true EOF.
+	 * data could arrive before we make the pqReadReady() test, but the second
+	 * SSL_read() could still say WANT_READ because the data received was not
+	 * a complete SSL record.  So we must play dumb and assume there is more
+	 * data, relying on the SSL layer to detect true EOF.
 	 */
 
 #ifdef USE_SSL
@@ -916,9 +916,9 @@ pqSendSome(PGconn *conn, int len)
 			 * might not arrive until after we've gone to sleep.  Therefore,
 			 * we wait for either read ready or write ready.
 			 *
-			 * In non-blocking mode, we don't wait here directly, but return
-			 * 1 to indicate that data is still pending.  The caller should
-			 * wait for both read and write ready conditions, and call
+			 * In non-blocking mode, we don't wait here directly, but return 1
+			 * to indicate that data is still pending.  The caller should wait
+			 * for both read and write ready conditions, and call
 			 * PQconsumeInput() on read ready, but just in case it doesn't, we
 			 * call pqReadData() ourselves before returning.  That's not
 			 * enough if the data has not arrived yet, but it's the best we
diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c
index 0cc5e8d33db..d4069b9e0bd 100644
--- a/src/interfaces/libpq/fe-secure-openssl.c
+++ b/src/interfaces/libpq/fe-secure-openssl.c
@@ -64,19 +64,19 @@
 
 static bool verify_peer_name_matches_certificate(PGconn *);
 static int	verify_cb(int ok, X509_STORE_CTX *ctx);
-static int	verify_peer_name_matches_certificate_name(PGconn *conn,
-												  ASN1_STRING *name,
-												  char **store_name);
+static int verify_peer_name_matches_certificate_name(PGconn *conn,
+										  ASN1_STRING *name,
+										  char **store_name);
 static void destroy_ssl_system(void);
 static int	initialize_SSL(PGconn *conn);
 static PostgresPollingStatusType open_client_SSL(PGconn *);
 static char *SSLerrmessage(void);
 static void SSLerrfree(char *buf);
 
-static int my_sock_read(BIO *h, char *buf, int size);
-static int my_sock_write(BIO *h, const char *buf, int size);
+static int	my_sock_read(BIO *h, char *buf, int size);
+static int	my_sock_write(BIO *h, const char *buf, int size);
 static BIO_METHOD *my_BIO_s_socket(void);
-static int my_SSL_set_fd(PGconn *conn, int fd);
+static int	my_SSL_set_fd(PGconn *conn, int fd);
 
 
 static bool pq_init_ssl_lib = true;
@@ -187,7 +187,7 @@ pgtls_open_client(PGconn *conn)
 }
 
 /*
- *  Is there unread data waiting in the SSL read buffer?
+ *	Is there unread data waiting in the SSL read buffer?
  */
 bool
 pgtls_read_pending(PGconn *conn)
@@ -221,7 +221,7 @@ rloop:
 			{
 				/* Not supposed to happen, so we don't translate the msg */
 				printfPQExpBuffer(&conn->errorMessage,
-								  "SSL_read failed but did not provide error information\n");
+				  "SSL_read failed but did not provide error information\n");
 				/* assume the connection is broken */
 				result_errno = ECONNRESET;
 			}
@@ -247,7 +247,7 @@ rloop:
 					printfPQExpBuffer(&conn->errorMessage,
 									  libpq_gettext(
 								"server closed the connection unexpectedly\n"
-													"\tThis probably means the server terminated abnormally\n"
+					"\tThis probably means the server terminated abnormally\n"
 							 "\tbefore or while processing the request.\n"));
 				else
 					printfPQExpBuffer(&conn->errorMessage,
@@ -279,12 +279,12 @@ rloop:
 		case SSL_ERROR_ZERO_RETURN:
 
 			/*
-			 * Per OpenSSL documentation, this error code is only returned
-			 * for a clean connection closure, so we should not report it
-			 * as a server crash.
+			 * Per OpenSSL documentation, this error code is only returned for
+			 * a clean connection closure, so we should not report it as a
+			 * server crash.
 			 */
 			printfPQExpBuffer(&conn->errorMessage,
-							  libpq_gettext("SSL connection has been closed unexpectedly\n"));
+			 libpq_gettext("SSL connection has been closed unexpectedly\n"));
 			result_errno = ECONNRESET;
 			n = -1;
 			break;
@@ -329,7 +329,7 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len)
 			{
 				/* Not supposed to happen, so we don't translate the msg */
 				printfPQExpBuffer(&conn->errorMessage,
-								  "SSL_write failed but did not provide error information\n");
+				 "SSL_write failed but did not provide error information\n");
 				/* assume the connection is broken */
 				result_errno = ECONNRESET;
 			}
@@ -337,9 +337,8 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len)
 		case SSL_ERROR_WANT_READ:
 
 			/*
-			 * Returning 0 here causes caller to wait for write-ready,
-			 * which is not really the right thing, but it's the best we
-			 * can do.
+			 * Returning 0 here causes caller to wait for write-ready, which
+			 * is not really the right thing, but it's the best we can do.
 			 */
 			n = 0;
 			break;
@@ -354,7 +353,7 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len)
 					printfPQExpBuffer(&conn->errorMessage,
 									  libpq_gettext(
 								"server closed the connection unexpectedly\n"
-				   "\tThis probably means the server terminated abnormally\n"
+					"\tThis probably means the server terminated abnormally\n"
 							 "\tbefore or while processing the request.\n"));
 				else
 					printfPQExpBuffer(&conn->errorMessage,
@@ -386,12 +385,12 @@ pgtls_write(PGconn *conn, const void *ptr, size_t len)
 		case SSL_ERROR_ZERO_RETURN:
 
 			/*
-			 * Per OpenSSL documentation, this error code is only returned
-			 * for a clean connection closure, so we should not report it
-			 * as a server crash.
+			 * Per OpenSSL documentation, this error code is only returned for
+			 * a clean connection closure, so we should not report it as a
+			 * server crash.
 			 */
 			printfPQExpBuffer(&conn->errorMessage,
-							  libpq_gettext("SSL connection has been closed unexpectedly\n"));
+			 libpq_gettext("SSL connection has been closed unexpectedly\n"));
 			result_errno = ECONNRESET;
 			n = -1;
 			break;
@@ -509,7 +508,7 @@ verify_peer_name_matches_certificate_name(PGconn *conn, ASN1_STRING *name_entry,
 	if (name_entry == NULL)
 	{
 		printfPQExpBuffer(&conn->errorMessage,
-				  libpq_gettext("SSL certificate's name entry is missing\n"));
+				 libpq_gettext("SSL certificate's name entry is missing\n"));
 		return -1;
 	}
 
@@ -539,7 +538,7 @@ verify_peer_name_matches_certificate_name(PGconn *conn, ASN1_STRING *name_entry,
 	{
 		free(name);
 		printfPQExpBuffer(&conn->errorMessage,
-			libpq_gettext("SSL certificate's name contains embedded null\n"));
+		   libpq_gettext("SSL certificate's name contains embedded null\n"));
 		return -1;
 	}
 
@@ -574,8 +573,9 @@ verify_peer_name_matches_certificate(PGconn *conn)
 	bool		found_match = false;
 	bool		got_error = false;
 	char	   *first_name = NULL;
+
 	STACK_OF(GENERAL_NAME) *peer_san;
-	int 		i;
+	int			i;
 	int			rc;
 
 	/*
@@ -614,7 +614,7 @@ verify_peer_name_matches_certificate(PGconn *conn)
 
 				names_examined++;
 				rc = verify_peer_name_matches_certificate_name(conn,
-															   name->d.dNSName,
+															 name->d.dNSName,
 															   &alt_name);
 				if (rc == -1)
 					got_error = true;
@@ -634,6 +634,7 @@ verify_peer_name_matches_certificate(PGconn *conn)
 		}
 		sk_GENERAL_NAME_free(peer_san);
 	}
+
 	/*
 	 * If there is no subjectAltName extension of type dNSName, check the
 	 * Common Name.
@@ -656,10 +657,10 @@ verify_peer_name_matches_certificate(PGconn *conn)
 			{
 				names_examined++;
 				rc = verify_peer_name_matches_certificate_name(
-					conn,
-					X509_NAME_ENTRY_get_data(
-						X509_NAME_get_entry(subject_name, cn_index)),
-					&first_name);
+															   conn,
+													X509_NAME_ENTRY_get_data(
+								X509_NAME_get_entry(subject_name, cn_index)),
+															   &first_name);
 
 				if (rc == -1)
 					got_error = true;
@@ -672,10 +673,10 @@ verify_peer_name_matches_certificate(PGconn *conn)
 	if (!found_match && !got_error)
 	{
 		/*
-		 * No match. Include the name from the server certificate in the
-		 * error message, to aid debugging broken configurations. If there
-		 * are multiple names, only print the first one to avoid an overly
-		 * long error message.
+		 * No match. Include the name from the server certificate in the error
+		 * message, to aid debugging broken configurations. If there are
+		 * multiple names, only print the first one to avoid an overly long
+		 * error message.
 		 */
 		if (names_examined > 1)
 		{
@@ -806,8 +807,10 @@ pgtls_init(PGconn *conn)
 
 		if (ssl_open_connections++ == 0)
 		{
-			/* These are only required for threaded libcrypto applications, but
-			 * make sure we don't stomp on them if they're already set. */
+			/*
+			 * These are only required for threaded libcrypto applications,
+			 * but make sure we don't stomp on them if they're already set.
+			 */
 			if (CRYPTO_get_id_callback() == NULL)
 				CRYPTO_set_id_callback(pq_threadidcallback);
 			if (CRYPTO_get_locking_callback() == NULL)
@@ -888,8 +891,10 @@ destroy_ssl_system(void)
 
 	if (pq_init_crypto_lib && ssl_open_connections == 0)
 	{
-		/* No connections left, unregister libcrypto callbacks, if no one
-		 * registered different ones in the meantime. */
+		/*
+		 * No connections left, unregister libcrypto callbacks, if no one
+		 * registered different ones in the meantime.
+		 */
 		if (CRYPTO_get_locking_callback() == pq_lockingcallback)
 			CRYPTO_set_locking_callback(NULL);
 		if (CRYPTO_get_id_callback() == pq_threadidcallback)
@@ -1538,6 +1543,7 @@ PQsslAttributes(PGconn *conn)
 		"protocol",
 		NULL
 	};
+
 	return result;
 }
 
@@ -1555,7 +1561,7 @@ PQsslAttribute(PGconn *conn, const char *attribute_name)
 	if (strcmp(attribute_name, "key_bits") == 0)
 	{
 		static char sslbits_str[10];
-		int		sslbits;
+		int			sslbits;
 
 		SSL_get_cipher_bits(conn->ssl, &sslbits);
 		snprintf(sslbits_str, sizeof(sslbits_str), "%d", sslbits);
@@ -1571,7 +1577,7 @@ PQsslAttribute(PGconn *conn, const char *attribute_name)
 	if (strcmp(attribute_name, "protocol") == 0)
 		return SSL_get_version(conn->ssl);
 
-	return NULL;		/* unknown attribute */
+	return NULL;				/* unknown attribute */
 }
 
 /*
diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c
index 57c572bd096..db91e52ee90 100644
--- a/src/interfaces/libpq/fe-secure.c
+++ b/src/interfaces/libpq/fe-secure.c
@@ -251,14 +251,14 @@ pqsecure_raw_read(PGconn *conn, void *ptr, size_t len)
 				printfPQExpBuffer(&conn->errorMessage,
 								  libpq_gettext(
 								"server closed the connection unexpectedly\n"
-					"\tThis probably means the server terminated abnormally\n"
+				   "\tThis probably means the server terminated abnormally\n"
 							 "\tbefore or while processing the request.\n"));
 				break;
 #endif
 
 			default:
 				printfPQExpBuffer(&conn->errorMessage,
-					libpq_gettext("could not receive data from server: %s\n"),
+				   libpq_gettext("could not receive data from server: %s\n"),
 								  SOCK_STRERROR(result_errno,
 												sebuf, sizeof(sebuf)));
 				break;
@@ -323,9 +323,9 @@ retry_masked:
 		result_errno = SOCK_ERRNO;
 
 		/*
-		 * If we see an EINVAL, it may be because MSG_NOSIGNAL isn't
-		 * available on this machine.  So, clear sigpipe_flag so we don't
-		 * try the flag again, and retry the send().
+		 * If we see an EINVAL, it may be because MSG_NOSIGNAL isn't available
+		 * on this machine.  So, clear sigpipe_flag so we don't try the flag
+		 * again, and retry the send().
 		 */
 #ifdef MSG_NOSIGNAL
 		if (flags != 0 && result_errno == EINVAL)
@@ -360,15 +360,15 @@ retry_masked:
 				printfPQExpBuffer(&conn->errorMessage,
 								  libpq_gettext(
 								"server closed the connection unexpectedly\n"
-					"\tThis probably means the server terminated abnormally\n"
+				   "\tThis probably means the server terminated abnormally\n"
 							 "\tbefore or while processing the request.\n"));
 				break;
 
 			default:
 				printfPQExpBuffer(&conn->errorMessage,
 						libpq_gettext("could not send data to server: %s\n"),
-									  SOCK_STRERROR(result_errno,
-													sebuf, sizeof(sebuf)));
+								  SOCK_STRERROR(result_errno,
+												sebuf, sizeof(sebuf)));
 				break;
 		}
 	}
@@ -411,7 +411,7 @@ PQsslAttribute(PGconn *conn, const char *attribute_name)
 const char **
 PQsslAttributes(PGconn *conn)
 {
-	static const char *result[] = { NULL };
+	static const char *result[] = {NULL};
 
 	return result;
 }
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 840df2ee0b8..78baaac05db 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -2690,7 +2690,7 @@ compile_plperl_function(Oid fn_oid, bool is_trigger, bool is_event_trigger)
 			MemoryContext oldcxt;
 
 			protrftypes_datum = SysCacheGetAttr(PROCOID, procTup,
-												Anum_pg_proc_protrftypes, &isnull);
+										  Anum_pg_proc_protrftypes, &isnull);
 			oldcxt = MemoryContextSwitchTo(TopMemoryContext);
 			prodesc->trftypes = isnull ? NIL : oid_array_to_list(protrftypes_datum);
 			MemoryContextSwitchTo(oldcxt);
diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c
index d5d44f06849..16ff84560bb 100644
--- a/src/pl/plpython/plpy_procedure.c
+++ b/src/pl/plpython/plpy_procedure.c
@@ -172,8 +172,9 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger)
 	{
 		MemoryContext oldcxt;
 
-		Datum protrftypes_datum = SysCacheGetAttr(PROCOID, procTup,
-												  Anum_pg_proc_protrftypes, &isnull);
+		Datum		protrftypes_datum = SysCacheGetAttr(PROCOID, procTup,
+										  Anum_pg_proc_protrftypes, &isnull);
+
 		oldcxt = MemoryContextSwitchTo(TopMemoryContext);
 		proc->trftypes = isnull ? NIL : oid_array_to_list(protrftypes_datum);
 		MemoryContextSwitchTo(oldcxt);
diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c
index 7b65a931831..7a45b228714 100644
--- a/src/pl/plpython/plpy_typeio.c
+++ b/src/pl/plpython/plpy_typeio.c
@@ -401,18 +401,18 @@ PLy_output_datum_func2(PLyObToDatum *arg, HeapTuple typeTup, Oid langid, List *t
 		arg->func = PLyObject_ToComposite;
 	}
 	else
-	switch (base_type)
-	{
-		case BOOLOID:
-			arg->func = PLyObject_ToBool;
-			break;
-		case BYTEAOID:
-			arg->func = PLyObject_ToBytea;
-			break;
-		default:
-			arg->func = PLyObject_ToDatum;
-			break;
-	}
+		switch (base_type)
+		{
+			case BOOLOID:
+				arg->func = PLyObject_ToBool;
+				break;
+			case BYTEAOID:
+				arg->func = PLyObject_ToBytea;
+				break;
+			default:
+				arg->func = PLyObject_ToDatum;
+				break;
+		}
 
 	if (element_type)
 	{
@@ -464,39 +464,39 @@ PLy_input_datum_func2(PLyDatumToOb *arg, Oid typeOid, HeapTuple typeTup, Oid lan
 		perm_fmgr_info(funcid, &arg->typtransform);
 	}
 	else
-	switch (base_type)
-	{
-		case BOOLOID:
-			arg->func = PLyBool_FromBool;
-			break;
-		case FLOAT4OID:
-			arg->func = PLyFloat_FromFloat4;
-			break;
-		case FLOAT8OID:
-			arg->func = PLyFloat_FromFloat8;
-			break;
-		case NUMERICOID:
-			arg->func = PLyDecimal_FromNumeric;
-			break;
-		case INT2OID:
-			arg->func = PLyInt_FromInt16;
-			break;
-		case INT4OID:
-			arg->func = PLyInt_FromInt32;
-			break;
-		case INT8OID:
-			arg->func = PLyLong_FromInt64;
-			break;
-		case OIDOID:
-			arg->func = PLyLong_FromOid;
-			break;
-		case BYTEAOID:
-			arg->func = PLyBytes_FromBytea;
-			break;
-		default:
-			arg->func = PLyString_FromDatum;
-			break;
-	}
+		switch (base_type)
+		{
+			case BOOLOID:
+				arg->func = PLyBool_FromBool;
+				break;
+			case FLOAT4OID:
+				arg->func = PLyFloat_FromFloat4;
+				break;
+			case FLOAT8OID:
+				arg->func = PLyFloat_FromFloat8;
+				break;
+			case NUMERICOID:
+				arg->func = PLyDecimal_FromNumeric;
+				break;
+			case INT2OID:
+				arg->func = PLyInt_FromInt16;
+				break;
+			case INT4OID:
+				arg->func = PLyInt_FromInt32;
+				break;
+			case INT8OID:
+				arg->func = PLyLong_FromInt64;
+				break;
+			case OIDOID:
+				arg->func = PLyLong_FromOid;
+				break;
+			case BYTEAOID:
+				arg->func = PLyBytes_FromBytea;
+				break;
+			default:
+				arg->func = PLyString_FromDatum;
+				break;
+		}
 
 	if (element_type)
 	{
diff --git a/src/port/gettimeofday.c b/src/port/gettimeofday.c
index 3c602385188..af1157134b5 100644
--- a/src/port/gettimeofday.c
+++ b/src/port/gettimeofday.c
@@ -38,14 +38,14 @@ static const unsigned __int64 epoch = UINT64CONST(116444736000000000);
  * January 1, 1601 (UTC).
  */
 #define FILETIME_UNITS_PER_SEC	10000000L
-#define FILETIME_UNITS_PER_USEC	10
+#define FILETIME_UNITS_PER_USEC 10
 
 /*
  * Both GetSystemTimeAsFileTime and GetSystemTimePreciseAsFileTime share a
  * signature, so we can just store a pointer to whichever we find. This
  * is the pointer's type.
  */
-typedef VOID (WINAPI *PgGetSystemTimeFn)(LPFILETIME);
+typedef		VOID(WINAPI * PgGetSystemTimeFn) (LPFILETIME);
 
 /* One-time initializer function, must match that signature. */
 static void WINAPI init_gettimeofday(LPFILETIME lpSystemTimeAsFileTime);
@@ -71,12 +71,12 @@ init_gettimeofday(LPFILETIME lpSystemTimeAsFileTime)
 	 *
 	 * While we could look up the Windows version and skip this on Windows
 	 * versions below Windows 8 / Windows Server 2012 there isn't much point,
-	 * and determining the windows version is its self somewhat Windows version
-	 * and development SDK specific...
+	 * and determining the windows version is its self somewhat Windows
+	 * version and development SDK specific...
 	 */
 	pg_get_system_time = (PgGetSystemTimeFn) GetProcAddress(
-			GetModuleHandle(TEXT("kernel32.dll")),
-				"GetSystemTimePreciseAsFileTime");
+									   GetModuleHandle(TEXT("kernel32.dll")),
+										   "GetSystemTimePreciseAsFileTime");
 	if (pg_get_system_time == NULL)
 	{
 		/*
@@ -84,15 +84,15 @@ init_gettimeofday(LPFILETIME lpSystemTimeAsFileTime)
 		 * the function isn't present. No other error should occur.
 		 *
 		 * We can't report an error here because this might be running in
-		 * frontend code; and even if we're in the backend, it's too early
-		 * to elog(...) if we get some unexpected error.  Also, it's not a
+		 * frontend code; and even if we're in the backend, it's too early to
+		 * elog(...) if we get some unexpected error.  Also, it's not a
 		 * serious problem, so just silently fall back to
 		 * GetSystemTimeAsFileTime irrespective of why the failure occurred.
 		 */
 		pg_get_system_time = &GetSystemTimeAsFileTime;
 	}
 
-	(*pg_get_system_time)(lpSystemTimeAsFileTime);
+	(*pg_get_system_time) (lpSystemTimeAsFileTime);
 }
 
 /*
@@ -107,13 +107,13 @@ gettimeofday(struct timeval * tp, struct timezone * tzp)
 	FILETIME	file_time;
 	ULARGE_INTEGER ularge;
 
-	(*pg_get_system_time)(&file_time);
+	(*pg_get_system_time) (&file_time);
 	ularge.LowPart = file_time.dwLowDateTime;
 	ularge.HighPart = file_time.dwHighDateTime;
 
 	tp->tv_sec = (long) ((ularge.QuadPart - epoch) / FILETIME_UNITS_PER_SEC);
 	tp->tv_usec = (long) (((ularge.QuadPart - epoch) % FILETIME_UNITS_PER_SEC)
-		/ FILETIME_UNITS_PER_USEC);
+						  / FILETIME_UNITS_PER_USEC);
 
 	return 0;
 }
diff --git a/src/port/pg_crc32c_choose.c b/src/port/pg_crc32c_choose.c
index ba0d1670f82..5a297ae30c3 100644
--- a/src/port/pg_crc32c_choose.c
+++ b/src/port/pg_crc32c_choose.c
@@ -42,7 +42,7 @@ pg_crc32c_sse42_available(void)
 #error cpuid instruction not available
 #endif
 
-	return (exx[2] & (1 << 20)) != 0;    /* SSE 4.2 */
+	return (exx[2] & (1 << 20)) != 0;	/* SSE 4.2 */
 }
 
 /*
@@ -60,4 +60,4 @@ pg_comp_crc32c_choose(pg_crc32c crc, const void *data, size_t len)
 	return pg_comp_crc32c(crc, data, len);
 }
 
-pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len) = pg_comp_crc32c_choose;
+pg_crc32c	(*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len) = pg_comp_crc32c_choose;
diff --git a/src/port/pg_crc32c_sse42.c b/src/port/pg_crc32c_sse42.c
index a22a9dd78bf..150d4cb15b7 100644
--- a/src/port/pg_crc32c_sse42.c
+++ b/src/port/pg_crc32c_sse42.c
@@ -45,6 +45,7 @@ pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len)
 		p += 4;
 	}
 #else
+
 	/*
 	 * Process four bytes at a time. (The eight byte instruction is not
 	 * available on the 32-bit x86 architecture).
@@ -54,7 +55,7 @@ pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len)
 		crc = _mm_crc32_u32(crc, *((const unsigned int *) p));
 		p += 4;
 	}
-#endif /* __x86_64__ */
+#endif   /* __x86_64__ */
 
 	/* Process any remaining bytes one at a time. */
 	while (p < pend)
diff --git a/src/port/win32setlocale.c b/src/port/win32setlocale.c
index f253967c07c..ca9d5906975 100644
--- a/src/port/win32setlocale.c
+++ b/src/port/win32setlocale.c
@@ -40,9 +40,9 @@ struct locale_map
 {
 	/*
 	 * String in locale name to replace. Can be a single string (end is NULL),
-	 * or separate start and end strings. If two strings are given, the
-	 * locale name must contain both of them, and everything between them
-	 * is replaced. This is used for a poor-man's regexp search, allowing
+	 * or separate start and end strings. If two strings are given, the locale
+	 * name must contain both of them, and everything between them is
+	 * replaced. This is used for a poor-man's regexp search, allowing
 	 * replacement of "start.*end".
 	 */
 	const char *locale_name_start;
@@ -104,7 +104,7 @@ static const struct locale_map locale_map_result[] = {
 #define MAX_LOCALE_NAME_LEN		100
 
 static const char *
-map_locale(const struct locale_map *map, const char *locale)
+map_locale(const struct locale_map * map, const char *locale)
 {
 	static char aliasbuf[MAX_LOCALE_NAME_LEN];
 	int			i;
diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c
index f9ba4132e7c..44a5cb0277e 100644
--- a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c
+++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c
@@ -63,8 +63,8 @@ Datum
 get_altertable_subcmdtypes(PG_FUNCTION_ARGS)
 {
 	CollectedCommand *cmd = (CollectedCommand *) PG_GETARG_POINTER(0);
-	ArrayBuildState	*astate = NULL;
-	ListCell *cell;
+	ArrayBuildState *astate = NULL;
+	ListCell   *cell;
 
 	if (cmd->type != SCT_AlterTable)
 		elog(ERROR, "command is not ALTER TABLE");
@@ -72,8 +72,8 @@ get_altertable_subcmdtypes(PG_FUNCTION_ARGS)
 	foreach(cell, cmd->d.alterTable.subcmds)
 	{
 		CollectedATSubcmd *sub = lfirst(cell);
-		AlterTableCmd  *subcmd = (AlterTableCmd *) sub->parsetree;
-		const char     *strtype;
+		AlterTableCmd *subcmd = (AlterTableCmd *) sub->parsetree;
+		const char *strtype;
 
 		Assert(IsA(subcmd, AlterTableCmd));
 
diff --git a/src/test/modules/test_rls_hooks/test_rls_hooks.c b/src/test/modules/test_rls_hooks/test_rls_hooks.c
index c2122e7981a..61b62d55b4c 100644
--- a/src/test/modules/test_rls_hooks/test_rls_hooks.c
+++ b/src/test/modules/test_rls_hooks/test_rls_hooks.c
@@ -35,11 +35,12 @@ PG_MODULE_MAGIC;
 static row_security_policy_hook_type prev_row_security_policy_hook_permissive = NULL;
 static row_security_policy_hook_type prev_row_security_policy_hook_restrictive = NULL;
 
-void        _PG_init(void);
-void        _PG_fini(void);
+void		_PG_init(void);
+void		_PG_fini(void);
 
 /* Install hooks */
-void		_PG_init(void)
+void
+_PG_init(void)
 {
 	/* Save values for unload  */
 	prev_row_security_policy_hook_permissive = row_security_policy_hook_permissive;
@@ -51,7 +52,8 @@ void		_PG_init(void)
 }
 
 /* Uninstall hooks */
-void        _PG_fini(void)
+void
+_PG_fini(void)
 {
 	row_security_policy_hook_permissive = prev_row_security_policy_hook_permissive;
 	row_security_policy_hook_restrictive = prev_row_security_policy_hook_restrictive;
@@ -60,20 +62,20 @@ void        _PG_fini(void)
 /*
  * Return permissive policies to be added
  */
-List*
+List *
 test_rls_hooks_permissive(CmdType cmdtype, Relation relation)
 {
-	List			   *policies = NIL;
-	RowSecurityPolicy  *policy = palloc0(sizeof(RowSecurityPolicy));
-	Datum				role;
-	FuncCall		   *n;
-	Node			   *e;
-	ColumnRef		   *c;
-	ParseState		   *qual_pstate;
-	RangeTblEntry	   *rte;
-
-	if (strcmp(RelationGetRelationName(relation),"rls_test_permissive")
-			&& strcmp(RelationGetRelationName(relation),"rls_test_both"))
+	List	   *policies = NIL;
+	RowSecurityPolicy *policy = palloc0(sizeof(RowSecurityPolicy));
+	Datum		role;
+	FuncCall   *n;
+	Node	   *e;
+	ColumnRef  *c;
+	ParseState *qual_pstate;
+	RangeTblEntry *rte;
+
+	if (strcmp(RelationGetRelationName(relation), "rls_test_permissive")
+		&& strcmp(RelationGetRelationName(relation), "rls_test_both"))
 		return NIL;
 
 	qual_pstate = make_parsestate(NULL);
@@ -88,11 +90,11 @@ test_rls_hooks_permissive(CmdType cmdtype, Relation relation)
 	policy->policy_id = InvalidOid;
 	policy->polcmd = '*';
 	policy->roles = construct_array(&role, 1, OIDOID, sizeof(Oid), true, 'i');
+
 	/*
-	policy->qual = (Expr *) makeConst(BOOLOID, -1, InvalidOid,
-									  sizeof(bool), BoolGetDatum(true),
-									  false, true);
-									  */
+	 * policy->qual = (Expr *) makeConst(BOOLOID, -1, InvalidOid,
+	 * sizeof(bool), BoolGetDatum(true), false, true);
+	 */
 
 	n = makeFuncCall(list_make2(makeString("pg_catalog"),
 								makeString("current_user")), NIL, 0);
@@ -101,11 +103,11 @@ test_rls_hooks_permissive(CmdType cmdtype, Relation relation)
 	c->fields = list_make1(makeString("username"));
 	c->location = 0;
 
-	e = (Node*) makeSimpleA_Expr(AEXPR_OP, "=", (Node*) n, (Node*) c, 0);
+	e = (Node *) makeSimpleA_Expr(AEXPR_OP, "=", (Node *) n, (Node *) c, 0);
 
-	policy->qual = (Expr*) transformWhereClause(qual_pstate, copyObject(e),
-												EXPR_KIND_WHERE,
-												"POLICY");
+	policy->qual = (Expr *) transformWhereClause(qual_pstate, copyObject(e),
+												 EXPR_KIND_WHERE,
+												 "POLICY");
 
 	policy->with_check_qual = copyObject(policy->qual);
 	policy->hassublinks = false;
@@ -118,21 +120,21 @@ test_rls_hooks_permissive(CmdType cmdtype, Relation relation)
 /*
  * Return restrictive policies to be added
  */
-List*
+List *
 test_rls_hooks_restrictive(CmdType cmdtype, Relation relation)
 {
-	List			   *policies = NIL;
-	RowSecurityPolicy  *policy = palloc0(sizeof(RowSecurityPolicy));
-	Datum				role;
-	FuncCall		   *n;
-	Node			   *e;
-	ColumnRef		   *c;
-	ParseState		   *qual_pstate;
-	RangeTblEntry	   *rte;
-
-
-	if (strcmp(RelationGetRelationName(relation),"rls_test_restrictive")
-			&& strcmp(RelationGetRelationName(relation),"rls_test_both"))
+	List	   *policies = NIL;
+	RowSecurityPolicy *policy = palloc0(sizeof(RowSecurityPolicy));
+	Datum		role;
+	FuncCall   *n;
+	Node	   *e;
+	ColumnRef  *c;
+	ParseState *qual_pstate;
+	RangeTblEntry *rte;
+
+
+	if (strcmp(RelationGetRelationName(relation), "rls_test_restrictive")
+		&& strcmp(RelationGetRelationName(relation), "rls_test_both"))
 		return NIL;
 
 	qual_pstate = make_parsestate(NULL);
@@ -155,11 +157,11 @@ test_rls_hooks_restrictive(CmdType cmdtype, Relation relation)
 	c->fields = list_make1(makeString("supervisor"));
 	c->location = 0;
 
-	e = (Node*) makeSimpleA_Expr(AEXPR_OP, "=", (Node*) n, (Node*) c, 0);
+	e = (Node *) makeSimpleA_Expr(AEXPR_OP, "=", (Node *) n, (Node *) c, 0);
 
-	policy->qual = (Expr*) transformWhereClause(qual_pstate, copyObject(e),
-												EXPR_KIND_WHERE,
-												"POLICY");
+	policy->qual = (Expr *) transformWhereClause(qual_pstate, copyObject(e),
+												 EXPR_KIND_WHERE,
+												 "POLICY");
 
 	policy->with_check_qual = copyObject(policy->qual);
 	policy->hassublinks = false;
diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm
index 003cd9a2cca..ef42366888e 100644
--- a/src/test/perl/TestLib.pm
+++ b/src/test/perl/TestLib.pm
@@ -60,11 +60,15 @@ $ENV{PGPORT} = int($ENV{PGPORT}) % 65536;
 
 sub tempdir
 {
-	return File::Temp::tempdir('tmp_testXXXX', DIR => $ENV{TESTDIR} || cwd(), CLEANUP => 1);
+	return File::Temp::tempdir(
+		'tmp_testXXXX',
+		DIR => $ENV{TESTDIR} || cwd(),
+		CLEANUP => 1);
 }
 
 sub tempdir_short
 {
+
 	# Use a separate temp dir outside the build tree for the
 	# Unix-domain socket, to avoid file name length issues.
 	return File::Temp::tempdir(CLEANUP => 1);
@@ -75,7 +79,7 @@ sub standard_initdb
 	my $pgdata = shift;
 	system_or_bail("initdb -D '$pgdata' -A trust -N >/dev/null");
 	system_or_bail("$ENV{top_builddir}/src/test/regress/pg_regress",
-				   '--config-auth', $pgdata);
+		'--config-auth', $pgdata);
 }
 
 my ($test_server_datadir, $test_server_logfile);
@@ -90,7 +94,7 @@ sub start_test_server
 	standard_initdb "$tempdir/pgdata";
 	$ret = system 'pg_ctl', '-D', "$tempdir/pgdata", '-s', '-w', '-l',
 	  "$tempdir/logfile", '-o',
-	  "--fsync=off -k $tempdir_short --listen-addresses='' --log-statement=all",
+"--fsync=off -k $tempdir_short --listen-addresses='' --log-statement=all",
 	  'start';
 
 	if ($ret != 0)
@@ -185,7 +189,8 @@ sub program_options_handling_ok
 {
 	my ($cmd) = @_;
 	my ($stdout, $stderr);
-	my $result = run [ $cmd, '--not-a-valid-option' ], '>', \$stdout, '2>', \$stderr;
+	my $result = run [ $cmd, '--not-a-valid-option' ], '>', \$stdout, '2>',
+	  \$stderr;
 	ok(!$result, "$cmd with invalid option nonzero exit code");
 	isnt($stderr, '', "$cmd with invalid option prints error message");
 }
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index 2df5c1b5d6a..cc260169a48 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -1139,15 +1139,15 @@ spawn_process(const char *cmdline)
 	/* in parent */
 	return pid;
 #else
-	PROCESS_INFORMATION	pi;
-	char	*cmdline2;
-	HANDLE	restrictedToken;
+	PROCESS_INFORMATION pi;
+	char	   *cmdline2;
+	HANDLE		restrictedToken;
 
 	memset(&pi, 0, sizeof(pi));
 	cmdline2 = psprintf("cmd /c \"%s\"", cmdline);
 
-	if((restrictedToken =
-		CreateRestrictedProcess(cmdline2, &pi, progname)) == 0)
+	if ((restrictedToken =
+		 CreateRestrictedProcess(cmdline2, &pi, progname)) == 0)
 		exit(2);
 
 	CloseHandle(pi.hThread);
@@ -1973,8 +1973,9 @@ help(void)
 	printf(_("  --schedule=FILE           use test ordering schedule from FILE\n"));
 	printf(_("                            (can be used multiple times to concatenate)\n"));
 	printf(_("  --temp-instance=DIR       create a temporary instance in DIR\n"));
-	printf(_("  --use-existing            use an existing installation\n")); // XXX
-	printf(_("\n"));
+	printf(_("  --use-existing            use an existing installation\n"));
+	//XXX
+		printf(_("\n"));
 	printf(_("Options for \"temp-instance\" mode:\n"));
 	printf(_("  --no-locale               use C locale\n"));
 	printf(_("  --port=PORT               start postmaster on PORT\n"));
@@ -2446,8 +2447,8 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
 
 	/*
 	 * If there were no errors, remove the temp instance immediately to
-	 * conserve disk space.  (If there were errors, we leave the instance
-	 * in place for possible manual investigation.)
+	 * conserve disk space.  (If there were errors, we leave the instance in
+	 * place for possible manual investigation.)
 	 */
 	if (temp_instance && fail_count == 0 && fail_ignore_count == 0)
 	{
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index d68c90cd9bb..bd31a3d3825 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -1,8 +1,8 @@
 /*------------------------------------------------------------------------
  *
  * regress.c
- *   Code for various C-language functions defined as part of the
- *   regression tests.
+ *	 Code for various C-language functions defined as part of the
+ *	 regression tests.
  *
  * This code is released under the terms of the PostgreSQL License.
  *
@@ -911,14 +911,14 @@ test_atomic_flag(void)
 
 	pg_atomic_clear_flag(&flag);
 }
-#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+#endif   /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
 
 static void
 test_atomic_uint32(void)
 {
 	pg_atomic_uint32 var;
-	uint32 expected;
-	int i;
+	uint32		expected;
+	int			i;
 
 	pg_atomic_init_u32(&var, 0);
 
@@ -955,7 +955,7 @@ test_atomic_uint32(void)
 	if (pg_atomic_fetch_add_u32(&var, INT_MAX) != INT_MAX)
 		elog(ERROR, "pg_atomic_add_fetch_u32() #3 wrong");
 
-	pg_atomic_fetch_add_u32(&var, 1); /* top up to UINT_MAX */
+	pg_atomic_fetch_add_u32(&var, 1);	/* top up to UINT_MAX */
 
 	if (pg_atomic_read_u32(&var) != UINT_MAX)
 		elog(ERROR, "atomic_read_u32() #2 wrong");
@@ -963,7 +963,7 @@ test_atomic_uint32(void)
 	if (pg_atomic_fetch_sub_u32(&var, INT_MAX) != UINT_MAX)
 		elog(ERROR, "pg_atomic_fetch_sub_u32() #2 wrong");
 
-	if (pg_atomic_read_u32(&var) != (uint32)INT_MAX + 1)
+	if (pg_atomic_read_u32(&var) != (uint32) INT_MAX + 1)
 		elog(ERROR, "atomic_read_u32() #3 wrong: %u", pg_atomic_read_u32(&var));
 
 	expected = pg_atomic_sub_fetch_u32(&var, INT_MAX);
@@ -1018,8 +1018,8 @@ static void
 test_atomic_uint64(void)
 {
 	pg_atomic_uint64 var;
-	uint64 expected;
-	int i;
+	uint64		expected;
+	int			i;
 
 	pg_atomic_init_u64(&var, 0);
 
@@ -1083,13 +1083,13 @@ test_atomic_uint64(void)
 		elog(ERROR, "pg_atomic_fetch_and_u64() #1 wrong");
 
 	if (pg_atomic_fetch_and_u64(&var, ~1) != 1)
-		elog(ERROR, "pg_atomic_fetch_and_u64() #2 wrong: is "UINT64_FORMAT,
+		elog(ERROR, "pg_atomic_fetch_and_u64() #2 wrong: is " UINT64_FORMAT,
 			 pg_atomic_read_u64(&var));
 	/* no bits set anymore */
 	if (pg_atomic_fetch_and_u64(&var, ~0) != 0)
 		elog(ERROR, "pg_atomic_fetch_and_u64() #3 wrong");
 }
-#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
+#endif   /* PG_HAVE_ATOMIC_U64_SUPPORT */
 
 
 PG_FUNCTION_INFO_V1(test_atomic_ops);
diff --git a/src/test/ssl/ServerSetup.pm b/src/test/ssl/ServerSetup.pm
index fda3afebc18..4ce4a69e741 100644
--- a/src/test/ssl/ServerSetup.pm
+++ b/src/test/ssl/ServerSetup.pm
@@ -38,74 +38,81 @@ sub copy_files
 	foreach my $orig_file (@orig_files)
 	{
 		my $base_file = basename($orig_file);
-		copy($orig_file, "$dest/$base_file") or die "Could not copy $orig_file to $dest";
+		copy($orig_file, "$dest/$base_file")
+		  or die "Could not copy $orig_file to $dest";
 	}
 }
 
 sub configure_test_server_for_ssl
 {
-  my $tempdir = $_[0];
+	my $tempdir = $_[0];
 
-  # Create test users and databases
-  psql 'postgres', "CREATE USER ssltestuser";
-  psql 'postgres', "CREATE USER anotheruser";
-  psql 'postgres', "CREATE DATABASE trustdb";
-  psql 'postgres', "CREATE DATABASE certdb";
+	# Create test users and databases
+	psql 'postgres', "CREATE USER ssltestuser";
+	psql 'postgres', "CREATE USER anotheruser";
+	psql 'postgres', "CREATE DATABASE trustdb";
+	psql 'postgres', "CREATE DATABASE certdb";
 
-  # enable logging etc.
-  open CONF, ">>$tempdir/pgdata/postgresql.conf";
-  print CONF "fsync=off\n";
-  print CONF "log_connections=on\n";
-  print CONF "log_hostname=on\n";
-  print CONF "log_statement=all\n";
+	# enable logging etc.
+	open CONF, ">>$tempdir/pgdata/postgresql.conf";
+	print CONF "fsync=off\n";
+	print CONF "log_connections=on\n";
+	print CONF "log_hostname=on\n";
+	print CONF "log_statement=all\n";
 
-  # enable SSL and set up server key
-  print CONF "include 'sslconfig.conf'";
+	# enable SSL and set up server key
+	print CONF "include 'sslconfig.conf'";
 
-  close CONF;
+	close CONF;
 
-  # Copy all server certificates and keys, and client root cert, to the data dir
-  copy_files("ssl/server-*.crt", "$tempdir/pgdata");
-  copy_files("ssl/server-*.key", "$tempdir/pgdata");
-  system_or_bail "chmod 0600 '$tempdir'/pgdata/server-*.key";
-  copy_files("ssl/root+client_ca.crt", "$tempdir/pgdata");
-  copy_files("ssl/root+client.crl", "$tempdir/pgdata");
+# Copy all server certificates and keys, and client root cert, to the data dir
+	copy_files("ssl/server-*.crt", "$tempdir/pgdata");
+	copy_files("ssl/server-*.key", "$tempdir/pgdata");
+	system_or_bail "chmod 0600 '$tempdir'/pgdata/server-*.key";
+	copy_files("ssl/root+client_ca.crt", "$tempdir/pgdata");
+	copy_files("ssl/root+client.crl",    "$tempdir/pgdata");
 
   # Only accept SSL connections from localhost. Our tests don't depend on this
   # but seems best to keep it as narrow as possible for security reasons.
   #
   # When connecting to certdb, also check the client certificate.
-  open HBA, ">$tempdir/pgdata/pg_hba.conf";
-  print HBA "# TYPE  DATABASE        USER            ADDRESS                 METHOD\n";
-  print HBA "hostssl trustdb         ssltestuser     127.0.0.1/32            trust\n";
-  print HBA "hostssl trustdb         ssltestuser     ::1/128                 trust\n";
-  print HBA "hostssl certdb          ssltestuser     127.0.0.1/32            cert\n";
-  print HBA "hostssl certdb          ssltestuser     ::1/128                 cert\n";
-  close HBA;
+	open HBA, ">$tempdir/pgdata/pg_hba.conf";
+	print HBA
+"# TYPE  DATABASE        USER            ADDRESS                 METHOD\n";
+	print HBA
+"hostssl trustdb         ssltestuser     127.0.0.1/32            trust\n";
+	print HBA
+"hostssl trustdb         ssltestuser     ::1/128                 trust\n";
+	print HBA
+"hostssl certdb          ssltestuser     127.0.0.1/32            cert\n";
+	print HBA
+"hostssl certdb          ssltestuser     ::1/128                 cert\n";
+	close HBA;
 }
 
 # Change the configuration to use given server cert file, and restart
 # the server so that the configuration takes effect.
 sub switch_server_cert
 {
-  my $tempdir = $_[0];
-  my $certfile = $_[1];
-
-  diag "Restarting server with certfile \"$certfile\"...";
-
-  open SSLCONF, ">$tempdir/pgdata/sslconfig.conf";
-  print SSLCONF "ssl=on\n";
-  print SSLCONF "ssl_ca_file='root+client_ca.crt'\n";
-  print SSLCONF "ssl_cert_file='$certfile.crt'\n";
-  print SSLCONF "ssl_key_file='$certfile.key'\n";
-  print SSLCONF "ssl_crl_file='root+client.crl'\n";
-  close SSLCONF;
-
-  # Stop and restart server to reload the new config. We cannot use
-  # restart_test_server() because that overrides listen_addresses to only all
-  # Unix domain socket connections.
-
-  system_or_bail 'pg_ctl', 'stop', '-s', '-D', "$tempdir/pgdata", '-w';
-  system_or_bail 'pg_ctl', 'start', '-s', '-D', "$tempdir/pgdata", '-w', '-l',
-        "$tempdir/logfile";
+	my $tempdir  = $_[0];
+	my $certfile = $_[1];
+
+	diag "Restarting server with certfile \"$certfile\"...";
+
+	open SSLCONF, ">$tempdir/pgdata/sslconfig.conf";
+	print SSLCONF "ssl=on\n";
+	print SSLCONF "ssl_ca_file='root+client_ca.crt'\n";
+	print SSLCONF "ssl_cert_file='$certfile.crt'\n";
+	print SSLCONF "ssl_key_file='$certfile.key'\n";
+	print SSLCONF "ssl_crl_file='root+client.crl'\n";
+	close SSLCONF;
+
+   # Stop and restart server to reload the new config. We cannot use
+   # restart_test_server() because that overrides listen_addresses to only all
+   # Unix domain socket connections.
+
+	system_or_bail 'pg_ctl', 'stop',  '-s', '-D', "$tempdir/pgdata", '-w';
+	system_or_bail 'pg_ctl', 'start', '-s', '-D', "$tempdir/pgdata", '-w',
+	  '-l',
+	  "$tempdir/logfile";
 }
diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl
index b492a56688a..926b529198d 100644
--- a/src/test/ssl/t/001_ssltests.pl
+++ b/src/test/ssl/t/001_ssltests.pl
@@ -23,9 +23,10 @@ BEGIN
 # This is the hostname used to connect to the server. This cannot be a
 # hostname, because the server certificate is always for the domain
 # postgresql-ssl-regression.test.
-my $SERVERHOSTADDR='127.0.0.1';
+my $SERVERHOSTADDR = '127.0.0.1';
 
 my $tempdir = TestLib::tempdir;
+
 #my $tempdir = "tmp_check";
 
 
@@ -33,17 +34,17 @@ my $tempdir = TestLib::tempdir;
 
 my $common_connstr;
 
-sub run_test_psql {
-	my $connstr = $_[0];
+sub run_test_psql
+{
+	my $connstr   = $_[0];
 	my $logstring = $_[1];
 
-	my $cmd = [ 'psql',
-				'-A', '-t',
-				'-c', "SELECT 'connected with $connstr'",
-				'-d', "$connstr"
-		];
+	my $cmd = [
+		'psql', '-A', '-t', '-c', "SELECT 'connected with $connstr'",
+		'-d', "$connstr" ];
 
-    open CLIENTLOG, ">>$tempdir/client-log" or die "Could not open client-log file";
+	open CLIENTLOG, ">>$tempdir/client-log"
+	  or die "Could not open client-log file";
 	print CLIENTLOG "\n# Running test: $connstr $logstring\n";
 	close CLIENTLOG;
 
@@ -57,14 +58,17 @@ sub run_test_psql {
 # which also contains a libpq connection string.
 #
 # The second argument is a hostname to connect to.
-sub test_connect_ok {
+sub test_connect_ok
+{
 	my $connstr = $_[0];
 
-	my $result = run_test_psql("$common_connstr $connstr", "(should succeed)");
+	my $result =
+	  run_test_psql("$common_connstr $connstr", "(should succeed)");
 	ok($result, $connstr);
 }
 
-sub test_connect_fails {
+sub test_connect_fails
+{
 	my $connstr = $_[0];
 
 	my $result = run_test_psql("$common_connstr $connstr", "(should fail)");
@@ -91,7 +95,8 @@ switch_server_cert($tempdir, 'server-cn-only');
 
 diag "running client tests...";
 
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
 
 # The server should not accept non-SSL connections
 diag "test that the server doesn't accept non-SSL connections";
@@ -100,7 +105,7 @@ test_connect_fails("sslmode=disable");
 # Try without a root cert. In sslmode=require, this should work. In verify-ca
 # or verify-full mode it should fail
 diag "connect without server root cert";
-test_connect_ok   ("sslrootcert=invalid sslmode=require");
+test_connect_ok("sslrootcert=invalid sslmode=require");
 test_connect_fails("sslrootcert=invalid sslmode=verify-ca");
 test_connect_fails("sslrootcert=invalid sslmode=verify-full");
 
@@ -118,42 +123,50 @@ test_connect_fails("sslrootcert=ssl/server_ca.crt sslmode=verify-ca");
 
 # And finally, with the correct root cert.
 diag "connect with correct server CA cert file";
-test_connect_ok   ("sslrootcert=ssl/root+server_ca.crt sslmode=require");
-test_connect_ok   ("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca");
-test_connect_ok   ("sslrootcert=ssl/root+server_ca.crt sslmode=verify-full");
+test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=require");
+test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca");
+test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=verify-full");
 
 # Test with cert root file that contains two certificates. The client should
 # be able to pick the right one, regardless of the order in the file.
-test_connect_ok   ("sslrootcert=ssl/both-cas-1.crt sslmode=verify-ca");
-test_connect_ok   ("sslrootcert=ssl/both-cas-2.crt sslmode=verify-ca");
+test_connect_ok("sslrootcert=ssl/both-cas-1.crt sslmode=verify-ca");
+test_connect_ok("sslrootcert=ssl/both-cas-2.crt sslmode=verify-ca");
 
 diag "testing sslcrl option with a non-revoked cert";
 
 # Invalid CRL filename is the same as no CRL, succeeds
-test_connect_ok   ("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=invalid");
+test_connect_ok(
+	"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=invalid");
+
 # A CRL belonging to a different CA is not accepted, fails
-test_connect_fails("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/client.crl");
+test_connect_fails(
+"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/client.crl");
+
 # With the correct CRL, succeeds (this cert is not revoked)
-test_connect_ok   ("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl");
+test_connect_ok(
+"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl"
+);
 
 # Check that connecting with verify-full fails, when the hostname doesn't
 # match the hostname in the server's certificate.
 diag "test mismatch between hostname and server certificate";
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
 
-test_connect_ok   ("sslmode=require host=wronghost.test");
-test_connect_ok   ("sslmode=verify-ca host=wronghost.test");
+test_connect_ok("sslmode=require host=wronghost.test");
+test_connect_ok("sslmode=verify-ca host=wronghost.test");
 test_connect_fails("sslmode=verify-full host=wronghost.test");
 
 # Test Subject Alternative Names.
 switch_server_cert($tempdir, 'server-multiple-alt-names');
 
 diag "test hostname matching with X509 Subject Alternative Names";
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
 
-test_connect_ok   ("host=dns1.alt-name.pg-ssltest.test");
-test_connect_ok   ("host=dns2.alt-name.pg-ssltest.test");
-test_connect_ok   ("host=foo.wildcard.pg-ssltest.test");
+test_connect_ok("host=dns1.alt-name.pg-ssltest.test");
+test_connect_ok("host=dns2.alt-name.pg-ssltest.test");
+test_connect_ok("host=foo.wildcard.pg-ssltest.test");
 
 test_connect_fails("host=wronghost.alt-name.pg-ssltest.test");
 test_connect_fails("host=deep.subdomain.wildcard.pg-ssltest.test");
@@ -163,9 +176,10 @@ test_connect_fails("host=deep.subdomain.wildcard.pg-ssltest.test");
 switch_server_cert($tempdir, 'server-single-alt-name');
 
 diag "test hostname matching with a single X509 Subject Alternative Name";
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
 
-test_connect_ok   ("host=single.alt-name.pg-ssltest.test");
+test_connect_ok("host=single.alt-name.pg-ssltest.test");
 
 test_connect_fails("host=wronghost.alt-name.pg-ssltest.test");
 test_connect_fails("host=deep.subdomain.wildcard.pg-ssltest.test");
@@ -175,48 +189,58 @@ test_connect_fails("host=deep.subdomain.wildcard.pg-ssltest.test");
 switch_server_cert($tempdir, 'server-cn-and-alt-names');
 
 diag "test certificate with both a CN and SANs";
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR sslmode=verify-full";
 
-test_connect_ok   ("host=dns1.alt-name.pg-ssltest.test");
-test_connect_ok   ("host=dns2.alt-name.pg-ssltest.test");
+test_connect_ok("host=dns1.alt-name.pg-ssltest.test");
+test_connect_ok("host=dns2.alt-name.pg-ssltest.test");
 test_connect_fails("host=common-name.pg-ssltest.test");
 
 # Finally, test a server certificate that has no CN or SANs. Of course, that's
 # not a very sensible certificate, but libpq should handle it gracefully.
 switch_server_cert($tempdir, 'server-no-names');
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR";
 
-test_connect_ok   ("sslmode=verify-ca host=common-name.pg-ssltest.test");
+test_connect_ok("sslmode=verify-ca host=common-name.pg-ssltest.test");
 test_connect_fails("sslmode=verify-full host=common-name.pg-ssltest.test");
 
 # Test that the CRL works
 diag "Testing client-side CRL";
 switch_server_cert($tempdir, 'server-revoked');
 
-$common_connstr="user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
+$common_connstr =
+"user=ssltestuser dbname=trustdb sslcert=invalid hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
 
 # Without the CRL, succeeds. With it, fails.
-test_connect_ok   ("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca");
-test_connect_fails("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl");
+test_connect_ok("sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca");
+test_connect_fails(
+"sslrootcert=ssl/root+server_ca.crt sslmode=verify-ca sslcrl=ssl/root+server.crl"
+);
 
 ### Part 2. Server-side tests.
 ###
 ### Test certificate authorization.
 
 diag "Testing certificate authorization...";
-$common_connstr="sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR";
+$common_connstr =
+"sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR";
 
 # no client cert
 test_connect_fails("user=ssltestuser sslcert=invalid");
 
 # correct client cert
-test_connect_ok   ("user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client.key");
+test_connect_ok(
+	"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client.key");
 
 # client cert belonging to another user
-test_connect_fails("user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client.key");
+test_connect_fails(
+	"user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client.key");
 
 # revoked client cert
-test_connect_fails("user=ssltestuser sslcert=ssl/client-revoked.crt sslkey=ssl/client-revoked.key");
+test_connect_fails(
+"user=ssltestuser sslcert=ssl/client-revoked.crt sslkey=ssl/client-revoked.key"
+);
 
 
 # All done! Save the log, before the temporary installation is deleted
diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm
index b617835c0c5..b592f997f6c 100644
--- a/src/tools/msvc/Install.pm
+++ b/src/tools/msvc/Install.pm
@@ -306,6 +306,7 @@ sub CopySolutionOutput
 			}
 			else    # 'StaticLibrary'
 			{
+
 				# Static lib, such as libpgport, only used internally
 				# during build, don't install.
 				next;
@@ -438,6 +439,7 @@ sub CopyContribFiles
 		opendir($D, $subdir) || croak "Could not opendir on $subdir!\n";
 		while (my $d = readdir($D))
 		{
+
 			# These configuration-based exclusions must match vcregress.pl
 			next if ($d eq "uuid-ossp"       && !defined($config->{uuid}));
 			next if ($d eq "sslinfo"         && !defined($config->{openssl}));
@@ -463,7 +465,7 @@ sub CopySubdirFiles
 	return if ($module =~ /^\./);
 	return unless (-f "$subdir/$module/Makefile");
 	return
-		  if ($insttype eq "client" && !grep { $_ eq $module } @client_contribs);
+	  if ($insttype eq "client" && !grep { $_ eq $module } @client_contribs);
 
 	my $mf = read_file("$subdir/$module/Makefile");
 	$mf =~ s{\\\r?\n}{}g;
@@ -480,18 +482,17 @@ sub CopySubdirFiles
 
 		foreach my $f (split /\s+/, $flist)
 		{
-			lcopy(
-					"$subdir/$module/$f.control",
-					"$target/share/extension/$f.control"
-				) || croak("Could not copy file $f.control in contrib $module");
-				print '.';
+			lcopy("$subdir/$module/$f.control",
+				"$target/share/extension/$f.control")
+			  || croak("Could not copy file $f.control in contrib $module");
+			print '.';
 		}
 	}
 
 	$flist = '';
 	if ($mf =~ /^DATA_built\s*=\s*(.*)$/m) { $flist .= $1 }
 	if ($mf =~ /^DATA\s*=\s*(.*)$/m)       { $flist .= " $1" }
-	$flist =~ s/^\s*//;  # Remove leading spaces if we had only DATA_built
+	$flist =~ s/^\s*//;    # Remove leading spaces if we had only DATA_built
 
 	if ($flist ne '')
 	{
@@ -500,9 +501,9 @@ sub CopySubdirFiles
 		foreach my $f (split /\s+/, $flist)
 		{
 			lcopy("$subdir/$module/$f",
-				  "$target/share/$moduledir/" . basename($f))
-				  || croak("Could not copy file $f in contrib $module");
-				print '.';
+				"$target/share/$moduledir/" . basename($f))
+			  || croak("Could not copy file $f in contrib $module");
+			print '.';
 		}
 	}
 
@@ -533,8 +534,7 @@ sub CopySubdirFiles
 		  if ($module eq 'spi');
 		foreach my $f (split /\s+/, $flist)
 		{
-			lcopy("$subdir/$module/$f",
-				  "$target/doc/$moduledir/$f")
+			lcopy("$subdir/$module/$f", "$target/doc/$moduledir/$f")
 			  || croak("Could not copy file $f in contrib $module");
 			print '.';
 		}
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index be06898d1ae..0603130c580 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -30,33 +30,30 @@ my $libpq;
 
 # Set of variables for modules in contrib/ and src/test/modules/
 my $contrib_defines = { 'refint' => 'REFINT_VERBOSE' };
-my @contrib_uselibpq =
-  ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo');
-my @contrib_uselibpgport = (
-	'oid2name',
-	'pg_standby',
-	'vacuumlo');
-my @contrib_uselibpgcommon = (
-	'oid2name',
-	'pg_standby',
-	'vacuumlo');
-my $contrib_extralibs = undef;
+my @contrib_uselibpq = ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo');
+my @contrib_uselibpgport   = ('oid2name', 'pg_standby', 'vacuumlo');
+my @contrib_uselibpgcommon = ('oid2name', 'pg_standby', 'vacuumlo');
+my $contrib_extralibs      = undef;
 my $contrib_extraincludes =
   { 'tsearch2' => ['contrib/tsearch2'], 'dblink' => ['src/backend'] };
 my $contrib_extrasource = {
 	'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ],
-	'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], };
+	'seg'  => [ 'contrib/seg/segscan.l',   'contrib/seg/segparse.y' ], };
 my @contrib_excludes = (
-	'commit_ts', 'hstore_plperl',
-	'hstore_plpython', 'intagg',
-	'ltree_plpython', 'pgcrypto',
-	'sepgsql');
+	'commit_ts',      'hstore_plperl', 'hstore_plpython', 'intagg',
+	'ltree_plpython', 'pgcrypto',      'sepgsql');
 
 # Set of variables for frontend modules
 my $frontend_defines = { 'initdb' => 'FRONTEND' };
 my @frontend_uselibpq = ('pg_ctl', 'pg_upgrade', 'pgbench', 'psql');
-my @frontend_uselibpgport = ( 'pg_archivecleanup', 'pg_test_fsync', 'pg_test_timing', 'pg_upgrade', 'pg_xlogdump', 'pgbench' );
-my @frontend_uselibpgcommon = ( 'pg_archivecleanup', 'pg_test_fsync', 'pg_test_timing', 'pg_upgrade', 'pg_xlogdump', 'pgbench' );
+my @frontend_uselibpgport = (
+	'pg_archivecleanup', 'pg_test_fsync',
+	'pg_test_timing',    'pg_upgrade',
+	'pg_xlogdump',       'pgbench');
+my @frontend_uselibpgcommon = (
+	'pg_archivecleanup', 'pg_test_fsync',
+	'pg_test_timing',    'pg_upgrade',
+	'pg_xlogdump',       'pgbench');
 my $frontend_extralibs = {
 	'initdb'     => ['ws2_32.lib'],
 	'pg_restore' => ['ws2_32.lib'],
@@ -68,10 +65,10 @@ my $frontend_extraincludes = {
 my $frontend_extrasource = {
 	'psql' => ['src/bin/psql/psqlscan.l'],
 	'pgbench' =>
-		[ 'src/bin/pgbench/exprscan.l', 'src/bin/pgbench/exprparse.y' ],
-};
-my @frontend_excludes =
-  ('pgevent', 'pg_basebackup', 'pg_rewind', 'pg_dump', 'pg_xlogdump', 'scripts');
+	  [ 'src/bin/pgbench/exprscan.l', 'src/bin/pgbench/exprparse.y' ], };
+my @frontend_excludes = (
+	'pgevent',     'pg_basebackup', 'pg_rewind', 'pg_dump',
+	'pg_xlogdump', 'scripts');
 
 sub mkvcbuild
 {
@@ -104,15 +101,16 @@ sub mkvcbuild
 	}
 	else
 	{
-		push(@pgportfiles, 'pg_crc32c_sb8.c')
+		push(@pgportfiles, 'pg_crc32c_sb8.c');
 	}
 
 	our @pgcommonallfiles = qw(
 	  exec.c pg_lzcompress.c pgfnames.c psprintf.c relpath.c rmtree.c
 	  string.c username.c wait_error.c);
 
-	our @pgcommonfrontendfiles = (@pgcommonallfiles, qw(fe_memutils.c
-	  restricted_token.c));
+	our @pgcommonfrontendfiles = (
+		@pgcommonallfiles, qw(fe_memutils.c
+		  restricted_token.c));
 
 	our @pgcommonbkndfiles = @pgcommonallfiles;
 
@@ -467,15 +465,16 @@ sub mkvcbuild
 	# ltree_plpython and hstore_plperl.
 	if ($solution->{options}->{python})
 	{
+
 		# Attempt to get python version and location.
 		# Assume python.exe in specified dir.
-		my $pythonprog = "import sys;print(sys.prefix);" .
-		  "print(str(sys.version_info[0])+str(sys.version_info[1]))";
-		my $prefixcmd = $solution->{options}->{python}
-			  . "\\python -c \"$pythonprog\"";
+		my $pythonprog = "import sys;print(sys.prefix);"
+		  . "print(str(sys.version_info[0])+str(sys.version_info[1]))";
+		my $prefixcmd =
+		  $solution->{options}->{python} . "\\python -c \"$pythonprog\"";
 		my $pyout = `$prefixcmd`;
 		die "Could not query for python version!\n" if $?;
-		my ($pyprefix,$pyver) = split(/\r?\n/,$pyout);
+		my ($pyprefix, $pyver) = split(/\r?\n/, $pyout);
 
 		# Sometimes (always?) if python is not present, the execution
 		# appears to work, but gives no data...
@@ -490,16 +489,14 @@ sub mkvcbuild
 		$plpython->AddReference($postgres);
 
 		# Add transform modules dependent on plpython
-		AddTransformModule('hstore_plpython' . $pymajorver,
-						   'contrib/hstore_plpython',
-						   'plpython' . $pymajorver,
-						   'src/pl/plpython', 'hstore',
-						   'contrib/hstore');
-		AddTransformModule('ltree_plpython' . $pymajorver,
-						   'contrib/ltree_plpython',
-						   'plpython' . $pymajorver,
-						   'src/pl/plpython', 'ltree',
-						   'contrib/ltree');
+		AddTransformModule(
+			'hstore_plpython' . $pymajorver, 'contrib/hstore_plpython',
+			'plpython' . $pymajorver,        'src/pl/plpython',
+			'hstore',                        'contrib/hstore');
+		AddTransformModule(
+			'ltree_plpython' . $pymajorver, 'contrib/ltree_plpython',
+			'plpython' . $pymajorver,       'src/pl/plpython',
+			'ltree',                        'contrib/ltree');
 	}
 
 	if ($solution->{options}->{perl})
@@ -587,10 +584,10 @@ sub mkvcbuild
 		}
 
 		# Add transform module dependent on plperl
-		my $hstore_plperl =
-		  AddTransformModule('hstore_plperl', 'contrib/hstore_plperl',
-							 'plperl', 'src/pl/plperl',
-							 'hstore', 'contrib/hstore');
+		my $hstore_plperl = AddTransformModule(
+			'hstore_plperl', 'contrib/hstore_plperl',
+			'plperl',        'src/pl/plperl',
+			'hstore',        'contrib/hstore');
 		$hstore_plperl->AddDefine('PLPERL_HAVE_UID_GID');
 	}
 
@@ -670,7 +667,7 @@ sub mkvcbuild
 	$pg_xlogdump->AddDefine('FRONTEND');
 	foreach my $xf (glob('src/backend/access/rmgrdesc/*desc.c'))
 	{
-		$pg_xlogdump->AddFile($xf)
+		$pg_xlogdump->AddFile($xf);
 	}
 	$pg_xlogdump->AddFile('src/backend/access/transam/xlogreader.c');
 
@@ -706,12 +703,12 @@ sub AddSimpleFrontend
 # Add a simple transform module
 sub AddTransformModule
 {
-	my $n = shift;
-	my $n_src = shift;
-	my $pl_proj_name = shift;
-	my $pl_src = shift;
+	my $n              = shift;
+	my $n_src          = shift;
+	my $pl_proj_name   = shift;
+	my $pl_src         = shift;
 	my $transform_name = shift;
-	my $transform_src = shift;
+	my $transform_src  = shift;
 
 	my $transform_proj = undef;
 	foreach my $proj (@{ $solution->{projects}->{'contrib'} })
@@ -723,7 +720,7 @@ sub AddTransformModule
 		}
 	}
 	die "could not find base module $transform_name for transform module $n"
-		if (!defined($transform_proj));
+	  if (!defined($transform_proj));
 
 	my $pl_proj = undef;
 	foreach my $proj (@{ $solution->{projects}->{'PLs'} })
@@ -735,7 +732,7 @@ sub AddTransformModule
 		}
 	}
 	die "could not find PL $pl_proj_name for transform module $n"
-		if (!defined($pl_proj));
+	  if (!defined($pl_proj));
 
 	my $p = $solution->AddProject($n, 'dll', 'contrib', $n_src);
 	for my $file (glob("$n_src/*.c"))
@@ -748,7 +745,7 @@ sub AddTransformModule
 	$p->AddIncludeDir($pl_src);
 	$p->AddReference($pl_proj);
 	$p->AddIncludeDir($pl_proj->{includes});
-	foreach my $pl_lib (@{$pl_proj->{libraries}})
+	foreach my $pl_lib (@{ $pl_proj->{libraries} })
 	{
 		$p->AddLibrary($pl_lib);
 	}
@@ -756,7 +753,7 @@ sub AddTransformModule
 	# Add base module dependencies
 	$p->AddIncludeDir($transform_src);
 	$p->AddIncludeDir($transform_proj->{includes});
-	foreach my $trans_lib (@{$transform_proj->{libraries}})
+	foreach my $trans_lib (@{ $transform_proj->{libraries} })
 	{
 		$p->AddLibrary($trans_lib);
 	}
@@ -769,14 +766,13 @@ sub AddTransformModule
 sub AddContrib
 {
 	my $subdir = shift;
-	my $n  = shift;
-	my $mf = Project::read_file("$subdir/$n/Makefile");
+	my $n      = shift;
+	my $mf     = Project::read_file("$subdir/$n/Makefile");
 
 	if ($mf =~ /^MODULE_big\s*=\s*(.*)$/mg)
 	{
 		my $dn = $1;
-		my $proj =
-		  $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n");
+		my $proj = $solution->AddProject($dn, 'dll', 'contrib', "$subdir/$n");
 		$proj->AddReference($postgres);
 		AdjustContribProj($proj);
 	}
@@ -794,8 +790,7 @@ sub AddContrib
 	}
 	elsif ($mf =~ /^PROGRAM\s*=\s*(.*)$/mg)
 	{
-		my $proj =
-		  $solution->AddProject($1, 'exe', 'contrib', "$subdir/$n");
+		my $proj = $solution->AddProject($1, 'exe', 'contrib', "$subdir/$n");
 		AdjustContribProj($proj);
 	}
 	else
@@ -841,7 +836,7 @@ sub GenerateContribSqlFiles
 				print "Building $out from $in (contrib/$n)...\n";
 				my $cont = Project::read_file("contrib/$n/$in");
 				my $dn   = $out;
-				$dn =~ s/\.sql$//;
+				$dn   =~ s/\.sql$//;
 				$cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g;
 				my $o;
 				open($o, ">contrib/$n/$out")
@@ -866,10 +861,11 @@ sub AdjustContribProj
 sub AdjustFrontendProj
 {
 	my $proj = shift;
-	AdjustModule($proj, $frontend_defines, \@frontend_uselibpq,
-		\@frontend_uselibpgport, \@frontend_uselibpgcommon,
-		$frontend_extralibs,
-		$frontend_extrasource, $frontend_extraincludes);
+	AdjustModule(
+		$proj,                     $frontend_defines,
+		\@frontend_uselibpq,       \@frontend_uselibpgport,
+		\@frontend_uselibpgcommon, $frontend_extralibs,
+		$frontend_extrasource,     $frontend_extraincludes);
 }
 
 sub AdjustModule
diff --git a/src/tools/msvc/Project.pm b/src/tools/msvc/Project.pm
index 362beb4218a..4ce09418537 100644
--- a/src/tools/msvc/Project.pm
+++ b/src/tools/msvc/Project.pm
@@ -63,6 +63,7 @@ sub ReplaceFile
 
 	foreach my $file (keys %{ $self->{files} })
 	{
+
 		# Match complete filename
 		if ($filename =~ m!/!)
 		{
diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm
index db95afa14ba..6b16e69b690 100644
--- a/src/tools/msvc/Solution.pm
+++ b/src/tools/msvc/Solution.pm
@@ -36,7 +36,7 @@ sub _new
 	$options->{float8byval} = ($bits == 64)
 	  unless exists $options->{float8byval};
 	die "float8byval not permitted on 32 bit platforms"
-	  if  $options->{float8byval} && $bits == 32;
+	  if $options->{float8byval} && $bits == 32;
 	if ($options->{xml})
 	{
 		if (!($options->{xslt} && $options->{iconv}))
@@ -143,16 +143,13 @@ sub GenerateFiles
 	confess "Unable to parse configure.in for all variables!"
 	  if ($self->{strver} eq '' || $self->{numver} eq '');
 
-	if (IsNewer(
-			"src/include/pg_config_os.h", "src/include/port/win32.h"))
+	if (IsNewer("src/include/pg_config_os.h", "src/include/port/win32.h"))
 	{
 		print "Copying pg_config_os.h...\n";
-		copyFile("src/include/port/win32.h",
-			"src/include/pg_config_os.h");
+		copyFile("src/include/port/win32.h", "src/include/pg_config_os.h");
 	}
 
-	if (IsNewer(
-			"src/include/pg_config.h", "src/include/pg_config.h.win32"))
+	if (IsNewer("src/include/pg_config.h", "src/include/pg_config.h.win32"))
 	{
 		print "Generating pg_config.h...\n";
 		open(I, "src/include/pg_config.h.win32")
@@ -165,7 +162,7 @@ sub GenerateFiles
 		{
 			s{PG_VERSION "[^"]+"}{PG_VERSION "$self->{strver}$extraver"};
 			s{PG_VERSION_NUM \d+}{PG_VERSION_NUM $self->{numver}};
-			s{PG_VERSION_STR "[^"]+"}{__STRINGIFY(x) #x\n#define __STRINGIFY2(z) __STRINGIFY(z)\n#define PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " __STRINGIFY2(_MSC_VER) ", $bits-bit"};
+s{PG_VERSION_STR "[^"]+"}{__STRINGIFY(x) #x\n#define __STRINGIFY2(z) __STRINGIFY(z)\n#define PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " __STRINGIFY2(_MSC_VER) ", $bits-bit"};
 			print O;
 		}
 		print O "#define PG_MAJORVERSION \"$self->{majorver}\"\n";
@@ -177,10 +174,10 @@ sub GenerateFiles
 		  if ($self->{options}->{asserts});
 		print O "#define USE_INTEGER_DATETIMES 1\n"
 		  if ($self->{options}->{integer_datetimes});
-		print O "#define USE_LDAP 1\n"   if ($self->{options}->{ldap});
-		print O "#define HAVE_LIBZ 1\n"  if ($self->{options}->{zlib});
+		print O "#define USE_LDAP 1\n"    if ($self->{options}->{ldap});
+		print O "#define HAVE_LIBZ 1\n"   if ($self->{options}->{zlib});
 		print O "#define USE_OPENSSL 1\n" if ($self->{options}->{openssl});
-		print O "#define ENABLE_NLS 1\n" if ($self->{options}->{nls});
+		print O "#define ENABLE_NLS 1\n"  if ($self->{options}->{nls});
 
 		print O "#define BLCKSZ ", 1024 * $self->{options}->{blocksize}, "\n";
 		print O "#define RELSEG_SIZE ",
diff --git a/src/tools/msvc/VCBuildProject.pm b/src/tools/msvc/VCBuildProject.pm
index 3a24c4e52fa..a8d75d88f31 100644
--- a/src/tools/msvc/VCBuildProject.pm
+++ b/src/tools/msvc/VCBuildProject.pm
@@ -112,7 +112,7 @@ EOF
 			my $of = $fileNameWithPath;
 			$of =~ s/\.y$/.c/;
 			$of =~
-s{^src/pl/plpgsql/src/gram.c$}{src/pl/plpgsql/src/pl_gram.c};
+			  s{^src/pl/plpgsql/src/gram.c$}{src/pl/plpgsql/src/pl_gram.c};
 			print $f '>'
 			  . $self->GenerateCustomTool(
 				'Running bison on ' . $fileNameWithPath,
diff --git a/src/tools/msvc/VSObjectFactory.pm b/src/tools/msvc/VSObjectFactory.pm
index b83af4026eb..fee4684b21d 100644
--- a/src/tools/msvc/VSObjectFactory.pm
+++ b/src/tools/msvc/VSObjectFactory.pm
@@ -92,11 +92,14 @@ sub CreateProject
 
 sub DetermineVisualStudioVersion
 {
+
 	# To determine version of Visual Studio we use nmake as it has
 	# existed for a long time and still exists in current Visual
 	# Studio versions.
 	my $output = `nmake /? 2>&1`;
-	$? >> 8 == 0 or croak "Unable to determine Visual Studio version: The nmake command wasn't found.";
+	$? >> 8 == 0
+	  or croak
+"Unable to determine Visual Studio version: The nmake command wasn't found.";
 	if ($output =~ /(\d+)\.(\d+)\.\d+(\.\d+)?$/m)
 	{
 		return _GetVisualStudioVersion($1, $2);
diff --git a/src/tools/msvc/config_default.pl b/src/tools/msvc/config_default.pl
index 0bee0c0e2d9..b9f2ff41f73 100644
--- a/src/tools/msvc/config_default.pl
+++ b/src/tools/msvc/config_default.pl
@@ -7,24 +7,24 @@ our $config = {
 	  # integer_datetimes=>1,   # --enable-integer-datetimes - on is now default
 	  # float4byval=>1,         # --disable-float4-byval, on by default
 
-	  # float8byval=> $platformbits == 64, # --disable-float8-byval,
-	  # off by default on 32 bit platforms, on by default on 64 bit platforms
+	# float8byval=> $platformbits == 64, # --disable-float8-byval,
+	# off by default on 32 bit platforms, on by default on 64 bit platforms
 
-	  # blocksize => 8,         # --with-blocksize, 8kB by default
-	  # wal_blocksize => 8,     # --with-wal-blocksize, 8kB by default
-	  # wal_segsize => 16,      # --with-wal-segsize, 16MB by default
-	ldap     => 1,       # --with-ldap
-	extraver => undef,   # --with-extra-version=<string>
-	nls      => undef,   # --enable-nls=<path>
-	tcl      => undef,   # --with-tls=<path>
-	perl     => undef,   # --with-perl
-	python   => undef,   # --with-python=<path>
-	openssl  => undef,   # --with-openssl=<path>
-	uuid     => undef,   # --with-ossp-uuid
-	xml      => undef,   # --with-libxml=<path>
-	xslt     => undef,   # --with-libxslt=<path>
-	iconv    => undef,   # (not in configure, path to iconv)
-	zlib     => undef    # --with-zlib=<path>
+	# blocksize => 8,         # --with-blocksize, 8kB by default
+	# wal_blocksize => 8,     # --with-wal-blocksize, 8kB by default
+	# wal_segsize => 16,      # --with-wal-segsize, 16MB by default
+	ldap     => 1,        # --with-ldap
+	extraver => undef,    # --with-extra-version=<string>
+	nls      => undef,    # --enable-nls=<path>
+	tcl      => undef,    # --with-tls=<path>
+	perl     => undef,    # --with-perl
+	python   => undef,    # --with-python=<path>
+	openssl  => undef,    # --with-openssl=<path>
+	uuid     => undef,    # --with-ossp-uuid
+	xml      => undef,    # --with-libxml=<path>
+	xslt     => undef,    # --with-libxslt=<path>
+	iconv    => undef,    # (not in configure, path to iconv)
+	zlib     => undef     # --with-zlib=<path>
 };
 
 1;
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index bfa8a3dc909..ddb628d1545 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -15,7 +15,7 @@ my $startdir = getcwd();
 
 chdir "../../.." if (-d "../../../src/tools/msvc");
 
-my $topdir = getcwd();
+my $topdir         = getcwd();
 my $tmp_installdir = "$topdir/tmp_install";
 
 require 'src/tools/msvc/config_default.pl';
@@ -230,11 +230,11 @@ sub subdircheck
 {
 	my $subdir = shift;
 	my $module = shift;
-	my $mstat = 0;
+	my $mstat  = 0;
 
-	if ( ! -d "$module/sql" ||
-		 ! -d "$module/expected" ||
-		 ( ! -f "$module/GNUmakefile" && ! -f "$module/Makefile"))
+	if (   !-d "$module/sql"
+		|| !-d "$module/expected"
+		|| (!-f "$module/GNUmakefile" && !-f "$module/Makefile"))
 	{
 		return;
 	}
@@ -246,19 +246,17 @@ sub subdircheck
 	# Add some options for transform modules, see their respective
 	# Makefile for more details regarding Python-version specific
 	# dependencies.
-	if ($module eq "hstore_plpython" ||
-		$module eq "ltree_plpython")
+	if (   $module eq "hstore_plpython"
+		|| $module eq "ltree_plpython")
 	{
 		die "Python not enabled in configuration"
-			if !defined($config->{python});
+		  if !defined($config->{python});
 
 		# Attempt to get python version and location.
 		# Assume python.exe in specified dir.
-		my $pythonprog = "import sys;" .
-		  "print(str(sys.version_info[0]))";
-		my $prefixcmd = $config->{python}
-			  . "\\python -c \"$pythonprog\"";
-		my $pyver = `$prefixcmd`;
+		my $pythonprog = "import sys;" . "print(str(sys.version_info[0]))";
+		my $prefixcmd  = $config->{python} . "\\python -c \"$pythonprog\"";
+		my $pyver      = `$prefixcmd`;
 		die "Could not query for python version!\n" if $?;
 		chomp($pyver);
 		if ($pyver eq "2")
@@ -268,6 +266,7 @@ sub subdircheck
 		}
 		else
 		{
+
 			# disable tests on python3 for now.
 			chdir "..";
 			return;
@@ -275,10 +274,9 @@ sub subdircheck
 	}
 
 
-	print
-	  "============================================================\n";
+	print "============================================================\n";
 	print "Checking $module\n";
-	my @args  = (
+	my @args = (
 		"${tmp_installdir}/bin/pg_regress",
 		"--bindir=${tmp_installdir}/bin",
 		"--dbname=contrib_regression", @opts, @tests);
@@ -295,11 +293,12 @@ sub contribcheck
 	chdir "$topdir/contrib";
 	foreach my $module (glob("*"))
 	{
+
 		# these configuration-based exclusions must match Install.pm
-		next if ($module eq "uuid-ossp"       && !defined($config->{uuid}));
-		next if ($module eq "sslinfo"         && !defined($config->{openssl}));
-		next if ($module eq "xml2"            && !defined($config->{xml}));
-		next if ($module eq "hstore_plperl"   && !defined($config->{perl}));
+		next if ($module eq "uuid-ossp"     && !defined($config->{uuid}));
+		next if ($module eq "sslinfo"       && !defined($config->{openssl}));
+		next if ($module eq "xml2"          && !defined($config->{xml}));
+		next if ($module eq "hstore_plperl" && !defined($config->{perl}));
 		next if ($module eq "hstore_plpython" && !defined($config->{python}));
 		next if ($module eq "ltree_plpython"  && !defined($config->{python}));
 		next if ($module eq "sepgsql");
@@ -412,6 +411,7 @@ sub fetchRegressOpts
 	$m =~ s{\\\r?\n}{}g;
 	if ($m =~ /^\s*REGRESS_OPTS\s*\+?=(.*)/m)
 	{
+
 		# Substitute known Makefile variables, then ignore options that retain
 		# an unhandled variable reference.  Ignore anything that isn't an
 		# option starting with "--".
@@ -492,6 +492,6 @@ sub usage
 {
 	print STDERR
 	  "Usage: vcregress.pl ",
-	  "<check|installcheck|plcheck|contribcheck|isolationcheck|ecpgcheck|upgradecheck> [schedule]\n";
+"<check|installcheck|plcheck|contribcheck|isolationcheck|ecpgcheck|upgradecheck> [schedule]\n";
 	exit(1);
 }
-- 
GitLab