diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index 7c8a777b33905ebbb476f62fb4a0a391ea144b9d..ecf9a03318044a438b6549a7555faf1b0c723603 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -281,7 +281,7 @@ CreateInitDecodingContext(char *plugin,
 	LWLockRelease(ProcArrayLock);
 
 	/*
-	 * tell the snapshot builder to only assemble snapshot once reaching the a
+	 * tell the snapshot builder to only assemble a snapshot once reaching the
 	 * running_xact's record with the respective xmin.
 	 */
 	xmin_horizon = slot->data.catalog_xmin;
@@ -880,7 +880,7 @@ LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn, XLogRecPtr restart
 }
 
 /*
- * Handle a consumer's conformation having received all changes up to lsn.
+ * Handle a consumer's confirmation of having received all changes up to lsn.
  */
 void
 LogicalConfirmReceivedLocation(XLogRecPtr lsn)
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 12e01edb3f7c3e73c38aa23fc202222530558e47..77375d91b28c69b8d352a373983e73722b7028aa 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -466,8 +466,8 @@ ReorderBufferGetTupleBuf(ReorderBuffer *rb, Size tuple_len)
 	/*
 	 * Most tuples are below MaxHeapTupleSize, so we use a slab allocator for
 	 * those. Thus always allocate at least MaxHeapTupleSize. Note that tuples
-	 * tuples generated for oldtuples can be bigger, as they don't have
-	 * out-of-line toast columns.
+	 * generated for oldtuples can be bigger, as they don't have out-of-line
+	 * toast columns.
 	 */
 	if (alloc_len < MaxHeapTupleSize)
 		alloc_len = MaxHeapTupleSize;
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index b4dc617a8c974e5d4ab719806dadc861e6e23818..b5fa3dbbc0bb7a4be872087933fb386348a88477 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -901,7 +901,7 @@ SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid)
 	/*
 	 * NB: This handles subtransactions correctly even if we started from
 	 * suboverflowed xl_running_xacts because we only keep track of toplevel
-	 * transactions. Since the latter are always are allocated before their
+	 * transactions. Since the latter are always allocated before their
 	 * subxids and since they end at the same time it's sufficient to deal
 	 * with them here.
 	 */
@@ -981,7 +981,7 @@ SnapBuildCommitTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid,
 		 * we reached consistency.
 		 */
 		forced_timetravel = true;
-		elog(DEBUG1, "forced to assume catalog changes for xid %u because it was running to early", xid);
+		elog(DEBUG1, "forced to assume catalog changes for xid %u because it was running too early", xid);
 	}
 
 	for (nxact = 0; nxact < nsubxacts; nxact++)