diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 98d4f1eca76cd09d85550f4d23882296d49c87d7..646df087f9fcbaa48ff8fbe677f544e5877c6519 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -29,6 +29,7 @@
 #include "executor/nodeForeignscan.h"
 #include "executor/nodeSeqscan.h"
 #include "executor/nodeIndexscan.h"
+#include "executor/nodeIndexonlyscan.h"
 #include "executor/tqueue.h"
 #include "nodes/nodeFuncs.h"
 #include "optimizer/planmain.h"
@@ -202,6 +203,10 @@ ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
 				ExecIndexScanEstimate((IndexScanState *) planstate,
 									  e->pcxt);
 				break;
+			case T_IndexOnlyScanState:
+				ExecIndexOnlyScanEstimate((IndexOnlyScanState *) planstate,
+										  e->pcxt);
+				break;
 			case T_ForeignScanState:
 				ExecForeignScanEstimate((ForeignScanState *) planstate,
 										e->pcxt);
@@ -258,6 +263,10 @@ ExecParallelInitializeDSM(PlanState *planstate,
 				ExecIndexScanInitializeDSM((IndexScanState *) planstate,
 										   d->pcxt);
 				break;
+			case T_IndexOnlyScanState:
+				ExecIndexOnlyScanInitializeDSM((IndexOnlyScanState *) planstate,
+											   d->pcxt);
+				break;
 			case T_ForeignScanState:
 				ExecForeignScanInitializeDSM((ForeignScanState *) planstate,
 											 d->pcxt);
@@ -737,6 +746,9 @@ ExecParallelInitializeWorker(PlanState *planstate, shm_toc *toc)
 			case T_IndexScanState:
 				ExecIndexScanInitializeWorker((IndexScanState *) planstate, toc);
 				break;
+			case T_IndexOnlyScanState:
+				ExecIndexOnlyScanInitializeWorker((IndexOnlyScanState *) planstate, toc);
+				break;
 			case T_ForeignScanState:
 				ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
 												toc);
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index d5b19b7c11589c4352134baa4783a50e35fca456..66c2ad66d719e5f648b6a5a64e1b941056b7cc5d 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -21,6 +21,11 @@
  *		ExecEndIndexOnlyScan		releases all storage.
  *		ExecIndexOnlyMarkPos		marks scan position.
  *		ExecIndexOnlyRestrPos		restores scan position.
+ *		ExecIndexOnlyScanEstimate	estimates DSM space needed for
+ *						parallel index-only scan
+ *		ExecIndexOnlyScanInitializeDSM	initializes DSM for parallel
+ *						index-only scan
+ *		ExecIndexOnlyScanInitializeWorker attaches to DSM info in parallel worker
  */
 #include "postgres.h"
 
@@ -277,6 +282,16 @@ ExecIndexOnlyScan(IndexOnlyScanState *node)
 void
 ExecReScanIndexOnlyScan(IndexOnlyScanState *node)
 {
+	bool		reset_parallel_scan = true;
+
+	/*
+	 * If we are here just to update the scan keys, then don't reset the
+	 * parallel scan. For a detailed explanation of why, see the comments
+	 * for ExecReScanIndexScan.
+	 */
+	if (node->ioss_NumRuntimeKeys != 0 && !node->ioss_RuntimeKeysReady)
+		reset_parallel_scan = false;
+
 	/*
 	 * If we are doing runtime key calculations (ie, any of the index key
 	 * values weren't simple Consts), compute the new key values.  But first,
@@ -296,10 +311,15 @@ ExecReScanIndexOnlyScan(IndexOnlyScanState *node)
 	node->ioss_RuntimeKeysReady = true;
 
 	/* reset index scan */
-	index_rescan(node->ioss_ScanDesc,
-				 node->ioss_ScanKeys, node->ioss_NumScanKeys,
-				 node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
+	if (node->ioss_ScanDesc)
+	{
+		index_rescan(node->ioss_ScanDesc,
+					 node->ioss_ScanKeys, node->ioss_NumScanKeys,
+					 node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
 
+		if (reset_parallel_scan && node->ioss_ScanDesc->parallel_scan)
+			index_parallelrescan(node->ioss_ScanDesc);
+	}
 	ExecScanReScan(&node->ss);
 }
 
@@ -536,29 +557,134 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
 	/*
 	 * Initialize scan descriptor.
 	 */
-	indexstate->ioss_ScanDesc = index_beginscan(currentRelation,
-												indexstate->ioss_RelationDesc,
-												estate->es_snapshot,
+	if (!node->scan.plan.parallel_aware)
+	{
+		indexstate->ioss_ScanDesc = index_beginscan(currentRelation,
+											   indexstate->ioss_RelationDesc,
+													estate->es_snapshot,
 												indexstate->ioss_NumScanKeys,
 											indexstate->ioss_NumOrderByKeys);
 
-	/* Set it up for index-only scan */
-	indexstate->ioss_ScanDesc->xs_want_itup = true;
-	indexstate->ioss_VMBuffer = InvalidBuffer;
 
-	/*
-	 * If no run-time keys to calculate, go ahead and pass the scankeys to the
-	 * index AM.
-	 */
-	if (indexstate->ioss_NumRuntimeKeys == 0)
-		index_rescan(indexstate->ioss_ScanDesc,
-					 indexstate->ioss_ScanKeys,
-					 indexstate->ioss_NumScanKeys,
-					 indexstate->ioss_OrderByKeys,
-					 indexstate->ioss_NumOrderByKeys);
+		/* Set it up for index-only scan */
+		indexstate->ioss_ScanDesc->xs_want_itup = true;
+		indexstate->ioss_VMBuffer = InvalidBuffer;
+
+		/*
+		 * If no run-time keys to calculate, go ahead and pass the scankeys to
+		 * the index AM.
+		 */
+		if (indexstate->ioss_NumRuntimeKeys == 0)
+			index_rescan(indexstate->ioss_ScanDesc,
+						 indexstate->ioss_ScanKeys,
+						 indexstate->ioss_NumScanKeys,
+						 indexstate->ioss_OrderByKeys,
+						 indexstate->ioss_NumOrderByKeys);
+	}
 
 	/*
 	 * all done.
 	 */
 	return indexstate;
 }
+
+/* ----------------------------------------------------------------
+ *		Parallel Index-only Scan Support
+ * ----------------------------------------------------------------
+ */
+
+/* ----------------------------------------------------------------
+ *		ExecIndexOnlyScanEstimate
+ *
+ *	estimates the space required to serialize the index-only scan node.
+ * ----------------------------------------------------------------
+ */
+void
+ExecIndexOnlyScanEstimate(IndexOnlyScanState *node,
+						  ParallelContext *pcxt)
+{
+	EState	   *estate = node->ss.ps.state;
+
+	node->ioss_PscanLen = index_parallelscan_estimate(node->ioss_RelationDesc,
+													  estate->es_snapshot);
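+
+	/* reserve DSM space for the descriptor, plus one table-of-contents key */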
+	shm_toc_estimate_chunk(&pcxt->estimator, node->ioss_PscanLen);
+	shm_toc_estimate_keys(&pcxt->estimator, 1);
+}
+
+/* ----------------------------------------------------------------
+ *		ExecIndexOnlyScanInitializeDSM
+ *
+ *		Set up a parallel index-only scan descriptor.
+ * ----------------------------------------------------------------
+ */
+void
+ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node,
+							   ParallelContext *pcxt)
+{
+	EState	   *estate = node->ss.ps.state;
+	ParallelIndexScanDesc piscan;
+
+	piscan = shm_toc_allocate(pcxt->toc, node->ioss_PscanLen);
+	index_parallelscan_initialize(node->ss.ss_currentRelation,
+								  node->ioss_RelationDesc,
+								  estate->es_snapshot,
+								  piscan);
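+
+	/* publish it under this node's plan_node_id so workers can find it */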
+	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, piscan);
+	node->ioss_ScanDesc =
+		index_beginscan_parallel(node->ss.ss_currentRelation,
+								 node->ioss_RelationDesc,
+								 node->ioss_NumScanKeys,
+								 node->ioss_NumOrderByKeys,
+								 piscan);
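+
+	/* Set it up for index-only scan */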
+	node->ioss_ScanDesc->xs_want_itup = true;
+	node->ioss_VMBuffer = InvalidBuffer;
+
+	/*
+	 * If no run-time keys to calculate, go ahead and pass the scankeys to
+	 * the index AM.
+	 */
+	if (node->ioss_NumRuntimeKeys == 0)
+		index_rescan(node->ioss_ScanDesc,
+					 node->ioss_ScanKeys, node->ioss_NumScanKeys,
+					 node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
+}
+
+/* ----------------------------------------------------------------
+ *		ExecIndexOnlyScanInitializeWorker
+ *
+ *		Copy relevant information from TOC into planstate.
+ * ----------------------------------------------------------------
+ */
+void
+ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node, shm_toc *toc)
+{
+	ParallelIndexScanDesc piscan;
+
+	piscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
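+
+	/* attach to the parallel scan state set up by the leader */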
+	node->ioss_ScanDesc =
+		index_beginscan_parallel(node->ss.ss_currentRelation,
+								 node->ioss_RelationDesc,
+								 node->ioss_NumScanKeys,
+								 node->ioss_NumOrderByKeys,
+								 piscan);
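+
+	/* Set it up for index-only scan */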
+	node->ioss_ScanDesc->xs_want_itup = true;
+
+	/*
+	 * If no run-time keys to calculate, go ahead and pass the scankeys to the
+	 * index AM.
+	 */
+	if (node->ioss_NumRuntimeKeys == 0)
+		index_rescan(node->ioss_ScanDesc,
+					 node->ioss_ScanKeys, node->ioss_NumScanKeys,
+					 node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
+}
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 56eccafd7b0745d8a60eddf974ed7bcc6033fba4..d92826bcfc9ebf2116a1d350174f5bfee4808e7f 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -1048,9 +1048,9 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
 
 		/*
 		 * If appropriate, consider parallel index scan.  We don't allow
-		 * parallel index scan for bitmap or index only scans.
+		 * parallel index scan for bitmap index scans.
 		 */
-		if (index->amcanparallel && !index_only_scan &&
+		if (index->amcanparallel &&
 			rel->consider_parallel && outer_relids == NULL &&
 			scantype != ST_BITMAPSCAN)
 		{
@@ -1104,7 +1104,7 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
 			result = lappend(result, ipath);
 
 			/* If appropriate, consider parallel index scan */
-			if (index->amcanparallel && !index_only_scan &&
+			if (index->amcanparallel &&
 				rel->consider_parallel && outer_relids == NULL &&
 				scantype != ST_BITMAPSCAN)
 			{
diff --git a/src/include/executor/nodeIndexonlyscan.h b/src/include/executor/nodeIndexonlyscan.h
index 4018af257d2fd105da3913c75ce18a411aa52dda..5d3c6bbc0de958ad7fa74d82b30aa4926678070d 100644
--- a/src/include/executor/nodeIndexonlyscan.h
+++ b/src/include/executor/nodeIndexonlyscan.h
@@ -15,6 +15,7 @@
 #define NODEINDEXONLYSCAN_H
 
+#include "access/parallel.h"
 #include "nodes/execnodes.h"
 
 extern IndexOnlyScanState *ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags);
 extern TupleTableSlot *ExecIndexOnlyScan(IndexOnlyScanState *node);
@@ -23,4 +24,12 @@ extern void ExecIndexOnlyMarkPos(IndexOnlyScanState *node);
 extern void ExecIndexOnlyRestrPos(IndexOnlyScanState *node);
 extern void ExecReScanIndexOnlyScan(IndexOnlyScanState *node);
 
+/* Support functions for parallel index-only scans */
+extern void ExecIndexOnlyScanEstimate(IndexOnlyScanState *node,
+						  ParallelContext *pcxt);
+extern void ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node,
+							   ParallelContext *pcxt);
+extern void ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node,
+								  shm_toc *toc);
+
 #endif   /* NODEINDEXONLYSCAN_H */
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 9f41babf3534fea130676202d5749b49959fedfa..1c1cb80a63648e1332236e0d83ae1524c34ec90d 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -1409,6 +1409,7 @@ typedef struct IndexScanState
  *		ScanDesc		   index scan descriptor
  *		VMBuffer		   buffer in use for visibility map testing, if any
  *		HeapFetches		   number of tuples we were forced to fetch from heap
+ *		ioss_PscanLen	   size of parallel index-only scan descriptor
  * ----------------
  */
 typedef struct IndexOnlyScanState
@@ -1427,6 +1428,7 @@ typedef struct IndexOnlyScanState
 	IndexScanDesc ioss_ScanDesc;
 	Buffer		ioss_VMBuffer;
 	long		ioss_HeapFetches;
+	Size		ioss_PscanLen;
 } IndexOnlyScanState;
 
 /* ----------------
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 48fb80e90c874ae03528fd17d81e65f1c422d380..a5a22323c172d506e8d6e7367d89c46438b18b36 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -92,12 +92,14 @@ explain (costs off)
 explain (costs off)
 	select  sum(parallel_restricted(unique1)) from tenk1
 	group by(parallel_restricted(unique1));
-                     QUERY PLAN                     
-----------------------------------------------------
+                            QUERY PLAN                             
+-------------------------------------------------------------------
  HashAggregate
    Group Key: parallel_restricted(unique1)
-   ->  Index Only Scan using tenk1_unique1 on tenk1
-(3 rows)
+   ->  Gather
+         Workers Planned: 4
+         ->  Parallel Index Only Scan using tenk1_unique1 on tenk1
+(5 rows)
 
 -- test parallel plans for queries containing un-correlated subplans.
 alter table tenk2 set (parallel_workers = 0);
@@ -146,6 +148,25 @@ select  count((unique1)) from tenk1 where hundred > 1;
   9800
 (1 row)
 
+-- test parallel index-only scans.
+explain (costs off)
+	select  count(*) from tenk1 where thousand > 95;
+                                   QUERY PLAN                                   
+--------------------------------------------------------------------------------
+ Finalize Aggregate
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial Aggregate
+               ->  Parallel Index Only Scan using tenk1_thous_tenthous on tenk1
+                     Index Cond: (thousand > 95)
+(6 rows)
+
+select  count(*) from tenk1 where thousand > 95;
+ count 
+-------
+  9040
+(1 row)
+
 reset enable_seqscan;
 reset enable_bitmapscan;
 set force_parallel_mode=1;
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
index f5bc4d18733eb1c9a2504ab86672c5e0f131fd65..d72addf5a2494912bad9afa6c300db977e89395b 100644
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
@@ -56,6 +56,11 @@ explain (costs off)
 	select  count((unique1)) from tenk1 where hundred > 1;
 select  count((unique1)) from tenk1 where hundred > 1;
 
+-- test parallel index-only scans.
+explain (costs off)
+	select  count(*) from tenk1 where thousand > 95;
+select  count(*) from tenk1 where thousand > 95;
+
 reset enable_seqscan;
 reset enable_bitmapscan;