diff --git a/exercises/MPI_examples/manager-worker/Makefile b/exercises/MPI_examples/manager-worker/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..73ba6f508c732c06599b37539cb3146dfc3ce896
--- /dev/null
+++ b/exercises/MPI_examples/manager-worker/Makefile
@@ -0,0 +1,24 @@
+PROG1 = manager
+PROG2 = worker
+OBJECTS = manager.o worker.o
+CC = mpicc
+CFLAGS = -Wall -std=c11
+CFLAGS += -I. # add the current directory to the include path
+
+$(PROG1): manager.o
+	$(CC) $(CFLAGS) $^ -o $@
+
+$(PROG2): worker.o
+	$(CC) $(CFLAGS) $^ -o $@
+
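+# Note: "-usize 8" (requesting a universe of 8 process slots) and the
+# MY_MPIRUN_OPTIONS variable are assumed to be understood by the local
+# mpirun wrapper; both are site- and MPI-implementation-dependent.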
+.PHONY: run
+run: $(PROG1) $(PROG2) # build and run the program
+	MY_MPIRUN_OPTIONS="-usize 8" mpirun -np 4 ./$(PROG1)
+
+$(OBJECTS): %.o: %.c  # compile the source files into object files
+	$(CC) $(CFLAGS) -c $<
+
+.PHONY: clean
+clean: # remove the object files and the binary
+	rm -f $(OBJECTS) $(PROG1) $(PROG2)
+
diff --git a/exercises/MPI_examples/manager-worker/manager.c b/exercises/MPI_examples/manager-worker/manager.c
new file mode 100644
index 0000000000000000000000000000000000000000..15060c7065185b9bd288d990722d01472ce99a19
--- /dev/null
+++ b/exercises/MPI_examples/manager-worker/manager.c
@@ -0,0 +1,101 @@
+/****************************************************************
+ ****************************************************************
+ ****
+ **** This program file is part of the book and course
+ **** "Parallel Computing"
+ **** by Victor Eijkhout, copyright 2013-2020
+ ****
+ **** manager.c : manager code for spawn example
+ ****
+ ****************************************************************
+ ****************************************************************
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+int main(int argc, char *argv[]) {
+
+#define ASSERT(p)                                                              \
+  if (!(p)) {                                                                  \
+    printf("Assertion failed for proc %d at line %d\n", procno, __LINE__);     \
+    return -1;                                                                 \
+  }
+#define ASSERTm(p, m)                                                          \
+  if (!(p)) {                                                                  \
+    printf("Message<<%s>> for proc %d at line %d\n", m, procno, __LINE__);     \
+    return -1;                                                                 \
+  }
+
+  MPI_Comm comm;
+  int procno = -1, nprocs;
+  MPI_Init(&argc, &argv);
+  comm = MPI_COMM_WORLD;
+  MPI_Comm_rank(comm, &procno);
+  MPI_Comm_size(comm, &nprocs);
+
+  /*
+   * To investigate process placement, get host name
+   */
+  {
+    int namelen = MPI_MAX_PROCESSOR_NAME;
+    char procname[namelen];
+    MPI_Get_processor_name(procname, &namelen);
+    printf("[%d] manager process runs on <<%s>>\n", procno, procname);
+  }
+
+  int world_size, manager_rank, universe_size, *universe_sizep, flag;
+
+  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+  MPI_Comm_rank(MPI_COMM_WORLD, &manager_rank);
+  MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, (void *)&universe_sizep,
+                    &flag);
+
+  // codesnippet uverse
+  if (!flag) {
+    if (manager_rank == 0) {
+      printf("This MPI does not support UNIVERSE_SIZE.\nHow many processes "
+             "total?");
+      scanf("%d", &universe_size);
+    }
+    MPI_Bcast(&universe_size, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    // codesnippet end
+  } else {
+    universe_size = *universe_sizep;
+    if (manager_rank == 0)
+      printf("Universe size deduced as %d\n", universe_size);
+  }
+  ASSERTm(universe_size > world_size, "No room to start workers");
+  int nworkers = universe_size - world_size;
+
+  /*
+   * Now spawn the workers. Note that there is a run-time determination
+   * of what type of worker to spawn, and presumably this calculation must
+   * be done at run time and cannot be calculated before starting
+   * the program. If everything is known when the application is
+   * first started, it is generally better to start them all at once
+   * in a single MPI_COMM_WORLD.
+   */
+
+  if (manager_rank == 0)
+    printf("Now spawning %d workers\n", nworkers);
+  const char *worker_program = "worker";
+  int errorcodes[nworkers];
+  MPI_Comm inter_to_workers; /* intercommunicator */
+  MPI_Comm_spawn(worker_program, MPI_ARGV_NULL, nworkers, MPI_INFO_NULL, 0,
+                 MPI_COMM_WORLD, &inter_to_workers, errorcodes);
+  for (int ie = 0; ie < nworkers; ie++)
+    if (errorcodes[ie] != 0)
+      printf("Error %d in spawning worker %d\n", errorcodes[ie], ie);
+
+  /*
+   * Parallel code here. The communicator "inter_to_workers" can be used
+   * to communicate with the spawned processes, which have ranks
+   * 0 .. nworkers-1 in the remote group of the intercommunicator
+   * "inter_to_workers".
+   */
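+
+  /*
+   * Minimal illustrative sketch (not part of the original example, and
+   * only one possible protocol): manager rank 0 sends one integer work
+   * item to each worker. Destination ranks on the intercommunicator
+   * refer to the remote (worker) group.
+   */
+  if (manager_rank == 0) {
+    for (int iw = 0; iw < nworkers; iw++) {
+      int work_item = iw + 1;
+      MPI_Send(&work_item, 1, MPI_INT, iw, 0, inter_to_workers);
+    }
+  }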
+
+  MPI_Finalize();
+  return 0;
+}
+
diff --git a/exercises/MPI_examples/manager-worker/worker.c b/exercises/MPI_examples/manager-worker/worker.c
new file mode 100644
index 0000000000000000000000000000000000000000..9fe965e8b44e028b47bb1fe203200108de9267bf
--- /dev/null
+++ b/exercises/MPI_examples/manager-worker/worker.c
@@ -0,0 +1,73 @@
+/****************************************************************
+ ****************************************************************
+ ****
+ **** This program file is part of the book and course
+ **** "Parallel Computing"
+ **** by Victor Eijkhout, copyright 2013-2016
+ ****
+ **** worker.c : worker code for spawn example
+ ****
+ ****************************************************************
+ ****************************************************************
+ */
+
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char *argv[]) {
+
+#define ASSERT(p)                                                              \
+  if (!(p)) {                                                                  \
+    printf("Assertion failed for proc %d at line %d\n", procno, __LINE__);     \
+    return -1;                                                                 \
+  }
+#define ASSERTm(p, m)                                                          \
+  if (!(p)) {                                                                  \
+    printf("Message<<%s>> for proc %d at line %d\n", m, procno, __LINE__);     \
+    return -1;                                                                 \
+  }
+
+  MPI_Comm comm;
+  int procno = -1, nprocs;
+  MPI_Init(&argc, &argv);
+  comm = MPI_COMM_WORLD;
+  MPI_Comm_rank(comm, &procno);
+  MPI_Comm_size(comm, &nprocs);
+  MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
+
+  int nworkers, workerno;
+  MPI_Comm parent;
+
+  // codesnippet spawnworker
+  MPI_Comm_size(MPI_COMM_WORLD, &nworkers);
+  MPI_Comm_rank(MPI_COMM_WORLD, &workerno);
+  MPI_Comm_get_parent(&parent);
+  // codesnippet end
+  ASSERTm(parent != MPI_COMM_NULL, "No parent!");
+
+  /*
+   * To investigate process placement, get host name
+   */
+  {
+    int namelen = MPI_MAX_PROCESSOR_NAME;
+    char procname[namelen];
+    MPI_Get_processor_name(procname, &namelen);
+    printf("[%d] worker process runs on <<%s>>\n", workerno, procname);
+  }
+
+  /*
+   * Parallel code here.
+   * The manager is represented as the process with rank 0 in (the remote
+   * group of) the parent intercommunicator "parent".  If the workers need
+   * to communicate among themselves, they can use MPI_COMM_WORLD.
+   */
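+
+  /*
+   * Minimal illustrative sketch matching the one in manager.c (not part
+   * of the original example): receive one integer work item from the
+   * manager, which is rank 0 in the remote group of "parent".
+   */
+  int work_item;
+  MPI_Recv(&work_item, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
+  printf("[%d] worker received work item %d\n", workerno, work_item);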
+
+  MPI_Finalize();
+  return 0;
+}