diff --git a/exercises/MPI_examples/gather/Makefile b/exercises/MPI_examples/gather/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..3fb0fdba21b1df83d7a5c1a8570a19712a965d8d
--- /dev/null
+++ b/exercises/MPI_examples/gather/Makefile
@@ -0,0 +1,20 @@
+PROG = gather
+OBJECTS = gather.o
+CC = mpicc
+CFLAGS = -Wall -std=c11
+CFLAGS += -I. # add the current directory to the include path
+
+$(PROG): $(OBJECTS) # link the object files into a binary
+	$(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@ $(LDLIBS)
+
+.PHONY: run
+run: $(PROG) # build and run the program
+	mpirun ./$(PROG)
+
+$(OBJECTS): %.o: %.c  # compile the source files into object files
+	$(CC) $(CFLAGS) -c $<
+
+.PHONY: clean
+clean: # remove the object files and the binary
+	rm -f $(OBJECTS) $(PROG)
+
diff --git a/exercises/MPI_examples/gather/gather.c b/exercises/MPI_examples/gather/gather.c
new file mode 100644
index 0000000000000000000000000000000000000000..418135ad0f3ea0298bc8efee85c2bfed3062e971
--- /dev/null
+++ b/exercises/MPI_examples/gather/gather.c
@@ -0,0 +1,70 @@
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define SCATTER_NUM 10
+
+int main(int argc, char **argv) {
+
+  MPI_Init(&argc, &argv);
+
+  // Get the number of processes
+  int num_processes;
+  MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
+
+  // Get the rank of the process (needed before allocating the gather buffer)
+  int rank;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  // The receive buffer is only significant at the root rank, so only
+  // allocate it there; all other ranks may pass NULL to MPI_Gather.
+  int *bigdata = NULL;
+  if (rank == 0) {
+    bigdata = malloc(SCATTER_NUM * num_processes * sizeof(int));
+    if (bigdata == NULL) {
+      fprintf(stderr, "malloc of gather buffer failed\n");
+      MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+    }
+  }
+
+  int *localdata = malloc(SCATTER_NUM * sizeof(int));
+  if (localdata == NULL) {
+    fprintf(stderr, "malloc of local buffer failed\n");
+    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+  }
+
+  // Fill the local chunk with rank-specific values (1..N for rank 0, etc.)
+  for (int i = 0; i < SCATTER_NUM; i++) {
+    localdata[i] = rank * SCATTER_NUM + (i + 1);
+  }
+
+  // Get the name of the processor
+  char processor_name[MPI_MAX_PROCESSOR_NAME];
+  int name_len;
+  MPI_Get_processor_name(processor_name, &name_len);
+
+  // Collect every rank's chunk into bigdata on rank 0, ordered by sender rank
+  MPI_Gather(localdata, SCATTER_NUM, MPI_INT, bigdata,
+             SCATTER_NUM, MPI_INT, 0, MPI_COMM_WORLD);
+
+  if (rank == 0) {
+    printf("Process %d of %d on %s received: bigdata[%d] = { ", rank, num_processes,
+           processor_name, SCATTER_NUM * num_processes);
+    for (int i = 0; i < SCATTER_NUM * num_processes; i++) {
+      printf("%d, ", bigdata[i]);
+    }
+    printf("}\n");
+  } else {
+    printf("Process %d of %d on %s sent: localdata[%d] = { ", rank, num_processes,
+           processor_name, SCATTER_NUM);
+    for (int i = 0; i < SCATTER_NUM; i++) {
+      printf("%d, ", localdata[i]);
+    }
+    printf("}\n");
+  }
+
+  // Release heap buffers before finalizing (free(NULL) is a no-op)
+  free(bigdata);
+  free(localdata);
+
+  // Finalize the MPI environment. No more MPI calls can be made after this
+  MPI_Finalize();
+
+  return 0;
+}
diff --git a/slides/images/MPI-gather.png b/slides/images/MPI-gather.png
new file mode 100644
index 0000000000000000000000000000000000000000..f070b09f22ed809a4b2b171800848fe8ed3c5970
Binary files /dev/null and b/slides/images/MPI-gather.png differ
diff --git a/slides/pages/recap.md b/slides/pages/recap.md
index fd69d3bfa2d86efc94a5d8bdb391daf338698dd9..81eebf7ad1e77affc7898555014c69dec1a7e9fb 100644
--- a/slides/pages/recap.md
+++ b/slides/pages/recap.md
@@ -349,6 +349,8 @@ title: Data Replication (Broadcast)
 
 ## Data Replication (Broadcast)
 
+Replicate data from one rank to all other ranks:
+
 ```c
 MPI_Bcast (void *data, int count, MPI_Datatype dtype, int root, MPI_Comm comm)
 ```
@@ -363,14 +365,14 @@ MPI_Bcast (void *data, int count, MPI_Datatype dtype, int root, MPI_Comm comm)
 - in rank root, data is an input argument
 - **MPI_Bcast completes only after all ranks in comm have made the call**
 
-See live demo.
-
 ---
 title: Data Scatter
 ---
 
 ## Data Scatter
 
+Distribute chunks of data from one rank to all ranks:
+
 ```c
 MPI_Scatter (void *sendbuf, int sendcount, MPI_Datatype sendtype,
     void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
@@ -387,4 +389,25 @@ MPI_Scatter (void *sendbuf, int sendcount, MPI_Datatype sendtype,
 - root also sends one data chunk to itself 
 - **for each chunk the amount of data sent must match the receive size**
 
-See live demo.
+---
+title: Data Gather
+---
+
+## Data Gather
+
+Collect chunks of data from all ranks in one place:
+
+```c
+MPI_Gather (void *sendbuf, int sendcount, MPI_Datatype sendtype,
+    void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
+```
+
+<div class="container flex justify-center mt-5">
+    <img src="/images/MPI-gather.png" class="block w-lg"/>
+</div>
+
+### Notes
+
+- The opposite operation of **MPI_Scatter**
+- root also receives one data chunk from itself
+- data chunks are stored in increasing order of the sender’s rank