diff --git a/exercises/MPI_examples/reduction/Makefile b/exercises/MPI_examples/reduction/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..e86ea1e95fcbd49f627a3793f792a13fd6770d36
--- /dev/null
+++ b/exercises/MPI_examples/reduction/Makefile
@@ -0,0 +1,20 @@
+PROG = reduction
+OBJECTS = reduction.o
+CC = mpicc
+CFLAGS = -Wall -std=c11
+CFLAGS += -I. # add the current directory to the include path
+
+$(PROG): $(OBJECTS) # link the object files into a binary
+	$(CC) $(CFLAGS) $^ -o $@
+
+.PHONY: run
+run: $(PROG) # build and run the program
+	mpirun ./$(PROG)
+
+$(OBJECTS): %.o: %.c  # compile the source files into object files
+	$(CC) $(CFLAGS) -c $<
+
+.PHONY: clean
+clean: # remove the object files and the binary
+	rm -f $(OBJECTS) $(PROG)
+
diff --git a/exercises/MPI_examples/reduction/reduction.c b/exercises/MPI_examples/reduction/reduction.c
new file mode 100644
index 0000000000000000000000000000000000000000..61f922a8199a0c724f1009c1face47f841ea2b87
--- /dev/null
+++ b/exercises/MPI_examples/reduction/reduction.c
@@ -0,0 +1,57 @@
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define SCATTER_NUM 10
+
+int main(int argc, char **argv) {
+
+  MPI_Init(&argc, &argv);
+
+  // Get the number of processes
+  int num_processes;
+  MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
+
+  int *sendbuf = malloc(SCATTER_NUM * sizeof(int));
+  int *recvbuf = malloc(SCATTER_NUM * sizeof(int));
+
+  // Get the rank of the process
+  int rank;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  for (int i = 0; i < SCATTER_NUM; i++) {
+    sendbuf[i] = i + 1;
+  }
+
+  // Get the name of the processor
+  char processor_name[MPI_MAX_PROCESSOR_NAME];
+  int name_len;
+  MPI_Get_processor_name(processor_name, &name_len);
+
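+  // Element-wise sum of sendbuf across all ranks; only root (rank 0) receives the result in recvbuf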
+  MPI_Reduce(sendbuf, recvbuf, SCATTER_NUM, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+
+  if (rank == 0) {
+    printf("Process %d of %d on %s received with reduction: [%d] = { ", rank, num_processes,
+           processor_name, SCATTER_NUM);
+    for (int i = 0; i < SCATTER_NUM; i++) {
+      printf("%d, ", recvbuf[i]);
+    }
+    printf("}\n");
+  } else {
+    printf("Process %d of %d on %s sent: sendbuf[%d] = { ", rank, num_processes,
+           processor_name, SCATTER_NUM);
+    for (int i = 0; i < SCATTER_NUM; i++) {
+      printf("%d, ", sendbuf[i]);
+    }
+    printf("}\n");
+  }
+
+  free(sendbuf);
+  free(recvbuf);
+
+  // Finalize the MPI environment. No more MPI calls can be made after this
+  MPI_Finalize();
+
+  return 0;
+}
diff --git a/slides/images/MPI-all-to-all.png b/slides/images/MPI-all-to-all.png
new file mode 100644
index 0000000000000000000000000000000000000000..e19075022d254bc552cf85541680f4f9f649e39a
Binary files /dev/null and b/slides/images/MPI-all-to-all.png differ
diff --git a/slides/images/MPI-gather-to-all.png b/slides/images/MPI-gather-to-all.png
new file mode 100644
index 0000000000000000000000000000000000000000..8993c31f2fae0756a1e634f09326f7d87451734d
Binary files /dev/null and b/slides/images/MPI-gather-to-all.png differ
diff --git a/slides/images/MPI-global-reduction.png b/slides/images/MPI-global-reduction.png
new file mode 100644
index 0000000000000000000000000000000000000000..6edd08bfd387c5fbec0d31b759366d236f12e6bc
Binary files /dev/null and b/slides/images/MPI-global-reduction.png differ
diff --git a/slides/pages/recap.md b/slides/pages/recap.md
index 81eebf7ad1e77affc7898555014c69dec1a7e9fb..44829819a674a4b241685149f78857f98db8ca33 100644
--- a/slides/pages/recap.md
+++ b/slides/pages/recap.md
@@ -411,3 +411,95 @@ MPI_Gather (void *sendbuf, int sendcount, MPI_Datatype sendtype,
 - The opposite operation of **MPI_Scatter**
 - root also receives one data chunk from itself
 - data chunks are stored in increasing order of the sender’s rank
+
+---
+title: Gather-to-All
+---
+
+## Gather-to-All
+
+Collect data chunks from all ranks and deliver the combined result to all ranks:
+
+```c
+MPI_Allgather (void *sendbuf, int sendcount, MPI_Datatype sendtype,
+    void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+```
+
+<div class="container flex justify-center mt-5">
+    <img src="/images/MPI-gather-to-all.png" class="block w-lg"/>
+</div>
+
+### Notes
+
+- each rank distributes its **sendbuf** to every rank in the communicator
+- equivalent to **MPI_Gather** followed by an **MPI_Bcast** of the gathered result
+
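+A minimal usage sketch (assumes `rank` and `num_processes` come from `MPI_Comm_rank` / `MPI_Comm_size`; buffers are hypothetical): every rank contributes one `int` and ends up with the contributions of all ranks, ordered by rank.
+
+```c
+int my_value = rank;                                    // one element contributed per rank
+int *all_values = malloc(num_processes * sizeof(int));  // receives one element from every rank
+MPI_Allgather(&my_value, 1, MPI_INT, all_values, 1, MPI_INT, MPI_COMM_WORLD);
+```
+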
+---
+title: All-to-All
+---
+
+## All-to-All
+
+Combined scatter and gather operation:
+
+```c
+MPI_Alltoall (void *sendbuf, int sendcount, MPI_Datatype sendtype,
+    void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+```
+
+<div class="container flex justify-center mt-5">
+    <img src="/images/MPI-all-to-all.png" class="block w-lg"/>
+</div>
+
+### Notes
+
+- a kind of global chunked transpose: chunk *j* sent by rank *i* arrives as chunk *i* at rank *j*
+
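+A minimal sketch under the same assumptions (`rank`, `num_processes`; hypothetical buffers): each rank sends one `int` to every rank and receives one `int` from every rank.
+
+```c
+int *sendbuf = malloc(num_processes * sizeof(int)); // element i is sent to rank i
+int *recvbuf = malloc(num_processes * sizeof(int)); // element i is received from rank i
+for (int i = 0; i < num_processes; i++) sendbuf[i] = 100 * rank + i;
+MPI_Alltoall(sendbuf, 1, MPI_INT, recvbuf, 1, MPI_INT, MPI_COMM_WORLD);
+```
+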
+---
+title: Global Reduction
+---
+
+## Global Reduction
+
+Perform an arithmetic reduction operation while gathering data:
+
+```c
+MPI_Reduce (void *sendbuf, void *recvbuf, int count,
+    MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
+```
+
+<div class="container flex justify-center mt-5">
+    <img src="/images/MPI-global-reduction.png" class="block w-sm"/>
+</div>
+
+### Notes
+
+- The result may be computed **in any order**, since MPI assumes **op** is associative
+    - **All predefined operations are associative and commutative**
+    - **Beware: floating-point arithmetic is not associative**, so results can vary with the evaluation order
+
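+Element-wise sketch (compare `exercises/MPI_examples/reduction/reduction.c` added in this change): every rank contributes the same small vector and the root receives the element-wise sum.
+
+```c
+int sendbuf[3] = {1, 2, 3};
+int recvbuf[3];                 // only significant on the root rank
+// with P ranks, the root's recvbuf becomes {P, 2*P, 3*P}
+MPI_Reduce(sendbuf, recvbuf, 3, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+```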