diff --git a/exercises/MPI_examples/scatter/Makefile b/exercises/MPI_examples/scatter/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..73463b93f4dbedabc49c9a1fc581cd38f2e95930
--- /dev/null
+++ b/exercises/MPI_examples/scatter/Makefile
@@ -0,0 +1,20 @@
+PROG = scatter
+OBJECTS = scatter.o
+CC = mpicc
+CFLAGS = -Wall -std=c11
+CFLAGS += -I. # add the current directory to the include path
+
+$(PROG): $(OBJECTS) # link the object files into a binary
+	$(CC) $(CFLAGS) $^ -o $@
+
+.PHONY: run
+run: $(PROG) # build and run the program
+	mpirun ./$(PROG)
+
+$(OBJECTS): %.o: %.c # compile the source files into object files
+	$(CC) $(CFLAGS) -c $<
+
+.PHONY: clean
+clean: # remove the object files and the binary
+	rm -f $(OBJECTS) $(PROG)
+
diff --git a/exercises/MPI_examples/scatter/scatter.c b/exercises/MPI_examples/scatter/scatter.c
new file mode 100644
index 0000000000000000000000000000000000000000..eb1fef86e21e1cae2cf29570856133d4b9ca3eeb
--- /dev/null
+++ b/exercises/MPI_examples/scatter/scatter.c
@@ -0,0 +1,52 @@
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/* Number of elements each rank receives from the root's scatter. */
+#define SCATTER_NUM 10
+
+int main(int argc, char **argv) {
+
+  MPI_Init(&argc, &argv);
+
+  // Get the number of processes
+  int num_processes;
+  MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
+
+  int *bigdata = malloc(SCATTER_NUM * num_processes * sizeof(int));
+  int *localdata = malloc(SCATTER_NUM * sizeof(int));
+  if (bigdata == NULL || localdata == NULL) {
+    fprintf(stderr, "malloc failed\n");
+    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+  }
+
+  // Fill the send buffer with 1..(SCATTER_NUM * num_processes)
+  for (int i = 0; i < SCATTER_NUM * num_processes; i++) {
+    bigdata[i] = i + 1;
+  }
+
+  // Get the rank of the process
+  int rank;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  // Get the name of the processor
+  char processor_name[MPI_MAX_PROCESSOR_NAME];
+  int name_len;
+  MPI_Get_processor_name(processor_name, &name_len);
+
+  // Rank 0 distributes SCATTER_NUM ints to every rank (including itself)
+  MPI_Scatter(bigdata, SCATTER_NUM, MPI_INT, localdata, SCATTER_NUM, MPI_INT, 0,
+              MPI_COMM_WORLD);
+
+  printf("Process %d of %d on %s: localdata[%d] = {", rank, num_processes,
+         processor_name, SCATTER_NUM);
+  for (int i = 0; i < SCATTER_NUM; i++) {
+    printf("%d, ", localdata[i]);
+  }
+  printf("}\n");
+
+  free(bigdata);
+  free(localdata);
+
+  // Finalize the MPI environment. No more MPI calls can be made after this
+  MPI_Finalize();
+
+  return EXIT_SUCCESS;
+}
diff --git a/slides/images/MPI-scatter.png b/slides/images/MPI-scatter.png
new file mode 100644
index 0000000000000000000000000000000000000000..ef7d1c18ea05e5cfc2e7780a7b7e89bc1b56db1b
Binary files /dev/null and b/slides/images/MPI-scatter.png differ
diff --git a/slides/pages/recap.md b/slides/pages/recap.md
index 261e2cbced7658fae87fcf72db6d1c23bc33bd77..fd69d3bfa2d86efc94a5d8bdb391daf338698dd9 100644
--- a/slides/pages/recap.md
+++ b/slides/pages/recap.md
@@ -364,3 +364,27 @@ MPI_Bcast (void *data, int count, MPI_Datatype dtype, int root, MPI_Comm comm)
 - **MPI_Bcast completes only after all ranks in comm have made the call**
 
 See live demo.
+
+---
+title: Data Scatter
+---
+
+## Data Scatter
+
+```c
+MPI_Scatter (void *sendbuf, int sendcount, MPI_Datatype sendtype,
+             void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
+```
+
+<div class="container flex justify-center mt-5">
+  <img src="/images/MPI-scatter.png" class="block w-lg"/>
+</div>
+
+### Notes
+
+- **sendbuf** must be large enough in order to supply **sendcount** elements
+- data chunks are taken in increasing order following the receiver’s rank
+- root also sends one data chunk to itself
+- **for each chunk the amount of data sent must match the receive size**
+
+See live demo.