Skip to content
Snippets Groups Projects
Commit 69e0e776 authored by Chao Zhan's avatar Chao Zhan
Browse files

add MPI scatter example

parent ae86e025
Branches
No related tags found
No related merge requests found
PROG = scatter
OBJECTS = scatter.o
CC = mpicc
CFLAGS = -Wall -std=c11
CFLAGS += -I. # add the current directory to the include path

# Link the object files into the final binary.
$(PROG): $(OBJECTS)
	$(CC) $(CFLAGS) $^ -o $@

# Build and run the program under mpirun.
# NOTE: was ".Phone: run" — the typo meant `run` was never marked phony,
# so a file named "run" in this directory would have shadowed the target.
.PHONY: run
run: $(PROG)
	mpirun ./$(PROG)

# Compile each source file into its object file.
$(OBJECTS): %.o: %.c
	$(CC) $(CFLAGS) -c $<

# Remove the object files and the binary.
.PHONY: clean
clean:
	rm -f $(OBJECTS) $(PROG)
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#define SCATTER_NUM 10
/*
 * MPI_Scatter demo: the root rank fills a large buffer and scatters
 * SCATTER_NUM ints to every rank (including itself); each rank prints
 * the chunk it received.
 */
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  // Get the number of processes in the communicator.
  int num_processes;
  MPI_Comm_size(MPI_COMM_WORLD, &num_processes);

  // The send buffer must hold SCATTER_NUM elements per rank.
  // (Every rank allocates it here for simplicity; MPI_Scatter only
  // reads it on the root.)
  int *bigdata = malloc(SCATTER_NUM * num_processes * sizeof *bigdata);
  int *localdata = malloc(SCATTER_NUM * sizeof *localdata);
  if (bigdata == NULL || localdata == NULL) {
    fprintf(stderr, "out of memory\n");
    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
  }

  // Fill the send buffer with 1..SCATTER_NUM*num_processes.
  // Was a hard-coded `10 * num_processes`, which would silently go out
  // of sync if SCATTER_NUM were ever changed.
  for (int i = 0; i < SCATTER_NUM * num_processes; i++) {
    bigdata[i] = i + 1;
  }

  // Get the rank of this process.
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Get the name of the processor (for the printout only).
  char processor_name[MPI_MAX_PROCESSOR_NAME];
  int name_len;
  MPI_Get_processor_name(processor_name, &name_len);

  // Chunk i of bigdata (on root) lands in localdata on rank i;
  // send and receive counts/types must describe the same amount of data.
  MPI_Scatter(bigdata, SCATTER_NUM, MPI_INT, localdata, SCATTER_NUM, MPI_INT, 0,
              MPI_COMM_WORLD);

  // Was "localdata[10]" hard-coded; report the actual chunk size.
  printf("Process %d of %d on %s: localdata[%d] = {", rank, num_processes,
         processor_name, SCATTER_NUM);
  for (int i = 0; i < SCATTER_NUM; i++) {
    printf("%d, ", localdata[i]);
  }
  printf("}\n");

  free(bigdata);
  free(localdata);

  // Finalize the MPI environment. No more MPI calls can be made after this.
  MPI_Finalize();
  return EXIT_SUCCESS;
}
slides/images/MPI-scatter.png

31.4 KiB

......@@ -364,3 +364,27 @@ MPI_Bcast (void *data, int count, MPI_Datatype dtype, int root, MPI_Comm comm)
- **MPI_Bcast completes only after all ranks in comm have made the call**
See live demo.
---
title: Data Scatter
---
## Data Scatter
```c
MPI_Scatter (void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
```
<div class="container flex justify-center mt-5">
<img src="/images/MPI-scatter.png" class="block w-lg"/>
</div>
### Notes
- **sendbuf** must be large enough in order to supply **sendcount** elements
- data chunks are taken in increasing order following the receiver’s rank
- root also sends one data chunk to itself
- **for each chunk the amount of data sent must match the receive size**
See live demo.
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment