Commit efeaf241 authored by Chao Zhan

add MPI gather example

parent 69e0e776
Makefile

PROG = gather
OBJECTS = gather.o

CC = mpicc
CFLAGS = -Wall -std=c11
CFLAGS += -I.              # add the current directory to the include path

$(PROG): $(OBJECTS)        # link the object files into a binary
	$(CC) $(CFLAGS) $^ -o $@

.PHONY: run
run: $(PROG)               # build and run the program
	mpirun ./$(PROG)

$(OBJECTS): %.o: %.c       # compile the source files into object files
	$(CC) $(CFLAGS) -c $<

.PHONY: clean
clean:                     # remove the object files and the binary
	rm -f $(OBJECTS) $(PROG)
gather.c

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define SCATTER_NUM 10  // number of elements contributed by each rank

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  // Get the number of processes
  int num_processes;
  MPI_Comm_size(MPI_COMM_WORLD, &num_processes);

  // Receive buffer for the gathered data (only read by the root rank)
  int *bigdata = malloc(SCATTER_NUM * num_processes * sizeof(int));
  // Per-rank chunk that every process sends to the root
  int *localdata = malloc(SCATTER_NUM * sizeof(int));

  // Get the rank of the process
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Fill the local chunk with values that identify the sending rank
  for (int i = 0; i < SCATTER_NUM; i++) {
    localdata[i] = rank * SCATTER_NUM + (i + 1);
  }

  // Get the name of the processor
  char processor_name[MPI_MAX_PROCESSOR_NAME];
  int name_len;
  MPI_Get_processor_name(processor_name, &name_len);

  // Collect SCATTER_NUM integers from every rank into bigdata on rank 0
  MPI_Gather(localdata, SCATTER_NUM, MPI_INT, bigdata,
             SCATTER_NUM, MPI_INT, 0, MPI_COMM_WORLD);

  if (rank == 0) {
    printf("Process %d of %d on %s received: bigdata[%d] = { ", rank, num_processes,
           processor_name, SCATTER_NUM * num_processes);
    for (int i = 0; i < SCATTER_NUM * num_processes; i++) {
      printf("%d, ", bigdata[i]);
    }
    printf("}\n");
  } else {
    printf("Process %d of %d on %s sent: localdata[%d] = { ", rank, num_processes,
           processor_name, SCATTER_NUM);
    for (int i = 0; i < SCATTER_NUM; i++) {
      printf("%d, ", localdata[i]);
    }
    printf("}\n");
  }

  free(bigdata);
  free(localdata);

  // Finalize the MPI environment. No more MPI calls can be made after this
  MPI_Finalize();
  return 0;
}
slides/images/MPI-gather.png (new image, 48.8 KiB)
@@ -349,6 +349,8 @@ title: Data Replication (Broadcast)
## Data Replication (Broadcast)
Replicate data from one rank to all other ranks:
```c
MPI_Bcast (void *data, int count, MPI_Datatype dtype, int root, MPI_Comm comm)
```
@@ -363,14 +365,14 @@ MPI_Bcast (void *data, int count, MPI_Datatype dtype, int root, MPI_Comm comm)
- in rank root, data is an input argument
- **MPI_Bcast completes only after all ranks in comm have made the call**
See live demo.
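
A minimal, self-contained sketch of a typical broadcast (the parameter name `nsteps` and its value are made up for illustration; they are not part of the repository):

```c
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  int nsteps = 0;
  if (rank == 0) {
    nsteps = 1000;  // e.g. parsed from the command line on the root only
  }
  // Every rank passes the same count and type; after the call all ranks hold 1000
  MPI_Bcast(&nsteps, 1, MPI_INT, 0, MPI_COMM_WORLD);
  printf("rank %d: nsteps = %d\n", rank, nsteps);

  MPI_Finalize();
}
```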
---
title: Data Scatter
---
## Data Scatter
Distribute chunks of data from one rank to all ranks:
```c
MPI_Scatter (void *sendbuf, int sendcount, MPI_Datatype sendtype,
             void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
```

@@ -387,4 +389,25 @@ MPI_Scatter (void *sendbuf, int sendcount, MPI_Datatype sendtype,
- root also sends one data chunk to itself
- **for each chunk the amount of data sent must match the receive size**
See live demo.
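
A minimal sketch of the call pattern, assuming the usual MPI boilerplate (`rank`, `num_processes`, the standard includes) as in the gather example in this commit, and a chunk size `CHUNK` chosen only for illustration:

```c
enum { CHUNK = 4 };                 // elements per rank, assumed for this sketch
int *sendbuf = NULL;
if (rank == 0) {
  // only the root needs the full array: CHUNK * num_processes elements
  sendbuf = malloc(CHUNK * num_processes * sizeof(int));
  for (int i = 0; i < CHUNK * num_processes; i++)
    sendbuf[i] = i;
}
int recvbuf[CHUNK];                 // every rank, including the root, receives one chunk
MPI_Scatter(sendbuf, CHUNK, MPI_INT, recvbuf, CHUNK, MPI_INT, 0, MPI_COMM_WORLD);
// recvbuf now holds elements rank*CHUNK .. rank*CHUNK + CHUNK - 1
```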
---
title: Data Gather
---
## Data Gather
Collect chunks of data from all ranks in one place:
```c
MPI_Gather (void *sendbuf, int sendcount, MPI_Datatype sendtype,
            void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
```
<div class="container flex justify-center mt-5">
<img src="/images/MPI-gather.png" class="block w-lg"/>
</div>
### Notes
- The opposite operation of **MPI_Scatter**
- root also receives one data chunk from itself
- data chunks are stored in increasing order of the sender’s rank
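
The `gather.c` added in this commit shows the complete program; the fragment below is a slight variant of its gather call, illustrating that the receive buffer is significant only at the root (identifiers as in `gather.c`):

```c
int *bigdata = NULL;
if (rank == 0) {
  // only the root reads the gathered data, so only the root needs the buffer;
  // it must hold SCATTER_NUM ints from each of the num_processes ranks
  bigdata = malloc(SCATTER_NUM * num_processes * sizeof(int));
}
MPI_Gather(localdata, SCATTER_NUM, MPI_INT,
           bigdata, SCATTER_NUM, MPI_INT, 0, MPI_COMM_WORLD);
// on rank 0, bigdata[r * SCATTER_NUM .. (r + 1) * SCATTER_NUM - 1] came from rank r
```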