# MPI Examples
Here are some small example programs that help you understand the MPI basics.
More details can be found in the slides and in the tutorial.
## Environment requirements
Make sure you have OpenMPI installed on your machine.
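You can verify that the MPI compiler wrapper and launcher are on your `PATH` (this assumes a standard OpenMPI installation; other MPI implementations ship the same commands):
```bash
# print the compiler wrapper and launcher versions to confirm the installation
mpicc --version
mpirun --version
```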
## Build
Each subfolder contains its own Makefile; run the following commands to build, run, and clean the corresponding MPI program on a cluster:
```bash
# build all
make
# run the compiled executable
make run
# clean all compilation files
make clean
```
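The `run` target launches the program with `mpirun` and its default process count. To choose the number of processes yourself, or to pass a program argument (the `broadcast` example expects an integer), you can invoke `mpirun` directly; the process count `4` and the value `42` below are arbitrary examples:
```bash
# launch 4 processes and broadcast the value 42 from rank 0
mpirun -np 4 ./broadcast 42
```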
## Credits
Some exercises or examples are taken and adapted from the following sources:
- [https://mpitutorial.com/](https://mpitutorial.com/)
- [The Art of HPC](https://github.com/VictorEijkhout/TheArtOfHPC_vol2_parallelprogramming)
PROG = broadcast
OBJECTS = broadcast.o
CC = mpicc
CFLAGS = -Wall -std=c11
CFLAGS += -I. # add the current directory to the include path
$(PROG): $(OBJECTS) # link the object files into a binary
$(CC) $(CFLAGS) $^ -o $@
.PHONY: run
run: $(PROG) # build and run the program
mpirun ./$(PROG)
$(OBJECTS): %.o: %.c # compile the source files into object files
$(CC) $(CFLAGS) -c $<
.PHONY: clean
clean: # remove the object files and the binary
rm -f $(OBJECTS) $(PROG)
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char **argv) {
int arg;
MPI_Init(&argc, &argv);
// Get the number of processes
int num_processes;
MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
// Get the rank of the process
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
// Get the name of the processor
char processor_name[MPI_MAX_PROCESSOR_NAME];
int name_len;
MPI_Get_processor_name(processor_name, &name_len);
if (rank == 0) {
    if (argc == 1 ||                         // the program was called without parameters
        (argc > 1 && !strcmp(argv[1], "-h")) // or the user asked for help
    ) {
      printf("\nUsage: broadcast [0-9]+\n");
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
arg = atoi(argv[1]);
}
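  // Broadcast the argument parsed by rank 0 to all other ranks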
MPI_Bcast(&arg, 1, MPI_INT, 0, MPI_COMM_WORLD);
printf("Process %d of %d on %s: arg = %d\n", rank, num_processes,
processor_name, arg);
// Finalize the MPI environment. No more MPI calls can be made after this
MPI_Finalize();
}
PROG = gather
OBJECTS = gather.o
CC = mpicc
CFLAGS = -Wall -std=c11
CFLAGS += -I. # add the current directory to the include path
$(PROG): $(OBJECTS) # link the object files into a binary
$(CC) $(CFLAGS) $^ -o $@
.PHONY: run
run: $(PROG) # build and run the program
mpirun ./$(PROG)
$(OBJECTS): %.o: %.c # compile the source files into object files
$(CC) $(CFLAGS) -c $<
.PHONY: clean
clean: # remove the object files and the binary
rm -f $(OBJECTS) $(PROG)
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#define SCATTER_NUM 10
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
// Get the number of processes
int num_processes;
MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
int *bigdata = malloc(SCATTER_NUM * num_processes * sizeof(int));
int *localdata = malloc(SCATTER_NUM * sizeof(int));
// Get the rank of the process
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
for (int i = 0; i < SCATTER_NUM; i++) {
localdata[i] = rank * SCATTER_NUM + (i + 1);
}
// Get the name of the processor
char processor_name[MPI_MAX_PROCESSOR_NAME];
int name_len;
MPI_Get_processor_name(processor_name, &name_len);
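  // Gather SCATTER_NUM ints from every rank into bigdata on rank 0
  // (the receive arguments are only significant at the root)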
MPI_Gather(localdata, SCATTER_NUM, MPI_INT, bigdata,
SCATTER_NUM, MPI_INT, 0, MPI_COMM_WORLD);
if (rank == 0) {
printf("Process %d of %d on %s received: bigdata[%d] = { ", rank, num_processes,
processor_name, SCATTER_NUM * num_processes);
for (int i = 0; i < SCATTER_NUM * num_processes; i++) {
printf("%d, ", bigdata[i]);
}
printf("}\n");
} else {
printf("Process %d of %d on %s sent: localdata[%d] = { ", rank, num_processes,
processor_name, SCATTER_NUM);
for (int i = 0; i < SCATTER_NUM; i++) {
printf("%d, ", localdata[i]);
}
printf("}\n");
}
// Finalize the MPI environment. No more MPI calls can be made after this
MPI_Finalize();
}
PROG1 = manager
PROG2 = worker
OBJECTS = manager.o worker.o
CC = mpicc
CFLAGS = -Wall -std=c11
CFLAGS += -I. # add the current directory to the include path
.PHONY: all
all: $(PROG1) $(PROG2) # build the program
$(PROG1): manager.o
$(CC) $(CFLAGS) $^ -o $@
$(PROG2): worker.o
$(CC) $(CFLAGS) $^ -o $@
.PHONY: run
run: $(PROG1) $(PROG2) # build and run the program
mpirun -np 1 ./$(PROG1)
$(OBJECTS): %.o: %.c # compile the source files into object files
$(CC) $(CFLAGS) -c $<
.PHONY: clean
clean: # remove the object files and the binary
rm -f $(OBJECTS) $(PROG1) $(PROG2)
/****************************************************************
****************************************************************
****
**** This program file is part of the book and course
**** "Parallel Computing"
**** by Victor Eijkhout, copyright 2013-2020
****
**** spawn_manager.c : worker code for spawn example
****
****************************************************************
****************************************************************
*/
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[]) {
#define ASSERT(p) \
if (!(p)) { \
printf("Assertion failed for proc %d at line %d\n", procno, __LINE__); \
return -1; \
}
#define ASSERTm(p, m) \
if (!(p)) { \
printf("Message<<%s>> for proc %d at line %d\n", m, procno, __LINE__); \
return -1; \
}
MPI_Comm comm;
int procno = -1, nprocs;
MPI_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
MPI_Comm_rank(comm, &procno);
MPI_Comm_size(comm, &nprocs);
/*
* To investigate process placement, get host name
*/
{
int namelen = MPI_MAX_PROCESSOR_NAME;
char procname[namelen];
MPI_Get_processor_name(procname, &namelen);
printf("[%d] manager process runs on <<%s>>\n", procno, procname);
}
int world_size, manager_rank, universe_size, *universe_sizep, flag;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
MPI_Comm_rank(MPI_COMM_WORLD, &manager_rank);
MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, (void *)&universe_sizep,
&flag);
// codesnippet uverse
if (!flag) {
if (manager_rank == 0) {
printf("This MPI does not support UNIVERSE_SIZE.\nHow many processes "
"total?");
scanf("%d", &universe_size);
}
    MPI_Bcast(&universe_size, 1, MPI_INT, 0, MPI_COMM_WORLD);
// codesnippet end
} else {
universe_size = *universe_sizep;
if (manager_rank == 0)
printf("Universe size deduced as %d\n", universe_size);
}
ASSERTm(universe_size > world_size, "No room to start workers");
int nworkers = universe_size - world_size;
/*
* Now spawn the workers. Note that there is a run-time determination
* of what type of worker to spawn, and presumably this calculation must
* be done at run time and cannot be calculated before starting
* the program. If everything is known when the application is
* first started, it is generally better to start them all at once
* in a single MPI_COMM_WORLD.
*/
if (manager_rank == 0)
printf("Now spawning %d workers\n", nworkers);
const char *worker_program = "worker";
int errorcodes[nworkers];
MPI_Comm inter_to_workers; /* intercommunicator */
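  // Collectively spawn nworkers copies of the "worker" executable;
  // rank 0 of MPI_COMM_WORLD is the root that supplies the spawn arguments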
MPI_Comm_spawn(worker_program, MPI_ARGV_NULL, nworkers, MPI_INFO_NULL, 0,
MPI_COMM_WORLD, &inter_to_workers, errorcodes);
for (int ie = 0; ie < nworkers; ie++)
if (errorcodes[ie] != 0)
printf("Error %d in spawning worker %d\n", errorcodes[ie], ie);
/*
* Parallel code here. The communicator "inter_to_workers" can be used
* to communicate with the spawned processes, which have ranks 0,..
* MPI_UNIVERSE_SIZE-1 in the remote group of the intercommunicator
* "inter_to_workers".
*/
MPI_Finalize();
return 0;
}
/****************************************************************
****************************************************************
****
**** This program file is part of the book and course
**** "Parallel Computing"
**** by Victor Eijkhout, copyright 2013-6
****
**** spawn_worker.c : worker code for spawn example
****
****************************************************************
****************************************************************
*/
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[]) {
#define ASSERT(p) \
if (!(p)) { \
printf("Assertion failed for proc %d at line %d\n", procno, __LINE__); \
return -1; \
}
#define ASSERTm(p, m) \
if (!(p)) { \
printf("Message<<%s>> for proc %d at line %d\n", m, procno, __LINE__); \
return -1; \
}
MPI_Comm comm;
int procno = -1, nprocs;
MPI_Init(&argc, &argv);
comm = MPI_COMM_WORLD;
MPI_Comm_rank(comm, &procno);
MPI_Comm_size(comm, &nprocs);
MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
int nworkers, workerno;
MPI_Comm parent;
// codesnippet spawnworker
MPI_Comm_size(MPI_COMM_WORLD, &nworkers);
MPI_Comm_rank(MPI_COMM_WORLD, &workerno);
MPI_Comm_get_parent(&parent);
// codesnippet end
ASSERTm(parent != MPI_COMM_NULL, "No parent!");
/*
* To investigate process placement, get host name
*/
{
int namelen = MPI_MAX_PROCESSOR_NAME;
char procname[namelen];
MPI_Get_processor_name(procname, &namelen);
printf("[%d] worker process runs on <<%s>>\n", workerno, procname);
}
/*
* Parallel code here.
* The manager is represented as the process with rank 0 in (the remote
* group of) MPI_COMM_PARENT. If the workers need to communicate among
* themselves, they can use MPI_COMM_WORLD.
*/
char hostname[256];
int namelen = 256;
MPI_Get_processor_name(hostname, &namelen);
printf("worker %d running on %s\n", workerno, hostname);
MPI_Finalize();
return 0;
}
PROG = reduction
OBJECTS = reduction.o
CC = mpicc
CFLAGS = -Wall -std=c11
CFLAGS += -I. # add the current directory to the include path
$(PROG): $(OBJECTS) # link the object files into a binary
$(CC) $(CFLAGS) $^ -o $@
.PHONY: run
run: $(PROG) # build and run the program
mpirun ./$(PROG)
$(OBJECTS): %.o: %.c # compile the source files into object files
$(CC) $(CFLAGS) -c $<
.PHONY: clean
clean: # remove the object files and the binary
rm -f $(OBJECTS) $(PROG)
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#define SCATTER_NUM 10
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
// Get the number of processes
int num_processes;
MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
int *sendbuf = malloc(SCATTER_NUM * sizeof(int));
int *recvbuf = malloc(SCATTER_NUM * sizeof(int));
// Get the rank of the process
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
for (int i = 0; i < SCATTER_NUM; i++) {
sendbuf[i] = i + 1;
}
// Get the name of the processor
char processor_name[MPI_MAX_PROCESSOR_NAME];
int name_len;
MPI_Get_processor_name(processor_name, &name_len);
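  // Combine sendbuf element-wise with MPI_SUM across all ranks;
  // the result is stored in recvbuf on rank 0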
MPI_Reduce(sendbuf, recvbuf, SCATTER_NUM, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) {
printf("Process %d of %d on %s received with reduction: [%d] = { ", rank, num_processes,
processor_name, SCATTER_NUM);
for (int i = 0; i < SCATTER_NUM; i++) {
printf("%d, ", recvbuf[i]);
}
printf("}\n");
} else {
printf("Process %d of %d on %s sent: sendbuf[%d] = { ", rank, num_processes,
processor_name, SCATTER_NUM);
for (int i = 0; i < SCATTER_NUM; i++) {
printf("%d, ", sendbuf[i]);
}
printf("}\n");
}
// Finalize the MPI environment. No more MPI calls can be made after this
MPI_Finalize();
}
PROG = scatter
OBJECTS = scatter.o
CC = mpicc
CFLAGS = -Wall -std=c11
CFLAGS += -I. # add the current directory to the include path
$(PROG): $(OBJECTS) # link the object files into a binary
$(CC) $(CFLAGS) $^ -o $@
.PHONY: run
run: $(PROG) # build and run the program
mpirun ./$(PROG)
$(OBJECTS): %.o: %.c # compile the source files into object files
$(CC) $(CFLAGS) -c $<
.PHONY: clean
clean: # remove the object files and the binary
rm -f $(OBJECTS) $(PROG)
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#define SCATTER_NUM 10
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
// Get the number of processes
int num_processes;
MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
int *bigdata = malloc(SCATTER_NUM * num_processes * sizeof(int));
int *localdata = malloc(SCATTER_NUM * sizeof(int));
  for (int i = 0; i < SCATTER_NUM * num_processes; i++) {
bigdata[i] = i + 1;
}
// Get the rank of the process
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
// Get the name of the processor
char processor_name[MPI_MAX_PROCESSOR_NAME];
int name_len;
MPI_Get_processor_name(processor_name, &name_len);
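  // Split bigdata on rank 0 into chunks of SCATTER_NUM ints,
  // delivering one chunk into each rank's localdata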
MPI_Scatter(bigdata, SCATTER_NUM, MPI_INT, localdata, SCATTER_NUM, MPI_INT, 0,
MPI_COMM_WORLD);
printf("Process %d of %d on %s received: localdata[%d] = {", rank, num_processes, processor_name, SCATTER_NUM);
for (int i = 0; i < SCATTER_NUM; i++) {
printf("%d, ", localdata[i]);
}
printf("}\n");
// Finalize the MPI environment. No more MPI calls can be made after this
MPI_Finalize();
}