diff --git a/exercises/MPI_examples/README.md b/exercises/MPI_examples/README.md
deleted file mode 100644
index a7d5c4f7332b38bb2807fc4a8d6b8fc56cd191e5..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/README.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# MPI Examples
-
-Here are some small example programs that help you understand the MPI basics.
-
-More details can be found in the slides and in the tutorial.
-
-## Environment requirements
-
-Make sure you have OpenMPI installed on your machine.
-
-## Build
-
-Each subfolder contains its own Makefile; run the following commands to build and run the MPI program, either locally or on a cluster:
-
-```bash
-# build all
-make
-
-# run the compiled executable
-make run
-
-# clean all compilation files
-make clean
-```
-
-## Credits
-
-Some exercises and examples are taken and adapted from the following sources:
-
-- [https://mpitutorial.com/](https://mpitutorial.com/)
-- [The Art of HPC](https://github.com/VictorEijkhout/TheArtOfHPC_vol2_parallelprogramming)
-
diff --git a/exercises/MPI_examples/broadcast/Makefile b/exercises/MPI_examples/broadcast/Makefile
deleted file mode 100644
index b40f84739d1b437038c04632b0c669e4f7a32cee..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/broadcast/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-PROG = broadcast
-OBJECTS = broadcast.o
-CC = mpicc
-CFLAGS = -Wall -std=c11
-CFLAGS += -I. # add the current directory to the include path
-
-$(PROG): $(OBJECTS) # link the object files into a binary
-	$(CC) $(CFLAGS) $^ -o $@
-
-.PHONY: run
-run: $(PROG) # build and run the program
-	mpirun ./$(PROG)
-
-$(OBJECTS): %.o: %.c # compile the source files into object files
-	$(CC) $(CFLAGS) -c $<
-
-.PHONY: clean
-clean: # remove the object files and the binary
-	rm -f $(OBJECTS) $(PROG)
-
diff --git a/exercises/MPI_examples/broadcast/broadcast.c b/exercises/MPI_examples/broadcast/broadcast.c
deleted file mode 100644
index 519557d19007329d9278d9a089cb919df3d9f913..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/broadcast/broadcast.c
+++ /dev/null
@@ -1,44 +0,0 @@
-#include <mpi.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-int main(int argc, char **argv) {
-
-  int arg;
-  MPI_Init(&argc, &argv);
-
-  // Get the number of processes
-  int num_processes;
-  MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
-
-  // Get the rank of the process
-  int rank;
-  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-
-  // Get the name of the processor
-  char processor_name[MPI_MAX_PROCESSOR_NAME];
-  int name_len;
-  MPI_Get_processor_name(processor_name, &name_len);
-
-  if (rank == 0) {
-
-    if (argc == 1 || // the program is called without parameters
-        (argc > 1 && !strcmp(argv[1], "-h")) // the user asked for help
-    ) {
-      printf("\nUsage: broadcast [0-9]+\n");
-      MPI_Abort(MPI_COMM_WORLD, 1);
-    }
-
-    arg = atoi(argv[1]);
-  }
-
-  MPI_Bcast(&arg, 1, MPI_INT, 0, MPI_COMM_WORLD);
-
-  printf("Process %d of %d on %s: arg = %d\n", rank, num_processes,
-         processor_name, arg);
-
-  // Finalize the MPI environment. No more MPI calls can be made after this
-  MPI_Finalize();
-}
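
Note that the `run` target above calls `mpirun ./broadcast` without an argument, so rank 0 prints the usage message and aborts. A minimal sketch of a direct invocation, assuming an OpenMPI-style `mpirun` and at least four available slots (the process count and the value `7` are arbitrary):

```bash
# build the example, then broadcast the value 7 from rank 0 to all four processes
make
mpirun -np 4 ./broadcast 7
```

Every rank should then report `arg = 7`, since `MPI_Bcast` copies the root's value into each process's buffer.
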
diff --git a/exercises/MPI_examples/gather/Makefile b/exercises/MPI_examples/gather/Makefile
deleted file mode 100644
index 8272dbe4d34a7ac9ae39bc67a74002be79d14ba5..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/gather/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-PROG = gather
-OBJECTS = gather.o
-CC = mpicc
-CFLAGS = -Wall -std=c11
-CFLAGS += -I. # add the current directory to the include path
-
-$(PROG): $(OBJECTS) # link the object files into a binary
-	$(CC) $(CFLAGS) $^ -o $@
-
-.PHONY: run
-run: $(PROG) # build and run the program
-	mpirun ./$(PROG)
-
-$(OBJECTS): %.o: %.c # compile the source files into object files
-	$(CC) $(CFLAGS) -c $<
-
-.PHONY: clean
-clean: # remove the object files and the binary
-	rm -f $(OBJECTS) $(PROG)
-
diff --git a/exercises/MPI_examples/gather/gather.c b/exercises/MPI_examples/gather/gather.c
deleted file mode 100644
index 418135ad0f3ea0298bc8efee85c2bfed3062e971..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/gather/gather.c
+++ /dev/null
@@ -1,52 +0,0 @@
-#include <mpi.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#define SCATTER_NUM 10
-
-int main(int argc, char **argv) {
-
-  MPI_Init(&argc, &argv);
-
-  // Get the number of processes
-  int num_processes;
-  MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
-
-  int *bigdata = malloc(SCATTER_NUM * num_processes * sizeof(int));
-  int *localdata = malloc(SCATTER_NUM * sizeof(int));
-
-  // Get the rank of the process
-  int rank;
-  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-
-  for (int i = 0; i < SCATTER_NUM; i++) {
-    localdata[i] = rank * SCATTER_NUM + (i + 1);
-  }
-
-  // Get the name of the processor
-  char processor_name[MPI_MAX_PROCESSOR_NAME];
-  int name_len;
-  MPI_Get_processor_name(processor_name, &name_len);
-
-  MPI_Gather(localdata, SCATTER_NUM, MPI_INT, bigdata,
-             SCATTER_NUM, MPI_INT, 0, MPI_COMM_WORLD);
-
-  if (rank == 0) {
-    printf("Process %d of %d on %s received: bigdata[%d] = { ", rank, num_processes,
-           processor_name, SCATTER_NUM * num_processes);
-    for (int i = 0; i < SCATTER_NUM * num_processes; i++) {
-      printf("%d, ", bigdata[i]);
-    }
-    printf("}\n");
-  } else {
-    printf("Process %d of %d on %s sent: localdata[%d] = { ", rank, num_processes,
-           processor_name, SCATTER_NUM);
-    for (int i = 0; i < SCATTER_NUM; i++) {
-      printf("%d, ", localdata[i]);
-    }
-    printf("}\n");
-  }
-
-  // Finalize the MPI environment. No more MPI calls can be made after this
-  MPI_Finalize();
-}
diff --git a/exercises/MPI_examples/manager-worker/Makefile b/exercises/MPI_examples/manager-worker/Makefile
deleted file mode 100644
index 34e78127c3dd42a674268d1362224fd2a2b0b309..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/manager-worker/Makefile
+++ /dev/null
@@ -1,27 +0,0 @@
-PROG1 = manager
-PROG2 = worker
-OBJECTS = manager.o worker.o
-CC = mpicc
-CFLAGS = -Wall -std=c11
-CFLAGS += -I. # add the current directory to the include path
-
-.PHONY: all
-all: $(PROG1) $(PROG2) # build both programs
-
-$(PROG1): manager.o
-	$(CC) $(CFLAGS) $^ -o $@
-
-$(PROG2): worker.o
-	$(CC) $(CFLAGS) $^ -o $@
-
-.PHONY: run
-run: $(PROG1) $(PROG2) # build and run the program
-	mpirun -np 1 ./$(PROG1)
-
-$(OBJECTS): %.o: %.c # compile the source files into object files
-	$(CC) $(CFLAGS) -c $<
-
-.PHONY: clean
-clean: # remove the object files and the binaries
-	rm -f $(OBJECTS) $(PROG1) $(PROG2)
-
diff --git a/exercises/MPI_examples/manager-worker/manager.c b/exercises/MPI_examples/manager-worker/manager.c
deleted file mode 100644
index 15060c7065185b9bd288d990722d01472ce99a19..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/manager-worker/manager.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/****************************************************************
- ****************************************************************
- ****
- **** This program file is part of the book and course
- **** "Parallel Computing"
- **** by Victor Eijkhout, copyright 2013-2020
- ****
- **** spawn_manager.c : manager code for spawn example
- ****
- ****************************************************************
- ****************************************************************
- */
-
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-int main(int argc, char *argv[]) {
-
-#define ASSERT(p) \
-  if (!(p)) { \
-    printf("Assertion failed for proc %d at line %d\n", procno, __LINE__); \
-    return -1; \
-  }
-#define ASSERTm(p, m) \
-  if (!(p)) { \
-    printf("Message<<%s>> for proc %d at line %d\n", m, procno, __LINE__); \
-    return -1; \
-  }
-
-  MPI_Comm comm;
-  int procno = -1, nprocs;
-  MPI_Init(&argc, &argv);
-  comm = MPI_COMM_WORLD;
-  MPI_Comm_rank(comm, &procno);
-  MPI_Comm_size(comm, &nprocs);
-
-  /*
-   * To investigate process placement, get host name
-   */
-  {
-    int namelen = MPI_MAX_PROCESSOR_NAME;
-    char procname[namelen];
-    MPI_Get_processor_name(procname, &namelen);
-    printf("[%d] manager process runs on <<%s>>\n", procno, procname);
-  }
-
-  int world_size, manager_rank, universe_size, *universe_sizep, flag;
-
-  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
-  MPI_Comm_rank(MPI_COMM_WORLD, &manager_rank);
-  MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, (void *)&universe_sizep,
-                    &flag);
-
-  // codesnippet uverse
-  if (!flag) {
-    if (manager_rank == 0) {
-      printf("This MPI does not support UNIVERSE_SIZE.\nHow many processes "
-             "total? ");
-      scanf("%d", &universe_size);
-    }
-    MPI_Bcast(&universe_size, 1, MPI_INT, 0, MPI_COMM_WORLD);
-    // codesnippet end
-  } else {
-    universe_size = *universe_sizep;
-    if (manager_rank == 0)
-      printf("Universe size deduced as %d\n", universe_size);
-  }
-  ASSERTm(universe_size > world_size, "No room to start workers");
-  int nworkers = universe_size - world_size;
-
-  /*
-   * Now spawn the workers. Note that there is a run-time determination
-   * of what type of worker to spawn, and presumably this calculation must
-   * be done at run time and cannot be calculated before starting
-   * the program. If everything is known when the application is
-   * first started, it is generally better to start them all at once
-   * in a single MPI_COMM_WORLD.
-   */
-
-  if (manager_rank == 0)
-    printf("Now spawning %d workers\n", nworkers);
-  const char *worker_program = "worker";
-  int errorcodes[nworkers];
-  MPI_Comm inter_to_workers; /* intercommunicator */
-  MPI_Comm_spawn(worker_program, MPI_ARGV_NULL, nworkers, MPI_INFO_NULL, 0,
-                 MPI_COMM_WORLD, &inter_to_workers, errorcodes);
-  for (int ie = 0; ie < nworkers; ie++)
-    if (errorcodes[ie] != 0)
-      printf("Error %d in spawning worker %d\n", errorcodes[ie], ie);
-
-  /*
-   * Parallel code here. The communicator "inter_to_workers" can be used
-   * to communicate with the spawned processes, which have ranks 0, ...,
-   * nworkers-1 in the remote group of the intercommunicator
-   * "inter_to_workers".
-   */
-
-  MPI_Finalize();
-  return 0;
-}
-
diff --git a/exercises/MPI_examples/manager-worker/worker.c b/exercises/MPI_examples/manager-worker/worker.c
deleted file mode 100644
index 9fe965e8b44e028b47bb1fe203200108de9267bf..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/manager-worker/worker.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/****************************************************************
- ****************************************************************
- ****
- **** This program file is part of the book and course
- **** "Parallel Computing"
- **** by Victor Eijkhout, copyright 2013-2016
- ****
- **** spawn_worker.c : worker code for spawn example
- ****
- ****************************************************************
- ****************************************************************
- */
-
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-int main(int argc, char *argv[]) {
-
-#define ASSERT(p) \
-  if (!(p)) { \
-    printf("Assertion failed for proc %d at line %d\n", procno, __LINE__); \
-    return -1; \
-  }
-#define ASSERTm(p, m) \
-  if (!(p)) { \
-    printf("Message<<%s>> for proc %d at line %d\n", m, procno, __LINE__); \
-    return -1; \
-  }
-
-  MPI_Comm comm;
-  int procno = -1, nprocs;
-  MPI_Init(&argc, &argv);
-  comm = MPI_COMM_WORLD;
-  MPI_Comm_rank(comm, &procno);
-  MPI_Comm_size(comm, &nprocs);
-  MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
-
-  int nworkers, workerno;
-  MPI_Comm parent;
-
-  // codesnippet spawnworker
-  MPI_Comm_size(MPI_COMM_WORLD, &nworkers);
-  MPI_Comm_rank(MPI_COMM_WORLD, &workerno);
-  MPI_Comm_get_parent(&parent);
-  // codesnippet end
-  ASSERTm(parent != MPI_COMM_NULL, "No parent!");
-
-  /*
-   * To investigate process placement, get host name
-   */
-  {
-    int namelen = MPI_MAX_PROCESSOR_NAME;
-    char procname[namelen];
-    MPI_Get_processor_name(procname, &namelen);
-    printf("[%d] worker process runs on <<%s>>\n", workerno, procname);
-  }
-
-  /*
-   * Parallel code here.
-   * The manager is represented as the process with rank 0 in the remote
-   * group of the parent intercommunicator. If the workers need to communicate
-   * among themselves, they can use MPI_COMM_WORLD.
-   */
-
-  char hostname[256];
-  int namelen = 256;
-  MPI_Get_processor_name(hostname, &namelen);
-  printf("worker %d running on %s\n", workerno, hostname);
-
-  MPI_Finalize();
-  return 0;
-}
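
The manager is started alone (as in the Makefile's `run` target) and then creates the workers itself through `MPI_Comm_spawn`, so the launcher must have spare slots for the extra processes and must be able to find the `worker` binary; running from the `manager-worker` directory covers the latter. A sketch of a launch, assuming OpenMPI on a single node; the `--oversubscribe` flag is only needed if the spawn is refused for lack of free slots:

```bash
# build manager and worker, then start only the manager; it spawns the workers
make
mpirun -np 1 ./manager

# if the spawn fails because no slots are available, allow oversubscription (OpenMPI)
mpirun -np 1 --oversubscribe ./manager
```

If the MPI implementation does not report `MPI_UNIVERSE_SIZE`, the manager prompts for the total process count and derives the number of workers from it.
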
diff --git a/exercises/MPI_examples/reduction/Makefile b/exercises/MPI_examples/reduction/Makefile
deleted file mode 100644
index 30348099dabd59208855e691f8b3aea8e09e2e40..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/reduction/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-PROG = reduction
-OBJECTS = reduction.o
-CC = mpicc
-CFLAGS = -Wall -std=c11
-CFLAGS += -I. # add the current directory to the include path
-
-$(PROG): $(OBJECTS) # link the object files into a binary
-	$(CC) $(CFLAGS) $^ -o $@
-
-.PHONY: run
-run: $(PROG) # build and run the program
-	mpirun ./$(PROG)
-
-$(OBJECTS): %.o: %.c # compile the source files into object files
-	$(CC) $(CFLAGS) -c $<
-
-.PHONY: clean
-clean: # remove the object files and the binary
-	rm -f $(OBJECTS) $(PROG)
-
diff --git a/exercises/MPI_examples/reduction/reduction.c b/exercises/MPI_examples/reduction/reduction.c
deleted file mode 100644
index 61f922a8199a0c724f1009c1face47f841ea2b87..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/reduction/reduction.c
+++ /dev/null
@@ -1,51 +0,0 @@
-#include <mpi.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#define SCATTER_NUM 10
-
-int main(int argc, char **argv) {
-
-  MPI_Init(&argc, &argv);
-
-  // Get the number of processes
-  int num_processes;
-  MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
-
-  int *sendbuf = malloc(SCATTER_NUM * sizeof(int));
-  int *recvbuf = malloc(SCATTER_NUM * sizeof(int));
-
-  // Get the rank of the process
-  int rank;
-  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-
-  for (int i = 0; i < SCATTER_NUM; i++) {
-    sendbuf[i] = i + 1;
-  }
-
-  // Get the name of the processor
-  char processor_name[MPI_MAX_PROCESSOR_NAME];
-  int name_len;
-  MPI_Get_processor_name(processor_name, &name_len);
-
-  MPI_Reduce(sendbuf, recvbuf, SCATTER_NUM, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
-
-  if (rank == 0) {
-    printf("Process %d of %d on %s received with reduction: recvbuf[%d] = { ", rank, num_processes,
-           processor_name, SCATTER_NUM);
-    for (int i = 0; i < SCATTER_NUM; i++) {
-      printf("%d, ", recvbuf[i]);
-    }
-    printf("}\n");
-  } else {
-    printf("Process %d of %d on %s sent: sendbuf[%d] = { ", rank, num_processes,
-           processor_name, SCATTER_NUM);
-    for (int i = 0; i < SCATTER_NUM; i++) {
-      printf("%d, ", sendbuf[i]);
-    }
-    printf("}\n");
-  }
-
-  // Finalize the MPI environment. No more MPI calls can be made after this
-  MPI_Finalize();
-}
diff --git a/exercises/MPI_examples/scatter/Makefile b/exercises/MPI_examples/scatter/Makefile
deleted file mode 100644
index eb8c598629e2ee61a98f40dd8a487e4a67e3dc61..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/scatter/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-PROG = scatter
-OBJECTS = scatter.o
-CC = mpicc
-CFLAGS = -Wall -std=c11
-CFLAGS += -I. # add the current directory to the include path
-
-$(PROG): $(OBJECTS) # link the object files into a binary
-	$(CC) $(CFLAGS) $^ -o $@
-
-.PHONY: run
-run: $(PROG) # build and run the program
-	mpirun ./$(PROG)
-
-$(OBJECTS): %.o: %.c # compile the source files into object files
-	$(CC) $(CFLAGS) -c $<
-
-.PHONY: clean
-clean: # remove the object files and the binary
-	rm -f $(OBJECTS) $(PROG)
-
diff --git a/exercises/MPI_examples/scatter/scatter.c b/exercises/MPI_examples/scatter/scatter.c
deleted file mode 100644
index 28b633614fd572b5f4d88a0e4bbbbd0c943a52d1..0000000000000000000000000000000000000000
--- a/exercises/MPI_examples/scatter/scatter.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <mpi.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#define SCATTER_NUM 10
-
-int main(int argc, char **argv) {
-
-  MPI_Init(&argc, &argv);
-
-  // Get the number of processes
-  int num_processes;
-  MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
-
-  int *bigdata = malloc(SCATTER_NUM * num_processes * sizeof(int));
-  int *localdata = malloc(SCATTER_NUM * sizeof(int));
-
-  for (int i = 0; i < SCATTER_NUM * num_processes; i++) {
-    bigdata[i] = i + 1;
-  }
-
-  // Get the rank of the process
-  int rank;
-  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-
-  // Get the name of the processor
-  char processor_name[MPI_MAX_PROCESSOR_NAME];
-  int name_len;
-  MPI_Get_processor_name(processor_name, &name_len);
-
-  MPI_Scatter(bigdata, SCATTER_NUM, MPI_INT, localdata, SCATTER_NUM, MPI_INT, 0,
-              MPI_COMM_WORLD);
-
-  printf("Process %d of %d on %s received: localdata[%d] = { ", rank, num_processes, processor_name, SCATTER_NUM);
-  for (int i = 0; i < SCATTER_NUM; i++) {
-    printf("%d, ", localdata[i]);
-  }
-  printf("}\n");
-
-  // Finalize the MPI environment. No more MPI calls can be made after this
-  MPI_Finalize();
-}
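
The Makefiles call plain `mpirun`, which starts all processes on the local machine. To run one of the examples across several nodes, as the README suggests, one possibility is a hostfile (this sketch assumes OpenMPI; the file name `hosts` and the node names are placeholders to adapt to the actual cluster):

```bash
# hypothetical hostfile listing two nodes with four slots each
cat > hosts <<'EOF'
node01 slots=4
node02 slots=4
EOF

# run the scatter example with 8 processes spread over both nodes
mpirun -np 8 --hostfile hosts ./scatter
```
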