Skip to content
Snippets Groups Projects
Commit b7a587a3 authored by Ondrej Meca's avatar Ondrej Meca
Browse files

ENH: init commit

parents
No related branches found
No related tags found
No related merge requests found
Pipeline #9794 failed
# Build system for the MPI teaching examples: one executable per source file,
# each linked against MPI and compiled with OpenMP enabled.
# FIX: CMake 2.6 compatibility has been removed from modern CMake (3.27+
# errors out on it); 3.5 is the oldest version current releases still accept.
cmake_minimum_required (VERSION 3.5)
project (mpiintro)
find_package(MPI REQUIRED)
include_directories(${MPI_INCLUDE_PATH})
# FIX: append -fopenmp instead of overwriting any flags the user supplied.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
foreach(file hello exchange pingpong pingpong2 gather igather msgorder cgather varysize mpiio syncsend barrier threads)
	add_executable(${file} ${file}.cpp)
	target_link_libraries(${file} ${MPI_LIBRARIES})
endforeach(file)
#include "mpi.h"
#include <vector>
// "barrier" example: demonstrates a deadlock caused by a collective call
// placed between a matching receive and send.
// NOTE(review): this program CANNOT complete — rank 1 blocks in MPI_Recv
// before reaching MPI_Barrier, so rank 0 (and everyone else) waits in the
// barrier forever and the matching MPI_Send on rank 0 is never issued.
// This appears to be a deliberate teaching demonstration — confirm before
// "fixing" it (moving the barrier before the Recv/Send pair would resolve it).
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
std::vector<int> data(10);
if (rank == 1) {
// Blocks here until rank 0 sends — but rank 0 is stuck in the barrier below.
MPI_Recv(data.data(), data.size(), MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
printf("data received from %d.\n", 0);
}
// Collective: every rank must reach this point, which rank 1 never does.
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 0) {
MPI_Send(data.data(), data.size(), MPI_INT, 1, 0, MPI_COMM_WORLD);
printf("data sent to %d.\n", 1);
}
MPI_Finalize();
return 0;
}
#include "mpi.h"
#include <vector>
// "gather" example: every rank contributes its rank number and the root
// collects one int per process with a single MPI_Gather, then prints them.
int main(int argc, char **argv) {
	MPI_Init(&argc, &argv);

	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	std::vector<int> data = { rank };      // this rank's contribution
	std::vector<int> gather(size);         // receive buffer (used on root only)

	// (An equivalent hand-written gather using MPI_Recv/MPI_Ssend loops is
	// what MPI_Gather replaces; see the cgather example.)
	const int ROOT = 0;
	const int SEND_COUNT = 1;              // elements sent per process
	const int RECV_COUNT = 1;              // elements received per process
	MPI_Gather(data.data(), SEND_COUNT, MPI_INT,
	           gather.data(), RECV_COUNT, MPI_INT,
	           ROOT, MPI_COMM_WORLD);

	if (rank == ROOT) {
		for (int r = 0; r < size; ++r) {
			printf("%d ", gather[r]);
		}
		printf("\n");
	}

	MPI_Finalize();
	return 0;
}
#include "mpi.h"
// Plain-old-data record used to demonstrate sending a heterogeneous struct
// over MPI (fixed-size char array + int + double; padding makes its size
// implementation-defined, so byte counts must use sizeof(Data)).
struct Data {
char name[20];
int x;
double y;
};
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
Data data;
MPI_Datatype dtype;
if (rank == 0) {
int COUNT = 1, TARGET = 1, TAG = 0;
MPI_Send(&data, COUNT, MPI_INT, TARGET, TAG, MPI_COMM_WORLD);
printf("data[%d] sent to %d.\n", data, TARGET);
} else {
int COUNT = 1, SOURCE = 0, TAG = 0;
MPI_Recv(&data, COUNT, MPI_INT, SOURCE, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
printf("data[%d] received from %d.\n", data, SOURCE);
}
MPI_Finalize();
return 0;
}
#include "mpi.h"
// "pingpong" example: rank 0 sends one int to rank 1.
// FIXES vs. original:
//  - removed the unused local `max`,
//  - the receive branch was a plain `else`, so with more than 2 processes
//    every rank >= 2 blocked forever in MPI_Recv waiting for a message
//    rank 0 never sends; restrict the receive to rank 1.
int main(int argc, char **argv) {
	MPI_Init(&argc, &argv);
	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	int data = 15;
	if (rank == 0) {
		int COUNT = 1, TARGET = 1, TAG = 0;
		MPI_Send(&data, COUNT, MPI_INT, TARGET, TAG, MPI_COMM_WORLD);
		printf("data[%d] sent to %d.\n", data, TARGET);
	} else if (rank == 1) {
		int COUNT = 1, SOURCE = 0, TAG = 0;
		MPI_Recv(&data, COUNT, MPI_INT, SOURCE, TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		printf("data[%d] received from %d.\n", data, SOURCE);
	}
	MPI_Finalize();
	return 0;
}
#include "mpi.h"
#include <unistd.h>
#include <vector>
// "cgather"-style example: rank 0 collects one int from every other rank with
// point-to-point messages; the usleep calls stagger the senders to show that
// the ordered receive loop still works.
// FIX: the receive used MPI_DOUBLE while the senders send MPI_INT — besides
// the type mismatch, 8 bytes per element were written into 4-byte slots,
// overflowing the buffer. Receive as MPI_INT.
int main(int argc, char **argv) {
	MPI_Init(&argc, &argv);
	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	std::vector<int> data = { rank };
	if (rank == 0) {
		data.resize(size); // data[0] keeps rank 0's own contribution
		for (int i = 1; i < size; ++i) {
			MPI_Recv(data.data() + i, 1, MPI_INT, i, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		}
	} else {
		if (rank == 1) {
			usleep(1000000); // delay rank 1 so it is the last to send
		}
		MPI_Ssend(data.data(), 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
		if (rank != 1) {
			usleep(1000000);
		}
	}
	if (rank == 0) {
		for (int i = 0; i < size; ++i) {
			printf("%d ", data[i]);
		}
		printf("\n");
	}
	MPI_Finalize();
	return 0;
}
#include "mpi.h"
// "hello" example: each rank reports its rank and the communicator size.
int main(int argc, char **argv) {
	MPI_Init(&argc, &argv);

	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	printf("Hello world from rank: %2d / %d\n", rank, size);

	MPI_Finalize();
	return 0;
}
#include "mpi.h"
#include <unistd.h>
#include <vector>
// "igather"-style example: like the blocking gather, but rank 0 posts all
// receives up front with MPI_Irecv and waits for them together, so the
// staggered senders complete in any order.
// FIX: the MPI_Irecv used MPI_DOUBLE while the senders send MPI_INT — a type
// mismatch that also wrote 8 bytes per element into 4-byte slots, overflowing
// the buffer. Receive as MPI_INT.
int main(int argc, char **argv) {
	MPI_Init(&argc, &argv);
	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	std::vector<int> data = { rank };
	if (rank == 0) {
		data.resize(size); // data[0] keeps rank 0's own contribution
		std::vector<MPI_Request> requests(size); // slot 0 unused
		for (int i = 1; i < size; ++i) {
			MPI_Irecv(data.data() + i, 1, MPI_INT, i, 0, MPI_COMM_WORLD, requests.data() + i);
		}
		MPI_Waitall(size - 1, requests.data() + 1, MPI_STATUSES_IGNORE);
	} else {
		if (rank == 1) {
			usleep(1000000); // delay rank 1 so it is the last to send
		}
		MPI_Ssend(data.data(), 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
		if (rank != 1) {
			usleep(1000000);
		}
	}
	if (rank == 0) {
		for (int i = 0; i < size; ++i) {
			printf("%d ", data[i]);
		}
		printf("\n");
	}
	MPI_Finalize();
	return 0;
}
#include "mpi.h"
#include <string>
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
std::string data = "xyyzzz";
int length[3] = { 1, 2, 3 };
MPI_Aint displacement[3] = { rank, size + 2 * rank, 3 * size + 3 * rank };
MPI_Datatype dtype;
MPI_Type_create_hindexed(3, length, displacement, MPI_CHAR, &dtype);
MPI_Type_commit(&dtype);
MPI_File MPIfile;
if (MPI_File_open(MPI_COMM_WORLD, "output.txt", MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &MPIfile)) {
printf("MPI cannot create file\n");
} else {
MPI_File_set_view(MPIfile, 0, MPI_CHAR, dtype, "native", MPI_INFO_NULL);
MPI_File_write_all(MPIfile, data.c_str(), data.size(), MPI_CHAR, MPI_STATUS_IGNORE);
MPI_File_close(&MPIfile);
}
MPI_Finalize();
return 0;
}
#include "mpi.h"
#include <unistd.h>
#include <vector>
// "msgorder" example: rank 2 receives two messages with MPI_ANY_SOURCE and
// reports who each one came from. Rank 0 sends to rank 1 and rank 2; rank 1
// forwards to rank 2 after its receive, so the arrival order at rank 2 is
// not fixed by the program.
int main(int argc, char **argv) {
	MPI_Init(&argc, &argv);

	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	std::vector<int> data({rank});
	switch (rank) {
	case 0:
		MPI_Send(data.data(), data.size(), MPI_INT, 1, 0, MPI_COMM_WORLD); // to 1
		usleep(1);
		MPI_Send(data.data(), data.size(), MPI_INT, 2, 0, MPI_COMM_WORLD); // to 2
		break;
	case 1:
		MPI_Recv(data.data(), data.size(), MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); // from 0
		MPI_Send(data.data(), data.size(), MPI_INT, 2, 0, MPI_COMM_WORLD); // to 2
		break;
	case 2:
		for (int msg = 0; msg < 2; ++msg) {
			MPI_Status status;
			MPI_Recv(data.data(), data.size(), MPI_INT, MPI_ANY_SOURCE, 0, MPI_COMM_WORLD, &status); // from *
			printf("receive from %d\n", status.MPI_SOURCE);
		}
		break;
	default:
		break; // ranks >= 3 take no part
	}

	MPI_Finalize();
	return 0;
}
#include "mpi.h"
#include <unistd.h>
#define INLOOP 0
#define FINISH 1
// Token-ring example: a counter circulates rank 0 -> 1 -> ... -> size-1 -> 0,
// each rank incrementing and printing it.
// NOTE(review): this version never terminates — the loop is `while (true)`,
// the local TAG is always INLOOP, and the FINISH tag defined above is never
// sent, so MPI_Finalize below is unreachable. This appears to be the
// deliberately non-terminating variant of the example that follows (which
// adds a `max` cutoff and a FINISH handshake) — confirm before changing.
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int counter = 0;
int COUNT = 1;
// Ring neighbours: receive from the previous rank, send to the next.
int SOURCE = (rank + size - 1) % size;
int TARGET = (rank + 1) % size;
int TAG = 0;
if (rank == 0) { // rank 0 injects the token to start the ring
MPI_Send(&counter, COUNT, MPI_INT, TARGET, TAG, MPI_COMM_WORLD);
}
while (true) {
MPI_Status status;
MPI_Recv(&counter, COUNT, MPI_INT, SOURCE, TAG, MPI_COMM_WORLD, &status);
if (status.MPI_TAG == INLOOP) {
printf("rank %d: counter: %d\n", rank, counter++);
usleep(500000);
}
if (TAG == INLOOP) { // always true here — TAG is never changed
MPI_Send(&counter, COUNT, MPI_INT, TARGET, TAG, MPI_COMM_WORLD);
}
}
MPI_Finalize();
return 0;
}
#include "mpi.h"
#include <unistd.h>
#define INLOOP 0
#define FINISH 1
// Token-ring example with termination: a counter circulates the ring
// (0 -> 1 -> ... -> size-1 -> 0); each rank increments and prints it. When a
// rank observes counter == max it switches its outgoing tag to FINISH; the
// FINISH token then makes one final pass so every rank leaves its loop.
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int counter = 0, max = 10;
int COUNT = 1;
// Ring neighbours: receive from the previous rank, send to the next.
int SOURCE = (rank + size - 1) % size;
int TARGET = (rank + 1) % size;
int TAG = 0;
if (rank == 0) { // rank 0 injects the token to start the ring
MPI_Send(&counter, COUNT, MPI_INT, TARGET, TAG, MPI_COMM_WORLD);
}
// Keep circulating until this rank has forwarded a FINISH token itself.
while (TAG == INLOOP) {
MPI_Status status;
// MPI_ANY_TAG: the incoming token may carry INLOOP or FINISH.
MPI_Recv(&counter, COUNT, MPI_INT, SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
if (status.MPI_TAG == INLOOP) {
printf("rank %d: counter: %d\n", rank, counter++);
usleep(500000);
}
if (TAG == INLOOP) {
// Escalate to FINISH once the cutoff is reached; the token is still
// forwarded so the shutdown propagates around the whole ring.
if (counter == max) { TAG = FINISH; }
MPI_Send(&counter, COUNT, MPI_INT, TARGET, TAG, MPI_COMM_WORLD);
}
}
MPI_Finalize();
return 0;
}
#include "mpi.h"
#include <vector>
#include <sstream>
// "varysize" exchange: every rank does a blocking MPI_Send to its right
// neighbour before posting the matching MPI_Recv. For small messages the
// eager protocol lets this complete; past the eager threshold MPI_Send
// blocks and the ring deadlocks — which is the point of the demo.
// FIXES vs. original:
//  - argv[1] was dereferenced without checking argc (crash when run without
//    an argument),
//  - `datasize` stayed uninitialized when parsing failed; it now defaults to
//    0 and the program exits cleanly with a usage message.
int main(int argc, char **argv) {
	MPI_Init(&argc, &argv);
	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	int datasize = 0;
	if (argc > 1) {
		std::stringstream ss(argv[1]);
		ss >> datasize;
	}
	if (datasize <= 0) {
		if (rank == 0) {
			printf("usage: %s <message size>\n", argv[0]);
		}
		MPI_Finalize();
		return 1;
	}
	std::vector<double> send(datasize);
	std::vector<double> recv(datasize);
	int SOURCE = (size + rank - 1) % size;
	int TARGET = (size + rank + 1) % size;
	// Deliberate send-before-receive: deadlocks once messages exceed the
	// implementation's eager limit.
	MPI_Send(send.data(), send.size(), MPI_DOUBLE, TARGET, 0, MPI_COMM_WORLD);
	MPI_Recv(recv.data(), recv.size(), MPI_DOUBLE, SOURCE, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
	MPI_Finalize();
	return 0;
}
#include "mpi.h"
#include "omp.h"
#include <sstream>
#include <unistd.h>
int main(int argc, char **argv) {
int threads;
std::stringstream env(getenv("OMP_NUM_THREADS"));
env >> threads;
int provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
if (provided < MPI_THREAD_FUNNELED) {
printf("MPI does not support required MPI / THREAD combination [provided=%d].\n", provided);
return 0;
}
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
#pragma omp parallel for
for (int t = 0; t < threads; t++) {
double start = omp_get_wtime();
while (omp_get_wtime() - start < 5);
printf("Hello world from rank: %d-%d / %d\n", rank, t, size);
}
MPI_Finalize();
return 0;
}
#include "mpi.h"
#include <cstring>
// "probe" example: the receiver does not know the message length in advance;
// it uses MPI_Probe + MPI_Get_count to size the buffer before MPI_Recv.
// FIXES vs. original:
//  - the sender transmits strlen(data) chars WITHOUT a terminating '\0', but
//    the receiver printed the buffer with %s — an out-of-bounds read. The
//    buffer is now one byte larger and explicitly NUL-terminated,
//  - the inner `int size` shadowed the communicator size; renamed to `count`.
int main(int argc, char **argv) {
	MPI_Init(&argc, &argv);
	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	if (rank == 0) {
		const char* data = "random size value";
		MPI_Send(data, strlen(data), MPI_CHAR, 1, 0, MPI_COMM_WORLD);
	} else {
		MPI_Status status;
		// Peek at the pending message to learn its length.
		MPI_Probe(0, 0, MPI_COMM_WORLD, &status);
		int count;
		MPI_Get_count(&status, MPI_CHAR, &count);
		char* data = new char[count + 1]; // +1 for the terminator
		MPI_Recv(data, count, MPI_CHAR, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
		data[count] = '\0'; // the wire format carries no '\0'
		printf("received: %s\n", data);
		delete[] data;
	}
	MPI_Finalize();
	return 0;
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment