/*
 * script-like code to demonstrate MPI's most
 * common collective communication operations
 */
#include <mpi.h>

#include <cstddef>
#include <iostream>

namespace
{

constexpr size_t chars_per_rank = 3;

// Print each rank's view of a buffer, serialized by rank so the lines appear
// in order: rank r waits behind r barriers, prints, then joins the remaining
// size - r barriers, so every rank passes through `size` barriers in total.
void output_in_order(int rank, int size, char* content, size_t content_size)
{
    for(int i = 0; i < rank; i++)
        MPI_Barrier(MPI_COMM_WORLD);

    std::cout << "\trank " << rank << ":";
    if(content)
        for(size_t i = 0; i < content_size; i++)
            std::cout << " '" << content[i] << "'";
    else
        std::cout << " nullptr";
    std::cout << "\n";
    std::cout.flush();

    for(int i = rank; i < size; i++)
        MPI_Barrier(MPI_COMM_WORLD);
}

} // namespace

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    // how many processes are there?
    int size = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // what is the rank of this process?
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // one shared buffer holding chars_per_rank characters per process
    const size_t content_size = chars_per_rank * size;
    char* content = new char[content_size]{};

    if(!rank)
    {
        // fill with 'a', 'b', 'c', ... (stays printable for small process counts)
        for(size_t i = 0; i < content_size; i++)
            content[i] = 'a' + i;
        std::cout << "Initializing char content[" << content_size << "].\n";
        std::cout.flush();
    }
    output_in_order(rank, size, content, content_size);

    // MPI_Bcast: rank 0 sends its content to every other rank
    if(!rank)
    {
        std::cout << "\nBroadcasting content[" << content_size << "].\n";
        std::cout.flush();
    }
    MPI_Bcast(content, static_cast<int>(content_size), MPI_CHAR, 0, MPI_COMM_WORLD);
    output_in_order(rank, size, content, content_size);

    // MPI_Scatter: rank 0 deals content out in chunks of chars_per_rank,
    // one chunk per rank (including itself)
    if(rank) // now let us deallocate and re-zero "content" if we are not rank 0
    {
        delete[] content;
        content = new char[content_size]{};
    }
    else
    {
        std::cout << "\nScattering content[" << content_size
                  << "] to local_chunk[" << chars_per_rank << "].\n";
        std::cout.flush();
    }
    char local_chunk[chars_per_rank] = {};
    MPI_Scatter(
        content, chars_per_rank, MPI_CHAR,
        local_chunk, chars_per_rank, MPI_CHAR,
        0, MPI_COMM_WORLD
    );
    output_in_order(rank, size, local_chunk, chars_per_rank);

    // MPI_Gather: now let rank 0 forget content and gather it again from all processes
    if(!rank)
    {
        std::cout << "\nClearing content.\n\trank 0:";
        delete[] content;
        content = new char[content_size]{};
        for(size_t i = 0; i < content_size; i++)
            std::cout << " '" << content[i] << "'";
        std::cout << "\nGathering using MPI_Gather.\n";
        std::cout.flush();
    }
    MPI_Gather(
        local_chunk, chars_per_rank, MPI_CHAR,
        content, chars_per_rank, MPI_CHAR,
        0, MPI_COMM_WORLD
    );
    output_in_order(rank, size, content, content_size);

    // MPI_Reduce: combine the local chunks element-wise with MPI_MAX on rank 0.
    // Note: MPI_MAX is not defined for MPI_BYTE or MPI_CHAR, so MPI_SIGNED_CHAR
    // is used for the plain char buffers (assuming char is signed, as on most platforms).
    if(!rank)
    {
        std::cout << "\nReducing local chunks into 'reduced' using MPI_Reduce with MPI_MAX.\n";
        std::cout.flush();
    }
    char reduced[chars_per_rank] = {};
    MPI_Reduce(local_chunk, reduced, chars_per_rank, MPI_SIGNED_CHAR, MPI_MAX, 0, MPI_COMM_WORLD);
    output_in_order(rank, size, reduced, chars_per_rank);

    // MPI_Allgather: like MPI_Gather, but every rank receives the full result
    if(!rank)
    {
        std::cout << "\nClearing content.\n";
        std::cout.flush();
    }
    delete[] content;
    content = new char[content_size]{};

    if(!rank)
    {
        std::cout << "Gathering using MPI_Allgather.\n";
        std::cout.flush();
    }
    MPI_Allgather(
        local_chunk, chars_per_rank, MPI_CHAR,
        content, chars_per_rank, MPI_CHAR,
        MPI_COMM_WORLD
    );
    output_in_order(rank, size, content, content_size);

    // MPI_Allreduce: like MPI_Reduce, but every rank receives the reduced result
    if(!rank)
    {
        std::cout << "\nReducing local chunks into 'reduced' using MPI_Allreduce with MPI_MAX.\n";
        std::cout.flush();
    }
    MPI_Allreduce(local_chunk, reduced, chars_per_rank, MPI_SIGNED_CHAR, MPI_MAX, MPI_COMM_WORLD);
    output_in_order(rank, size, reduced, chars_per_rank);

    MPI_Finalize();
    delete[] content;
}
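/*
 * One possible way to build and run this demo (an assumption, not part of the
 * original source): most MPI distributions ship a C++ compiler wrapper and a
 * launcher, e.g. mpic++/mpirun (Open MPI) or mpicxx/mpiexec (MPICH). The file
 * name "mpi_collectives.cpp" below is only illustrative:
 *
 *   mpic++ -O2 -o mpi_collectives mpi_collectives.cpp
 *   mpirun -np 4 ./mpi_collectives
 *
 * With 4 processes, each rank owns a 3-character chunk and the shared buffer
 * holds 12 characters ('a' through 'l').
 */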