c - MPI Asynchronous/One-Sided Communication -



c - MPI Asynchronous/One-Sided Communication -

I have a situation similar to the code below: worker processes each work on a subset of the data, and must send an unknown amount of data back to the master. Is it possible to have the master wait for and receive an unknown number of sends from the worker processes? Is there a way to do it using one-sided communication? Thanks in advance!

#include <errno.h> #include <mpi.h> #include <stdio.h> #include <stdlib.h> #include <time.h> /* sample run/output: $mpirun -np 5 practice.exe @[1]: i=30 @[2]: i=0 @[2]: i=75 @[4]: i=40 @[4]: i=55 @[3]: i=85 @[3]: i=65 */ int main(int argc, char *argv[]) { int i, rank, size, np, nw, num; mpi_init(&argc, &argv); mpi_comm_rank(mpi_comm_world, &rank); mpi_comm_size(mpi_comm_world, &np); nw = np -1; srand(time(null)*rank); if (rank > 0) { (i=(rank-1); i<(nw*10); i+=nw) { num = rand() % 100; if (num % 5 == 0) { printf("@[%d]: i=%d\n", rank, num); // send num master } } } else { // receive num worker } mpi_finalize(); homecoming exit_success; }

Sure, there are lots of ways to do this, and it doesn't even require asynchronous communication. You can do it with one-sided communication, but that has its own problems (you would still have to be able to guess how much total memory is needed for the data).

One way is to figure out how much data you have, send that count ahead so the master knows how many messages to receive, and then send the data one item at a time:

#include <errno.h> #include <mpi.h> #include <stdio.h> #include <stdlib.h> #define maxperworker 10 #define tag_num_incoming 1 #define tag_data 2 int main(int argc, char *argv[]) { int i, rank, size, np, nw, num; int mynums[maxperworker], numcount, total; mpi_init(&argc, &argv); mpi_comm_rank(mpi_comm_world, &rank); mpi_comm_size(mpi_comm_world, &np); nw = np -1; srand(time(null)*rank); if (rank > 0) { numcount = 0; total = 0; (i=(rank-1); i<(nw*10); i+=nw) { num = rand() % 100; if (num % 3 == 0) { printf("@[%d]: i=%d\n", rank, num); mynums[numcount] = num; numcount++; total += num; } } /* of course, in case * in 1 message, but.. */ mpi_send(&numcount, 1, mpi_int, 0, tag_num_incoming, mpi_comm_world); (i=0; i<numcount; i++) mpi_send(&(mynums[i]), 1, mpi_int, 0, tag_data, mpi_comm_world); printf("@[%d]: total of nums %d\n", rank, total); } else { int *totals = malloc(sizeof(int)*nw); int *counts = malloc(sizeof(int)*nw); int *sofar = malloc(sizeof(int)*nw); int **data = malloc(sizeof(int *)*nw); int rcv; int totalcounts; int j; int workernum; mpi_status status; (i=0; i<nw; i++) { sofar[i] = 0; totals[i]= 0; } /* number of incoming messages */ (i=0; i<nw; i++) { mpi_recv(&rcv, 1, mpi_int, mpi_any_source, tag_num_incoming, mpi_comm_world, &status); workernum = status.mpi_source-1; counts[workernum] = rcv; totalcounts += rcv; data[workernum] = malloc(sizeof(int)*rcv); } /* real info */ (i=0; i<totalcounts; i++) { mpi_recv(&rcv, 1, mpi_int, mpi_any_source, tag_data, mpi_comm_world, &status); workernum = status.mpi_source-1; data[ workernum ][ sofar[workernum]++ ] = rcv; totals[ workernum ] += rcv; } /* print results */ (i=0; i<nw; i++) { printf("from [%2d]:", i+1); (j=0; j<counts[i]; j++) printf("%3d ", data[i][j]); printf("| %3d\n", totals[i]); } (i=0; i<nw; i++) free(data[i]); free(data); free(totals); free(counts); free(sofar); } mpi_finalize(); homecoming exit_success; }

Running on 4 processes, I get:

$ mpirun -np 4 ./masterworker1 @[1]: i=39 @[1]: i=81 @[3]: i=9 @[3]: i=45 @[3]: i=0 @[3]: i=57 @[3]: total of nums 111 @[1]: total of nums 120 [ 1]: 39 81 | 120 [ 2]: 24 6 39 | 69 [ 3]: 9 45 0 57 | 111 @[2]: i=24 @[2]: i=6 @[2]: i=39 @[2]: total of nums 69

However, this might not be feasible -- you might not want to buffer all your data like that (and if you could, you could just send it in one message).

Another approach is to just send the data, and then send a special message when you're done sending data; the master keeps receiving until it has heard one of these "done" messages from each worker:

#include <errno.h> #include <mpi.h> #include <stdio.h> #include <stdlib.h> #define maxperworker 10 #define tag_data 2 #define tag_done 1 int main(int argc, char *argv[]) { int i, rank, size, np, nw, num; int mynums[maxperworker], numcount, total; mpi_init(&argc, &argv); mpi_comm_rank(mpi_comm_world, &rank); mpi_comm_size(mpi_comm_world, &np); nw = np -1; srand(time(null)*rank); if (rank > 0) { numcount = 0; total = 0; (i=(rank-1); i<(nw*10); i+=nw) { num = rand() % 100; if (num % 3 == 0) { printf("@[%d]: i=%d\n", rank, num); total += num; mpi_send(&num, 1, mpi_int, 0, tag_data, mpi_comm_world); } } mpi_send(&num, 1, mpi_int, 0, tag_done, mpi_comm_world); printf("@[%d]: total of nums %d\n", rank, total); } else { int *totals = malloc(sizeof(int)*nw); int *counts = malloc(sizeof(int)*nw); int **data = malloc(sizeof(int *)*nw); int rcv; int j; int workernum; int stillsending; mpi_status status; (i=0; i<nw; i++) { totals[i]= 0; counts[i]= 0; data[i] = malloc(sizeof(int)*maxperworker); } stillsending = nw; /* info */ while (stillsending > 0) { mpi_recv(&rcv, 1, mpi_int, mpi_any_source, mpi_any_tag, mpi_comm_world, &status); workernum = status.mpi_source-1; if (status.mpi_tag == tag_done) { stillsending--; } else if (status.mpi_tag == tag_data) { data[workernum][counts[workernum]] = rcv; totals[workernum] += rcv; counts[workernum]++; } } /* print results */ (i=0; i<nw; i++) { printf("from [%2d]:", i+1); (j=0; j<counts[i]; j++) printf("%3d ", data[i][j]); printf("| %3d\n", totals[i]); } (i=0; i<nw; i++) free(data[i]); free(data); free(totals); free(counts); } mpi_finalize(); homecoming exit_success; }

Again on 4 tasks, I get:

$ mpirun -np 4 ./masterworker2 @[1]: i=63 @[1]: i=99 @[1]: i=60 @[1]: i=69 @[1]: i=21 @[1]: i=48 @[1]: i=24 @[1]: total of nums 384 @[2]: i=39 @[2]: i=84 @[2]: i=63 @[2]: total of nums 186 @[3]: i=3 @[3]: i=51 @[3]: i=36 @[3]: total of nums 90 [ 1]: 63 99 60 69 21 48 24 | 384 [ 2]: 39 84 63 | 186 [ 3]: 3 51 36 | 90

Note that in both of these cases I've relied on a MAXPERWORKER-sized array to preallocate things; you don't need that, though -- you can malloc an array and realloc as necessary, or use a std::vector if you're willing to use C++.

c asynchronous mpi

Comments

Popular posts from this blog

iphone - Dismissing a UIAlertView -

intellij idea - Update external libraries with intelij and java -

javascript - send data from a new window to previous window in php -